python_code | repo_name | file_path
---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://ieeexplore.ieee.org/document/9625818)
"""Encoder modules."""
import torch
import inspect
from layers.conv_layer import NonCausalConv1d
from layers.conv_layer import CausalConv1d
from models.autoencoder.modules.residual_unit import NonCausalResidualUnit
from models.autoencoder.modules.residual_unit import CausalResidualUnit
from models.utils import check_mode
class EncoderBlock(torch.nn.Module):
""" Encoder block (downsampling) """
def __init__(
self,
in_channels,
out_channels,
stride,
dilations=(1, 3, 9),
bias=True,
mode='causal',
):
super().__init__()
self.mode = mode
if self.mode == 'noncausal':
ResidualUnit = NonCausalResidualUnit
Conv1d = NonCausalConv1d
elif self.mode == 'causal':
ResidualUnit = CausalResidualUnit
Conv1d = CausalConv1d
else:
raise NotImplementedError(f"Mode ({self.mode}) is not supported!")
self.res_units = torch.nn.ModuleList()
for dilation in dilations:
self.res_units += [
ResidualUnit(in_channels, in_channels, dilation=dilation)]
self.num_res = len(self.res_units)
self.conv = Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(2 * stride),
stride=stride,
bias=bias,
)
def forward(self, x):
for idx in range(self.num_res):
x = self.res_units[idx](x)
x = self.conv(x)
return x
def inference(self, x):
check_mode(self.mode, inspect.stack()[0][3])
for idx in range(self.num_res):
x = self.res_units[idx].inference(x)
x = self.conv.inference(x)
return x
class Encoder(torch.nn.Module):
def __init__(self,
input_channels,
encode_channels,
channel_ratios=(2, 4, 8, 16),
strides=(3, 4, 5, 5),
kernel_size=7,
bias=True,
mode='causal',
):
super().__init__()
assert len(channel_ratios) == len(strides)
self.mode = mode
if self.mode == 'noncausal':
Conv1d = NonCausalConv1d
elif self.mode == 'causal':
Conv1d = CausalConv1d
else:
raise NotImplementedError(f"Mode ({self.mode}) is not supported!")
self.conv = Conv1d(
in_channels=input_channels,
out_channels=encode_channels,
kernel_size=kernel_size,
stride=1,
bias=False)
self.conv_blocks = torch.nn.ModuleList()
in_channels = encode_channels
for idx, stride in enumerate(strides):
out_channels = encode_channels * channel_ratios[idx]
self.conv_blocks += [
EncoderBlock(in_channels, out_channels, stride, bias=bias, mode=self.mode)]
in_channels = out_channels
self.num_blocks = len(self.conv_blocks)
self.out_channels = out_channels
def forward(self, x):
x = self.conv(x)
for i in range(self.num_blocks):
x = self.conv_blocks[i](x)
return x
def encode(self, x):
check_mode(self.mode, inspect.stack()[0][3])
x = self.conv.inference(x)
for i in range(self.num_blocks):
x = self.conv_blocks[i].inference(x)
return x
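# --- Hypothetical usage sketch (not part of the original file) ---
# A minimal shape check, assuming the repo root is on PYTHONPATH and that the
# conv layers in layers/conv_layer.py pad so each block downsamples by exactly
# its stride. With the defaults, the total stride is 3 * 4 * 5 * 5 = 300 and
# the output channels are encode_channels * channel_ratios[-1].
if __name__ == "__main__":
    wav = torch.randn(2, 1, 3000)  # (B, C, T); values are illustrative only
    encoder = Encoder(input_channels=1, encode_channels=32)
    feats = encoder(wav)
    print(feats.shape)  # expected: (2, 32 * 16, 3000 // 300) = (2, 512, 10)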
| AudioDec-main | models/autoencoder/modules/encoder.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://ieeexplore.ieee.org/document/9625818)
"""Residual Units."""
import torch
import torch.nn as nn
from layers.conv_layer import Conv1d1x1, NonCausalConv1d, CausalConv1d
class NonCausalResidualUnit(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=7,
dilation=1,
bias=False,
nonlinear_activation="ELU",
nonlinear_activation_params={},
):
super().__init__()
self.activation = getattr(nn, nonlinear_activation)(**nonlinear_activation_params)
self.conv1 = NonCausalConv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=1,
dilation=dilation,
bias=bias,
)
self.conv2 = Conv1d1x1(out_channels, out_channels, bias)
def forward(self, x):
y = self.conv1(self.activation(x))
y = self.conv2(self.activation(y))
return x + y
class CausalResidualUnit(NonCausalResidualUnit):
def __init__(
self,
in_channels,
out_channels,
kernel_size=7,
dilation=1,
bias=False,
nonlinear_activation="ELU",
nonlinear_activation_params={},
):
super(CausalResidualUnit, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
dilation=dilation,
bias=bias,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
)
self.conv1 = CausalConv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=1,
dilation=dilation,
bias=bias,
)
def inference(self, x):
y = self.conv1.inference(self.activation(x))
y = self.conv2(self.activation(y))
        return x + y
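# --- Hypothetical usage sketch (not part of the original file) ---
# The residual units are shape-preserving, so in_channels must equal
# out_channels for the skip connection x + y; shapes here are illustrative.
if __name__ == "__main__":
    unit = CausalResidualUnit(in_channels=64, out_channels=64, dilation=3)
    x = torch.randn(2, 64, 100)  # (B, C, T)
    print(unit(x).shape)  # expected: torch.Size([2, 64, 100])
| AudioDec-main | models/autoencoder/modules/residual_unit.py |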
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Projector modules."""
import torch
import inspect
from layers.conv_layer import NonCausalConv1d
from layers.conv_layer import CausalConv1d
from models.utils import check_mode
class Projector(torch.nn.Module):
def __init__(self,
input_channels,
code_dim,
kernel_size=3,
stride=1,
bias=False,
mode='causal',
model='conv1d',
):
super().__init__()
self.mode = mode
if self.mode == 'noncausal':
Conv1d = NonCausalConv1d
elif self.mode == 'causal':
Conv1d = CausalConv1d
else:
raise NotImplementedError(f"Mode ({mode}) is not supported!")
if model == 'conv1d':
self.project = Conv1d(input_channels, code_dim, kernel_size=kernel_size, stride=stride, bias=bias)
elif model == 'conv1d_bn':
self.project = torch.nn.Sequential(
Conv1d(input_channels, code_dim, kernel_size=kernel_size, stride=stride, bias=bias),
torch.nn.BatchNorm1d(code_dim)
)
else:
raise NotImplementedError(f"Model ({model}) is not supported!")
def forward(self, x):
return self.project(x)
def encode(self, x):
check_mode(self.mode, inspect.stack()[0][3])
return self.project.inference(x)
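# --- Hypothetical usage sketch (not part of the original file) ---
# Projects encoder features down to the code dimension. Assumes the conv
# layers pad so the default stride=1 preserves the time axis; shapes are
# illustrative only.
if __name__ == "__main__":
    projector = Projector(input_channels=512, code_dim=64, mode='noncausal')
    feats = torch.randn(2, 512, 10)  # e.g. an Encoder output (B, C, T')
    print(projector(feats).shape)  # expected: (2, 64, 10)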
| AudioDec-main | models/autoencoder/modules/projector.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
# Reference (https://github.com/chomeyama/SiFiGAN)
"""HiFi-GAN Modules. (Causal)"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.vocoder.modules.discriminator import UnivNetMultiResolutionSpectralDiscriminator
from models.vocoder.modules.discriminator import HiFiGANMultiPeriodDiscriminator
class Discriminator(nn.Module):
"""UnivNet multi-resolution spectrogram + multi-period discriminator module."""
def __init__(
self,
# Multi-resolution spectrogram discriminator related
fft_sizes=[1024, 2048, 512],
hop_sizes=[120, 240, 50],
win_lengths=[600, 1200, 240],
window="hann_window",
spectral_discriminator_params={
"channels": 32,
"kernel_sizes": [(3, 9), (3, 9), (3, 9), (3, 9), (3, 3), (3, 3)],
"strides": [(1, 1), (1, 2), (1, 2), (1, 2), (1, 1), (1, 1)],
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.2},
},
# Multi-period discriminator related
periods=[2, 3, 5, 7, 11],
period_discriminator_params={
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 32,
"downsample_scales": [3, 3, 3, 3, 1],
"max_downsample_channels": 1024,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
flat_channel=False,
):
"""Initilize HiFiGAN multi-scale + multi-period discriminator module.
Args:
fft_sizes (list): FFT sizes for each spectral discriminator.
hop_sizes (list): Hop sizes for each spectral discriminator.
win_lengths (list): Window lengths for each spectral discriminator.
window (stt): Name of window function.
sperctral_discriminator_params (dict): Parameters for hifi-gan scale discriminator module.
periods (list): List of periods.
period_discriminator_params (dict): Parameters for hifi-gan period discriminator module.
The period parameter will be overwritten.
flat_channel (bool):set true to flat multi-channel input to one-channel with multi-batch
"""
super().__init__()
self.flat_channel = flat_channel
self.mrsd = UnivNetMultiResolutionSpectralDiscriminator(
fft_sizes=fft_sizes,
hop_sizes=hop_sizes,
win_lengths=win_lengths,
window=window,
discriminator_params=spectral_discriminator_params,
)
self.mpd = HiFiGANMultiPeriodDiscriminator(
periods=periods,
discriminator_params=period_discriminator_params,
)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, C, T).
Returns:
List: List of list of each discriminator outputs,
which consists of each layer output tensors.
Multi scale and multi period ones are concatenated.
"""
(batch, channel, time) = x.size()
if channel != 1 and self.flat_channel:
x = x.reshape(batch * channel, 1, time)
mrsd_outs = self.mrsd(x)
mpd_outs = self.mpd(x)
return mrsd_outs + mpd_outs
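# --- Hypothetical usage sketch (not part of the original file) ---
# Feeds a random waveform through the combined discriminator. With the
# defaults there are 3 spectral + 5 period sub-discriminators, so the
# returned list has 8 entries; shapes are illustrative only.
if __name__ == "__main__":
    d = Discriminator()
    wav = torch.randn(2, 1, 8000)  # (B, C, T)
    print(len(d(wav)))  # expected: 8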
| AudioDec-main | models/vocoder/UnivNet.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
# Reference (https://github.com/jik876/hifi-gan)
"""HiFi-GAN Modules. (Causal)"""
import logging
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.conv_layer import CausalConv1d, CausalConvTranspose1d
from models.vocoder.modules.multi_fusion import MultiReceptiveField, MultiGroupConv1d
from models.vocoder.modules.discriminator import HiFiGANMultiScaleDiscriminator
from models.vocoder.modules.discriminator import HiFiGANMultiPeriodDiscriminator
class Generator(nn.Module):
"""HiFiGAN causal generator module."""
def __init__(
self,
in_channels=80,
out_channels=1,
channels=512,
kernel_size=7,
upsample_scales=(8, 8, 2, 2),
upsample_kernel_sizes=(16, 16, 4, 4),
resblock_kernel_sizes=(3, 7, 11),
resblock_dilations=[(1, 3, 5), (1, 3, 5), (1, 3, 5)],
groups=1,
bias=True,
use_additional_convs=True,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.1},
use_weight_norm=True,
stats=None,
):
"""Initialize HiFiGANGenerator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
channels (int): Number of hidden representation channels.
kernel_size (int): Kernel size of initial and final conv layer.
upsample_scales (list): List of upsampling scales.
upsample_kernel_sizes (list): List of kernel sizes for upsampling layers.
resblock_kernel_sizes (list): List of kernel sizes for residual blocks.
resblock_dilations (list): List of dilation list for residual blocks.
groups (int): Number of groups of residual conv
bias (bool): Whether to add bias parameter in convolution layers.
use_additional_convs (bool): Whether to use additional conv layers in residual blocks.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
            stats (str): File name of the statistics file.
"""
super().__init__()
# check hyperparameters are valid
assert kernel_size % 2 == 1, "Kernel size must be odd number."
assert len(upsample_scales) == len(upsample_kernel_sizes)
assert len(resblock_dilations) == len(resblock_kernel_sizes)
# Group conv or MRF
if (len(resblock_dilations) == len(resblock_kernel_sizes) == 1) and (groups > 1):
multi_fusion = MultiGroupConv1d
else:
multi_fusion = MultiReceptiveField
# define modules
self.num_upsamples = len(upsample_kernel_sizes)
self.input_conv = CausalConv1d(
in_channels,
channels,
kernel_size,
stride=1,
)
self.upsamples = nn.ModuleList()
self.blocks = nn.ModuleList()
self.activation_upsamples = getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)
for i in range(len(upsample_kernel_sizes)):
assert upsample_kernel_sizes[i] == 2 * upsample_scales[i]
self.upsamples += [
CausalConvTranspose1d(
channels // (2 ** i),
channels // (2 ** (i + 1)),
kernel_size=upsample_kernel_sizes[i],
stride=upsample_scales[i],
)
]
self.blocks += [
multi_fusion(
channels=channels // (2 ** (i + 1)),
resblock_kernel_sizes=resblock_kernel_sizes,
resblock_dilations=resblock_dilations,
groups=groups,
bias=bias,
use_additional_convs=use_additional_convs,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
)
]
self.activation_output1 = nn.LeakyReLU()
self.activation_output2 = nn.Tanh()
self.output_conv = CausalConv1d(
channels // (2 ** (i + 1)),
out_channels,
kernel_size,
stride=1,
)
# load stats
if stats is not None:
self.register_stats(stats)
self.norm = True
else:
self.norm = False
logging.info(f"Input normalization: {self.norm}")
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
# reset parameters
self.reset_parameters()
def forward(self, c):
"""Calculate forward propagation.
Args:
c (Tensor): Input tensor (B, in_channels, T).
Returns:
Tensor: Output tensor (B, out_channels, T).
"""
if self.norm:
c = (c.transpose(2, 1) - self.mean) / self.scale
c = c.transpose(2, 1)
c = self.input_conv(c)
for i in range(self.num_upsamples):
c = self.upsamples[i](self.activation_upsamples(c))
c = self.blocks[i](c)
c = self.output_conv(self.activation_output1(c))
c = self.activation_output2(c)
return c
def reset_parameters(self):
"""Reset parameters.
This initialization follows the official implementation manner.
https://github.com/jik876/hifi-gan/blob/master/models.py
"""
def _reset_parameters(m):
if isinstance(m, (nn.Conv1d, nn.ConvTranspose1d)):
m.weight.data.normal_(0.0, 0.01)
logging.debug(f"Reset parameters in {m}.")
self.apply(_reset_parameters)
def remove_weight_norm(self):
"""Remove weight normalization module from all of the layers."""
def _remove_weight_norm(m):
try:
logging.debug(f"Weight norm is removed from {m}.")
nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(_remove_weight_norm)
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m):
if isinstance(m, nn.Conv1d) or isinstance(
m, nn.ConvTranspose1d
):
nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
def register_stats(self, stats):
"""Register stats for de-normalization as buffer.
Args:
stats (str): Path of statistics file (".npy" or ".h5").
"""
assert stats.endswith(".h5") or stats.endswith(".npy")
assert os.path.exists(stats), f"Stats {stats} does not exist!"
mean = np.load(stats)[0].reshape(-1)
scale = np.load(stats)[1].reshape(-1)
self.register_buffer("mean", torch.from_numpy(mean).float())
self.register_buffer("scale", torch.from_numpy(scale).float())
logging.info("Successfully registered stats as buffer.")
class StreamGenerator(Generator):
"""HiFiGAN streaming generator."""
def __init__(
self,
in_channels=80,
out_channels=1,
channels=512,
kernel_size=7,
upsample_scales=(8, 8, 2, 2),
upsample_kernel_sizes=(16, 16, 4, 4),
resblock_kernel_sizes=(3, 7, 11),
resblock_dilations=[(1, 3, 5), (1, 3, 5), (1, 3, 5)],
groups=1,
bias=True,
use_additional_convs=True,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.1},
use_weight_norm=True,
stats=None,
):
super(StreamGenerator, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
channels=channels,
kernel_size=kernel_size,
upsample_scales=upsample_scales,
upsample_kernel_sizes=upsample_kernel_sizes,
resblock_kernel_sizes=resblock_kernel_sizes,
resblock_dilations=resblock_dilations,
groups=groups,
bias=bias,
use_additional_convs=use_additional_convs,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
use_weight_norm=use_weight_norm,
stats=stats,
)
self.reset_buffer()
def initial_decoder(self, c):
self.decode(c)
def decode(self, c):
c = self.decode_norm(c)
c = self.decode_input(c.transpose(2, 1))
c = self.decode_upsample(c)
c = self.decode_output(c)
return c
def decode_norm(self, c):
if self.norm:
c = (c - self.mean) / self.scale
return c
def decode_input(self, c):
c = self.input_conv.inference(c)
return c
def decode_upsample(self, c):
for i in range(self.num_upsamples):
c = self.upsamples[i].inference(self.activation_upsamples(c))
c = self.blocks[i].inference(c)
return c
def decode_output(self, c):
c = self.output_conv.inference(self.activation_output1(c))
return self.activation_output2(c)
def reset_buffer(self):
"""Apply weight normalization module from all layers."""
def _reset_buffer(m):
if isinstance(m, CausalConv1d) or isinstance(m, CausalConvTranspose1d):
m.reset_buffer()
self.apply(_reset_buffer)
class Discriminator(nn.Module):
"""HiFi-GAN multi-scale + multi-period discriminator module."""
def __init__(
self,
# Multi-scale discriminator related
scales=3,
scale_downsample_pooling="AvgPool1d",
scale_downsample_pooling_params={
"kernel_size": 4,
"stride": 2,
"padding": 2,
},
scale_discriminator_params={
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 128,
"max_downsample_channels": 1024,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 4, 4, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
},
follow_official_norm=True,
# Multi-period discriminator related
periods=[2, 3, 5, 7, 11],
period_discriminator_params={
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 32,
"downsample_scales": [3, 3, 3, 3, 1],
"max_downsample_channels": 1024,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
):
"""Initilize HiFiGAN multi-scale + multi-period discriminator module.
Args:
scales (int): Number of multi-scales.
scale_downsample_pooling (str): Pooling module name for downsampling of the inputs.
scale_downsample_pooling_params (dict): Parameters for the above pooling module.
scale_discriminator_params (dict): Parameters for hifi-gan scale discriminator module.
            follow_official_norm (bool): Whether to follow the norm setting of the official
                implementation. The first discriminator uses spectral norm and the other
                discriminators use weight norm.
periods (list): List of periods.
period_discriminator_params (dict): Parameters for hifi-gan period discriminator module.
The period parameter will be overwritten.
"""
super().__init__()
self.msd = HiFiGANMultiScaleDiscriminator(
scales=scales,
downsample_pooling=scale_downsample_pooling,
downsample_pooling_params=scale_downsample_pooling_params,
discriminator_params=scale_discriminator_params,
follow_official_norm=follow_official_norm,
)
self.mpd = HiFiGANMultiPeriodDiscriminator(
periods=periods,
discriminator_params=period_discriminator_params,
)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, C, T).
Returns:
List: List of list of each discriminator outputs,
which consists of each layer output tensors.
Multi scale and multi period ones are concatenated.
"""
(batch, channel, time) = x.size()
if channel != 1:
x = x.reshape(batch * channel, 1, time)
msd_outs = self.msd(x)
mpd_outs = self.mpd(x)
return msd_outs + mpd_outs
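# --- Hypothetical usage sketch (not part of the original file) ---
# Chunk-wise decoding with StreamGenerator. decode() expects (B, T', C)
# mel chunks and transposes internally; with the default upsample_scales the
# output is 8 * 8 * 2 * 2 = 256 samples per input frame. This assumes the
# causal conv buffers in layers/conv_layer.py carry state across calls.
if __name__ == "__main__":
    g = StreamGenerator(in_channels=80, out_channels=1)
    g.eval()
    g.reset_buffer()
    with torch.no_grad():
        for _ in range(3):  # three consecutive 10-frame chunks
            chunk = torch.randn(1, 10, 80)
            wav = g.decode(chunk)
            print(wav.shape)  # expected: (1, 1, 10 * 256) = (1, 1, 2560)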
| AudioDec-main | models/vocoder/HiFiGAN.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
# Reference (https://github.com/r9y9/wavenet_vocoder)
# Reference (https://github.com/jik876/hifi-gan)
"""Multi-fusion modules."""
import math
import torch
import torch.nn as nn
from layers.conv_layer import CausalConv1d, Conv1d1x1
from models.vocoder.modules.residual_block import HiFiGANResidualBlock
class MultiReceptiveField(nn.Module):
"""Multi-receptive field module in HiFiGAN."""
def __init__(
self,
channels=512,
resblock_kernel_sizes=(3, 7, 11),
resblock_dilations=[(1, 3, 5), (1, 3, 5), (1, 3, 5)],
groups=1,
bias=True,
use_additional_convs=True,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.1},
):
assert len(resblock_kernel_sizes) == len(resblock_dilations)
super().__init__()
self.num_blocks = len(resblock_kernel_sizes)
self.blocks = nn.ModuleList()
for i in range(self.num_blocks):
self.blocks += [
HiFiGANResidualBlock(
kernel_size=resblock_kernel_sizes[i],
channels=channels,
dilations=resblock_dilations[i],
groups=groups,
bias=bias,
use_additional_convs=use_additional_convs,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
)
]
def forward(self, c):
"""Calculate forward propagation.
Args:
c (Tensor): Input tensor (B, channels, T).
Returns:
Tensor: Output tensor (B, channels, T).
"""
cs = 0.0 # initialize
for i in range(self.num_blocks):
cs += self.blocks[i](c)
c = cs / self.num_blocks
return c
def inference(self, c):
cs = 0.0 # initialize
for i in range(self.num_blocks):
cs += self.blocks[i].inference(c)
c = cs / self.num_blocks
return c
class MultiGroupConv1d(HiFiGANResidualBlock):
"""Multi-group convolution module."""
def __init__(
self,
channels=512,
resblock_kernel_sizes=(3),
resblock_dilations=[(1, 3, 5)],
groups=3,
bias=True,
use_additional_convs=True,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.1},
):
assert len(resblock_kernel_sizes) == len(resblock_dilations) == 1
super(MultiGroupConv1d, self).__init__(
kernel_size=resblock_kernel_sizes[0],
channels=channels*groups,
dilations=resblock_dilations[0],
groups=groups,
bias=bias,
use_additional_convs=use_additional_convs,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
)
self.groups = groups
self.conv_out = Conv1d1x1(
in_channels=channels*groups,
out_channels=channels,
bias=False,
)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, channels, T).
Returns:
Tensor: Output tensor (B, channels, T).
"""
x = x.repeat(1, self.groups, 1) # (B, n*C, T)
for idx in range(self.num_layer):
xt = self.convs1[idx](self.activation(x))
if self.use_additional_convs:
xt = self.convs2[idx](self.activation(xt))
x = xt + x
x = self.conv_out(x) # (B, C, T)
return x
def inference(self, x):
x = x.repeat(1, self.groups, 1) # (B, n*C, T)
for idx in range(self.num_layer):
xt = self.convs1[idx].inference(self.activation(x))
if self.use_additional_convs:
xt = self.convs2[idx].inference(self.activation(xt))
x = xt + x
x = self.conv_out(x) # (B, C, T)
        return x
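# --- Hypothetical usage sketch (not part of the original file) ---
# MultiReceptiveField averages its parallel residual branches and is
# shape-preserving; the HiFiGAN generator swaps in MultiGroupConv1d instead
# when a single kernel/dilation set is configured with groups > 1.
if __name__ == "__main__":
    mrf = MultiReceptiveField(channels=64)
    x = torch.randn(2, 64, 100)  # (B, C, T); illustrative shapes
    print(mrf(x).shape)  # expected: torch.Size([2, 64, 100])
| AudioDec-main | models/vocoder/modules/multi_fusion.py |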
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
# Reference (https://github.com/r9y9/wavenet_vocoder)
# Reference (https://github.com/jik876/hifi-gan)
"""Residual block modules."""
import math
import torch
import torch.nn as nn
from layers.conv_layer import CausalConv1d, Conv1d1x1
class HiFiGANResidualBlock(nn.Module):
"""Causal Residual block module in HiFiGAN."""
def __init__(
self,
kernel_size=3,
channels=512,
dilations=(1, 3, 5),
groups=1,
bias=True,
use_additional_convs=True,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.1},
):
"""Initialize CausalResidualBlock module.
Args:
kernel_size (int): Kernel size of dilation convolution layer.
channels (int): Number of channels for convolution layer.
dilations (List[int]): List of dilation factors.
use_additional_convs (bool): Whether to use additional convolution layers.
groups (int): The group number of conv1d (default: 1)
bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
"""
super().__init__()
self.use_additional_convs = use_additional_convs
self.convs1 = nn.ModuleList()
if use_additional_convs:
self.convs2 = nn.ModuleList()
assert kernel_size % 2 == 1, "Kernel size must be odd number."
self.activation = getattr(nn, nonlinear_activation)(**nonlinear_activation_params)
for dilation in dilations:
self.convs1 += [
CausalConv1d(
in_channels=channels,
out_channels=channels,
kernel_size=kernel_size,
stride=1,
dilation=dilation,
groups=groups,
bias=bias,
)
]
if use_additional_convs:
self.convs2 += [
CausalConv1d(
in_channels=channels,
out_channels=channels,
kernel_size=kernel_size,
stride=1,
dilation=1,
groups=groups,
bias=bias,
)
]
self.num_layer = len(self.convs1)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, channels, T).
Returns:
Tensor: Output tensor (B, channels, T).
"""
for idx in range(self.num_layer):
xt = self.convs1[idx](self.activation(x))
if self.use_additional_convs:
xt = self.convs2[idx](self.activation(xt))
x = xt + x
return x
def inference(self, x):
for idx in range(self.num_layer):
xt = self.convs1[idx].inference(self.activation(x))
if self.use_additional_convs:
xt = self.convs2[idx].inference(self.activation(xt))
x = xt + x
        return x
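# --- Hypothetical sketch (not part of the original file) ---
# Receptive-field arithmetic for the default block (kernel_size=3,
# dilations=(1, 3, 5), use_additional_convs=True): each dilated conv adds
# dilation * (kernel_size - 1) samples of context and each additional conv
# adds (kernel_size - 1), so the block spans 1 + (4 + 8 + 12) = 25 samples.
if __name__ == "__main__":
    kernel_size, dilations = 3, (1, 3, 5)
    rf = 1 + sum(d * (kernel_size - 1) + (kernel_size - 1) for d in dilations)
    print(rf)  # 25
| AudioDec-main | models/vocoder/modules/residual_block.py |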
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
# Reference (https://github.com/jik876/hifi-gan)
# Reference (https://github.com/chomeyama/SiFiGAN)
"""GAN-based Discriminators"""
import copy
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchaudio.functional import spectrogram
class HiFiGANPeriodDiscriminator(nn.Module):
"""HiFiGAN period discriminator module."""
def __init__(
self,
in_channels=1,
out_channels=1,
period=3,
kernel_sizes=[5, 3],
channels=32,
downsample_scales=[3, 3, 3, 3, 1],
max_downsample_channels=1024,
bias=True,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.1},
use_weight_norm=True,
use_spectral_norm=False,
):
"""Initialize HiFiGANPeriodDiscriminator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
period (int): Period.
kernel_sizes (list): Kernel sizes of initial conv layers and the final conv layer.
channels (int): Number of initial channels.
downsample_scales (list): List of downsampling scales.
max_downsample_channels (int): Number of maximum downsampling channels.
bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
use_spectral_norm (bool): Whether to use spectral norm.
If set to true, it will be applied to all of the conv layers.
"""
super().__init__()
assert len(kernel_sizes) == 2
assert kernel_sizes[0] % 2 == 1, "Kernel size must be odd number."
assert kernel_sizes[1] % 2 == 1, "Kernel size must be odd number."
self.period = period
self.convs = nn.ModuleList()
in_chs = in_channels
out_chs = channels
for downsample_scale in downsample_scales:
self.convs += [
torch.nn.Sequential(
torch.nn.Conv2d(
in_chs,
out_chs,
(kernel_sizes[0], 1),
(downsample_scale, 1),
padding=((kernel_sizes[0] - 1) // 2, 0),
),
getattr(torch.nn, nonlinear_activation)(
**nonlinear_activation_params
),
)
]
in_chs = out_chs
# NOTE(kan-bayashi): Use downsample_scale + 1?
out_chs = min(out_chs * 4, max_downsample_channels)
self.output_conv = torch.nn.Conv2d(
out_chs,
out_channels,
(kernel_sizes[1] - 1, 1),
1,
padding=((kernel_sizes[1] - 1) // 2, 0),
)
if use_weight_norm and use_spectral_norm:
raise ValueError("Either use use_weight_norm or use_spectral_norm.")
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
# apply spectral norm
if use_spectral_norm:
self.apply_spectral_norm()
def forward(self, x):
"""Calculate forward propagation.
Args:
            x (Tensor): Input tensor (B, in_channels, T).
Returns:
list: List of each layer's tensors.
"""
# transform 1d to 2d -> (B, C, T/P, P)
b, c, t = x.shape
if t % self.period != 0:
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t += n_pad
x = x.view(b, c, t // self.period, self.period)
# forward conv
outs = []
for layer in self.convs:
x = layer(x)
outs += [x]
x = self.output_conv(x)
x = torch.flatten(x, 1, -1)
outs += [x]
return outs
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m):
if isinstance(m, torch.nn.Conv2d):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
def apply_spectral_norm(self):
"""Apply spectral normalization module from all of the layers."""
def _apply_spectral_norm(m):
if isinstance(m, torch.nn.Conv2d):
torch.nn.utils.spectral_norm(m)
logging.debug(f"Spectral norm is applied to {m}.")
self.apply(_apply_spectral_norm)
class HiFiGANMultiPeriodDiscriminator(nn.Module):
"""HiFiGAN multi-period discriminator module."""
def __init__(
self,
periods=[2, 3, 5, 7, 11],
discriminator_params={
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [5, 3],
"channels": 32,
"downsample_scales": [3, 3, 3, 3, 1],
"max_downsample_channels": 1024,
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
"use_weight_norm": True,
"use_spectral_norm": False,
},
):
"""Initialize HiFiGANMultiPeriodDiscriminator module.
Args:
periods (list): List of periods.
discriminator_params (dict): Parameters for hifi-gan period discriminator module.
The period parameter will be overwritten.
"""
super().__init__()
self.discriminators = nn.ModuleList()
for period in periods:
params = copy.deepcopy(discriminator_params)
params["period"] = period
self.discriminators += [HiFiGANPeriodDiscriminator(**params)]
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
List: List of list of each discriminator outputs, which consists of each layer output tensors.
"""
outs = []
for f in self.discriminators:
outs += [f(x)]
return outs
class HiFiGANScaleDiscriminator(nn.Module):
"""HiFi-GAN scale discriminator module."""
def __init__(
self,
in_channels=1,
out_channels=1,
kernel_sizes=[15, 41, 5, 3],
channels=128,
max_downsample_channels=1024,
max_groups=16,
bias=True,
downsample_scales=[2, 2, 4, 4, 1],
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.1},
use_weight_norm=True,
use_spectral_norm=False,
):
"""Initilize HiFiGAN scale discriminator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
kernel_sizes (list): List of four kernel sizes. The first will be used for the first conv layer,
and the second is for downsampling part, and the remaining two are for output layers.
channels (int): Initial number of channels for conv layer.
max_downsample_channels (int): Maximum number of channels for downsampling layers.
bias (bool): Whether to add bias parameter in convolution layers.
downsample_scales (list): List of downsampling scales.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
use_spectral_norm (bool): Whether to use spectral norm.
If set to true, it will be applied to all of the conv layers.
"""
super().__init__()
self.layers = nn.ModuleList()
# check kernel size is valid
assert len(kernel_sizes) == 4
for ks in kernel_sizes:
assert ks % 2 == 1
# add first layer
self.layers += [
torch.nn.Sequential(
torch.nn.Conv1d(
in_channels,
channels,
# NOTE(kan-bayashi): Use always the same kernel size
kernel_sizes[0],
bias=bias,
padding=(kernel_sizes[0] - 1) // 2,
),
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
)
]
# add downsample layers
in_chs = channels
out_chs = channels
# NOTE(kan-bayashi): Remove hard coding?
groups = 4
for downsample_scale in downsample_scales:
self.layers += [
torch.nn.Sequential(
torch.nn.Conv1d(
in_chs,
out_chs,
kernel_size=kernel_sizes[1],
stride=downsample_scale,
padding=(kernel_sizes[1] - 1) // 2,
groups=groups,
bias=bias,
),
getattr(torch.nn, nonlinear_activation)(
**nonlinear_activation_params
),
)
]
in_chs = out_chs
# NOTE(kan-bayashi): Remove hard coding?
out_chs = min(in_chs * 2, max_downsample_channels)
# NOTE(kan-bayashi): Remove hard coding?
groups = min(groups * 4, max_groups)
# add final layers
out_chs = min(in_chs * 2, max_downsample_channels)
self.layers += [
torch.nn.Sequential(
torch.nn.Conv1d(
in_chs,
out_chs,
kernel_size=kernel_sizes[2],
stride=1,
padding=(kernel_sizes[2] - 1) // 2,
bias=bias,
),
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
)
]
self.layers += [
torch.nn.Conv1d(
out_chs,
out_channels,
kernel_size=kernel_sizes[3],
stride=1,
padding=(kernel_sizes[3] - 1) // 2,
bias=bias,
),
]
if use_weight_norm and use_spectral_norm:
raise ValueError("Either use use_weight_norm or use_spectral_norm.")
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
# apply spectral norm
if use_spectral_norm:
self.apply_spectral_norm()
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
List: List of output tensors of each layer.
"""
outs = []
for f in self.layers:
x = f(x)
outs += [x]
return outs
    def apply_weight_norm(self):
        """Apply weight normalization module to all of the layers."""
        def _apply_weight_norm(m):
            # NOTE: this discriminator is built from Conv1d (not Conv2d) layers
            if isinstance(m, torch.nn.Conv1d):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")
        self.apply(_apply_weight_norm)
    def apply_spectral_norm(self):
        """Apply spectral normalization module to all of the layers."""
        def _apply_spectral_norm(m):
            if isinstance(m, torch.nn.Conv1d):
                torch.nn.utils.spectral_norm(m)
                logging.debug(f"Spectral norm is applied to {m}.")
        self.apply(_apply_spectral_norm)
class HiFiGANMultiScaleDiscriminator(nn.Module):
"""HiFi-GAN multi-scale discriminator module."""
def __init__(
self,
scales=3,
downsample_pooling="AvgPool1d",
# follow the official implementation setting
downsample_pooling_params={
"kernel_size": 4,
"stride": 2,
"padding": 2,
},
discriminator_params={
"in_channels": 1,
"out_channels": 1,
"kernel_sizes": [15, 41, 5, 3],
"channels": 128,
"max_downsample_channels": 1024,
"max_groups": 16,
"bias": True,
"downsample_scales": [2, 2, 4, 4, 1],
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.1},
},
follow_official_norm=False,
):
"""Initilize HiFiGAN multi-scale discriminator module.
Args:
scales (int): Number of multi-scales.
downsample_pooling (str): Pooling module name for downsampling of the inputs.
downsample_pooling_params (dict): Parameters for the above pooling module.
discriminator_params (dict): Parameters for hifi-gan scale discriminator module.
            follow_official_norm (bool): Whether to follow the norm setting of the official
                implementation. The first discriminator uses spectral norm and the other
                discriminators use weight norm.
"""
super().__init__()
self.discriminators = nn.ModuleList()
# add discriminators
for i in range(scales):
params = copy.deepcopy(discriminator_params)
if follow_official_norm:
if i == 0:
params["use_weight_norm"] = False
params["use_spectral_norm"] = True
else:
params["use_weight_norm"] = True
params["use_spectral_norm"] = False
self.discriminators += [HiFiGANScaleDiscriminator(**params)]
self.pooling = getattr(torch.nn, downsample_pooling)(
**downsample_pooling_params
)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
List: List of list of each discriminator outputs, which consists of each layer output tensors.
"""
outs = []
for f in self.discriminators:
outs += [f(x)]
x = self.pooling(x)
return outs
class UnivNetSpectralDiscriminator(nn.Module):
"""UnivNet spectral discriminator module."""
def __init__(
self,
fft_size,
hop_size,
win_length,
window="hann_window",
kernel_sizes=[(3, 9), (3, 9), (3, 9), (3, 9), (3, 3), (3, 3)],
strides=[(1, 1), (1, 2), (1, 2), (1, 2), (1, 1), (1, 1)],
channels=32,
bias=True,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.2},
use_weight_norm=True,
):
"""Initilize HiFiGAN scale discriminator module.
Args:
fft_size (list): FFT size.
hop_size (int): Hop size.
win_length (int): Window length.
window (stt): Name of window function.
kernel_sizes (list): List of kernel sizes in down-sampling CNNs.
strides (list): List of stride sizes in down-sampling CNNs.
channels (int): Number of channels for conv layer.
bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
"""
super().__init__()
self.fft_size = fft_size
self.hop_size = hop_size
self.win_length = win_length
self.register_buffer("window", getattr(torch, window)(win_length))
self.layers = nn.ModuleList()
# check kernel size is valid
assert len(kernel_sizes) == len(strides)
# add first layer
self.layers += [
nn.Sequential(
nn.Conv2d(
1,
channels,
kernel_sizes[0],
stride=strides[0],
bias=bias,
),
getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
)
]
for i in range(1, len(kernel_sizes) - 2):
self.layers += [
nn.Sequential(
nn.Conv2d(
channels,
channels,
kernel_size=kernel_sizes[i],
stride=strides[i],
bias=bias,
),
getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
)
]
# add final layers
self.layers += [
nn.Sequential(
nn.Conv2d(
channels,
channels,
kernel_size=kernel_sizes[-2],
stride=strides[-2],
bias=bias,
),
getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
)
]
self.layers += [
nn.Conv2d(
channels,
1,
kernel_size=kernel_sizes[-1],
stride=strides[-1],
bias=bias,
)
]
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
List: List of output tensors of each layer.
"""
x = spectrogram(
x,
pad=self.win_length // 2,
window=self.window,
n_fft=self.fft_size,
hop_length=self.hop_size,
win_length=self.win_length,
power=1.0,
normalized=False,
).transpose(-1, -2)
for f in self.layers:
x = f(x)
return x
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m):
if isinstance(m, torch.nn.Conv2d):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
class UnivNetMultiResolutionSpectralDiscriminator(nn.Module):
"""UnivNet multi-resolution spectral discriminator module."""
def __init__(
self,
fft_sizes=[1024, 2048, 512],
hop_sizes=[120, 240, 50],
win_lengths=[600, 1200, 240],
window="hann_window",
discriminator_params={
"channels": 32,
"kernel_sizes": [(3, 9), (3, 9), (3, 9), (3, 9), (3, 3), (3, 3)],
"strides": [(1, 1), (1, 2), (1, 2), (1, 2), (1, 1), (1, 1)],
"bias": True,
"nonlinear_activation": "LeakyReLU",
"nonlinear_activation_params": {"negative_slope": 0.2},
},
):
"""Initilize UnivNetMultiResolutionSpectralDiscriminator module.
Args:
fft_sizes (list): FFT sizes for each spectral discriminator.
hop_sizes (list): Hop sizes for each spectral discriminator.
win_lengths (list): Window lengths for each spectral discriminator.
            window (str): Name of window function.
discriminator_params (dict): Parameters for univ-net spectral discriminator module.
"""
super().__init__()
assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
self.discriminators = nn.ModuleList()
# add discriminators
for i in range(len(fft_sizes)):
params = copy.deepcopy(discriminator_params)
self.discriminators += [
UnivNetSpectralDiscriminator(
fft_size=fft_sizes[i],
hop_size=hop_sizes[i],
win_length=win_lengths[i],
window=window,
**params,
)
]
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
List: List of list of each discriminator outputs, which consists of each layer output tensors.
"""
outs = []
for f in self.discriminators:
out = f(x)
outs.append(out)
        return outs
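# --- Hypothetical usage sketch (not part of the original file) ---
# Each HiFiGANPeriodDiscriminator pads T up to a multiple of its period,
# folds the signal to 2D, and returns 6 tensors (5 conv layers + output),
# so the multi-period wrapper yields 5 such lists for the default periods.
if __name__ == "__main__":
    mpd = HiFiGANMultiPeriodDiscriminator()
    wav = torch.randn(2, 1, 8000)  # (B, 1, T); illustrative shapes
    outs = mpd(wav)
    print(len(outs), [len(o) for o in outs])  # expected: 5 [6, 6, 6, 6, 6]
| AudioDec-main | models/vocoder/modules/discriminator.py |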
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
"""Training flow of GAN-based vocoder."""
import logging
import torch
from trainer.trainerGAN import TrainerGAN
class Trainer(TrainerGAN):
def __init__(
self,
steps,
epochs,
data_loader,
model,
criterion,
optimizer,
scheduler,
config,
device=torch.device("cpu"),
):
super(Trainer, self).__init__(
steps=steps,
epochs=epochs,
data_loader=data_loader,
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
config=config,
device=device,
)
self.fix_analyzer = False
self.generator_start = config.get("generator_train_start_steps", 0)
self.discriminator_start = config.get("discriminator_train_start_steps", 0)
def _train_step(self, batch):
"""Train model one step."""
mode = 'train'
x = batch
x = x.to(self.device)
# fix analyzer
if not self.fix_analyzer:
for parameter in self.model["analyzer"].parameters():
parameter.requires_grad = False
self.fix_analyzer = True
logging.info("Analyzer is fixed!")
self.model["analyzer"].eval()
#######################
# Generator #
#######################
if self.steps > self.generator_start:
# initialize generator loss
gen_loss = 0.0
            # main generator operation
e = self.model["analyzer"].encoder(x)
z = self.model["analyzer"].projector(e)
zq, _, _ = self.model["analyzer"].quantizer(z)
y_ = self.model["generator"](zq)
# metric loss
gen_loss += self._metric_loss(y_, x, mode=mode)
# adversarial loss
if self.steps > self.discriminator_start:
p_ = self.model["discriminator"](y_)
if self.config["use_feat_match_loss"]:
with torch.no_grad():
p = self.model["discriminator"](x)
else:
p = None
gen_loss += self._adv_loss(p_, p, mode=mode)
# update generator
self._record_loss('generator_loss', gen_loss, mode=mode)
self._update_generator(gen_loss)
#######################
# Discriminator #
#######################
if self.steps > self.discriminator_start:
            # re-compute y_, which leads to better quality
with torch.no_grad():
e = self.model["analyzer"].encoder(x)
z = self.model["analyzer"].projector(e)
zq, _, _ = self.model["analyzer"].quantizer(z)
y_ = self.model["generator"](zq)
p = self.model["discriminator"](x)
p_ = self.model["discriminator"](y_.detach())
# discriminator loss & update discriminator
self._update_discriminator(self._dis_loss(p_, p, mode=mode))
# update counts
self.steps += 1
self.tqdm.update(1)
self._check_train_finish()
@torch.no_grad()
def _eval_step(self, batch):
"""Single step of evaluation."""
mode = 'eval'
x = batch
x = x.to(self.device)
# initialize generator loss
gen_loss = 0.0
        # main generator operation
e = self.model["analyzer"].encoder(x)
z = self.model["analyzer"].projector(e)
zq, _, _ = self.model["analyzer"].quantizer(z)
y_ = self.model["generator"](zq)
# metric loss
gen_loss += self._metric_loss(y_, x, mode=mode)
# adversarial loss & feature matching loss
if self.steps > self.discriminator_start:
p_ = self.model["discriminator"](y_)
if self.config["use_feat_match_loss"]:
p = self.model["discriminator"](x)
else:
p = None
gen_loss += self._adv_loss(p_, p, mode=mode)
# discriminator loss
self._dis_loss(p_, p, mode=mode)
# generator loss
self._record_loss('generator_loss', gen_loss, mode=mode)
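# --- Hypothetical sketch (not part of the original file) ---
# Minimal set of config keys this trainer and its TrainerGAN base actually
# read; the values below are illustrative, and the real project loads them
# from a YAML configuration file.
EXAMPLE_CONFIG = {
    "outdir": "exp/vocoder",                    # tensorboard / checkpoint dir
    "train_max_steps": 500000,
    "save_interval_steps": 100000,
    "eval_interval_steps": 1000,
    "log_interval_steps": 100,
    "generator_train_start_steps": 0,           # read in __init__ above
    "discriminator_train_start_steps": 200000,  # read in __init__ above
    "generator_grad_norm": 10,
    "discriminator_grad_norm": 10,
    "use_feat_match_loss": True,
    "lambda_feat_match": 2.0,
    "lambda_adv": 1.0,
    "use_mel_loss": True,                       # enables criterion["mel"]
    "lambda_mel_loss": 45.0,
}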
| AudioDec-main | trainer/vocoder.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
"""Template GAN training flow."""
import logging
import os
import abc
import torch
from collections import defaultdict
from tensorboardX import SummaryWriter
from tqdm import tqdm
class TrainerGAN(abc.ABC):
def __init__(
self,
steps,
epochs,
data_loader,
model,
criterion,
optimizer,
scheduler,
config,
device=torch.device("cpu"),
):
"""Initialize trainer.
Args:
steps (int): Initial global steps.
epochs (int): Initial global epochs.
            data_loader (dict): Dict of data loaders. It must contain "train" and "dev" loaders.
            model (dict): Dict of models. It must contain "generator" and "discriminator" models.
            criterion (dict): Dict of criterions. It must contain "stft" and "mse" criterions.
            optimizer (dict): Dict of optimizers. It must contain "generator" and "discriminator" optimizers.
            scheduler (dict): Dict of schedulers. It must contain "generator" and "discriminator" schedulers.
            config (dict): Config dict loaded from yaml format configuration file.
            device (torch.device): Pytorch device instance.
"""
self.steps = steps
self.epochs = epochs
self.data_loader = data_loader
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self.scheduler = scheduler
self.config = config
self.device = device
self.writer = SummaryWriter(config["outdir"])
self.total_train_loss = defaultdict(float)
self.total_eval_loss = defaultdict(float)
self.train_max_steps = config.get("train_max_steps", 0)
@abc.abstractmethod
def _train_step(self, batch):
"""Single step of training."""
pass
@abc.abstractmethod
def _eval_step(self, batch):
"""Single step of evaluation."""
pass
def run(self):
"""Run training."""
self.finish_train = False
self.tqdm = tqdm(
initial=self.steps, total=self.train_max_steps, desc="[train]"
)
while True:
self._train_epoch()
# check whether training is finished
if self.finish_train:
break
self.tqdm.close()
logging.info("Finished training.")
def save_checkpoint(self, checkpoint_path):
"""Save checkpoint.
Args:
checkpoint_path (str): Checkpoint path to be saved.
"""
state_dict = {
"optimizer": {
"generator": self.optimizer["generator"].state_dict(),
"discriminator": self.optimizer["discriminator"].state_dict(),
},
"scheduler": {
"generator": self.scheduler["generator"].state_dict(),
"discriminator": self.scheduler["discriminator"].state_dict(),
},
"steps": self.steps,
"epochs": self.epochs,
}
state_dict["model"] = {
"generator": self.model["generator"].state_dict(),
"discriminator": self.model["discriminator"].state_dict(),
}
if not os.path.exists(os.path.dirname(checkpoint_path)):
os.makedirs(os.path.dirname(checkpoint_path))
torch.save(state_dict, checkpoint_path)
def load_checkpoint(self, checkpoint_path, strict=True, load_only_params=False, load_discriminator=True):
"""Load checkpoint.
Args:
            checkpoint_path (str): Checkpoint path to be loaded.
            strict (bool): Whether to strictly enforce matching state dict keys.
            load_only_params (bool): Whether to load only model parameters.
            load_discriminator (bool): Whether to load optimizer and scheduler of the discriminators.
"""
state_dict = torch.load(checkpoint_path, map_location="cpu")
self.model["generator"].load_state_dict(
state_dict["model"]["generator"], strict=strict)
self.model["discriminator"].load_state_dict(
state_dict["model"]["discriminator"], strict=strict)
if not load_only_params:
self.steps = state_dict["steps"]
self.epochs = state_dict["epochs"]
self.optimizer["generator"].load_state_dict(
state_dict["optimizer"]["generator"])
self.scheduler["generator"].load_state_dict(
state_dict["scheduler"]["generator"])
if load_discriminator:
self.optimizer["discriminator"].load_state_dict(
state_dict["optimizer"]["discriminator"])
self.scheduler["discriminator"].load_state_dict(
state_dict["scheduler"]["discriminator"])
def _train_epoch(self):
"""One epoch of training."""
for train_steps_per_epoch, batch in enumerate(self.data_loader["train"], 1):
# train one step
self._train_step(batch)
# check interval
self._check_log_interval()
self._check_eval_interval()
self._check_save_interval()
# check whether training is finished
if self.finish_train:
return
# update
self.epochs += 1
self.train_steps_per_epoch = train_steps_per_epoch
if train_steps_per_epoch > 200:
logging.info(
f"(Steps: {self.steps}) Finished {self.epochs} epoch training "
f"({self.train_steps_per_epoch} steps per epoch)."
)
def _eval_epoch(self):
"""One epoch of evaluation."""
logging.info(f"(Steps: {self.steps}) Start evaluation.")
# change mode
for key in self.model.keys():
self.model[key].eval()
# calculate loss for each batch
for eval_steps_per_epoch, batch in enumerate(
tqdm(self.data_loader["dev"], desc="[eval]"), 1
):
# eval one step
self._eval_step(batch)
logging.info(
f"(Steps: {self.steps}) Finished evaluation "
f"({eval_steps_per_epoch} steps per epoch)."
)
# average loss
for key in self.total_eval_loss.keys():
self.total_eval_loss[key] /= eval_steps_per_epoch
logging.info(
f"(Steps: {self.steps}) {key} = {self.total_eval_loss[key]:.4f}."
)
# record
self._write_to_tensorboard(self.total_eval_loss)
# reset
self.total_eval_loss = defaultdict(float)
# restore mode
for key in self.model.keys():
self.model[key].train()
def _metric_loss(self, predict_y, natural_y, mode='train'):
"""Metric losses."""
        metric_loss = 0.0
# mel spectrogram loss
if self.config.get('use_mel_loss', False):
mel_loss = self.criterion["mel"](predict_y, natural_y)
mel_loss *= self.config["lambda_mel_loss"]
self._record_loss('mel_loss', mel_loss, mode=mode)
metric_loss += mel_loss
        # multi-resolution STFT loss
if self.config.get('use_stft_loss', False):
sc_loss, mag_loss = self.criterion["stft"](predict_y, natural_y)
sc_loss *= self.config["lambda_stft_loss"]
mag_loss *= self.config["lambda_stft_loss"]
self._record_loss('spectral_convergence_loss', sc_loss, mode=mode)
self._record_loss('log_stft_magnitude_loss', mag_loss, mode=mode)
metric_loss += (sc_loss + mag_loss)
# waveform shape loss
if self.config.get("use_shape_loss", False):
shape_loss = self.criterion["shape"](predict_y, natural_y)
shape_loss *= self.config["lambda_shape_loss"]
self._record_loss('shape_loss', shape_loss, mode=mode)
metric_loss += shape_loss
return metric_loss
def _adv_loss(self, predict_p, natural_p=None, mode='train'):
"""Adversarial loss."""
adv_loss = self.criterion["gen_adv"](predict_p)
# feature matching loss
if natural_p is not None:
fm_loss = self.criterion["feat_match"](predict_p, natural_p)
self._record_loss('feature_matching_loss', fm_loss, mode=mode)
adv_loss += self.config["lambda_feat_match"] * fm_loss
adv_loss *= self.config["lambda_adv"]
self._record_loss('adversarial_loss', adv_loss, mode=mode)
return adv_loss
def _dis_loss(self, predict_p, natural_p, mode='train'):
"""Discriminator loss."""
real_loss, fake_loss = self.criterion["dis_adv"](predict_p, natural_p)
dis_loss = real_loss + fake_loss
self._record_loss('real_loss', real_loss, mode=mode)
self._record_loss('fake_loss', fake_loss, mode=mode)
self._record_loss('discriminator_loss', dis_loss, mode=mode)
return dis_loss
def _update_generator(self, gen_loss):
"""Update generator."""
self.optimizer["generator"].zero_grad()
gen_loss.backward()
if self.config["generator_grad_norm"] > 0:
torch.nn.utils.clip_grad_norm_(
self.model["generator"].parameters(),
self.config["generator_grad_norm"],
)
self.optimizer["generator"].step()
self.scheduler["generator"].step()
def _update_discriminator(self, dis_loss):
"""Update discriminator."""
self.optimizer["discriminator"].zero_grad()
dis_loss.backward()
if self.config["discriminator_grad_norm"] > 0:
torch.nn.utils.clip_grad_norm_(
self.model["discriminator"].parameters(),
self.config["discriminator_grad_norm"],
)
self.optimizer["discriminator"].step()
self.scheduler["discriminator"].step()
def _record_loss(self, name, loss, mode='train'):
"""Record loss."""
if torch.is_tensor(loss):
loss = loss.item()
if mode == 'train':
self.total_train_loss[f"train/{name}"] += loss
elif mode == 'eval':
self.total_eval_loss[f"eval/{name}"] += loss
else:
raise NotImplementedError(f"Mode ({mode}) is not supported!")
def _write_to_tensorboard(self, loss):
"""Write to tensorboard."""
for key, value in loss.items():
self.writer.add_scalar(key, value, self.steps)
def _check_save_interval(self):
if self.steps and (self.steps % self.config["save_interval_steps"] == 0):
self.save_checkpoint(
os.path.join(self.config["outdir"], f"checkpoint-{self.steps}steps.pkl")
)
logging.info(f"Successfully saved checkpoint @ {self.steps} steps.")
def _check_eval_interval(self):
if self.steps % self.config["eval_interval_steps"] == 0:
self._eval_epoch()
def _check_log_interval(self):
if self.steps % self.config["log_interval_steps"] == 0:
for key in self.total_train_loss.keys():
self.total_train_loss[key] /= self.config["log_interval_steps"]
logging.info(
f"(Steps: {self.steps}) {key} = {self.total_train_loss[key]:.4f}."
)
self._write_to_tensorboard(self.total_train_loss)
# reset
self.total_train_loss = defaultdict(float)
def _check_train_finish(self):
if self.steps >= self.train_max_steps:
self.finish_train = True
else:
self.finish_train = False
return self.finish_train
class TrainerVQGAN(TrainerGAN):
def __init__(
self,
steps,
epochs,
data_loader,
model,
criterion,
optimizer,
scheduler,
config,
device=torch.device("cpu"),
):
super(TrainerVQGAN, self).__init__(
steps=steps,
epochs=epochs,
data_loader=data_loader,
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
config=config,
device=device,
)
# perplexity info
def _perplexity(self, perplexity, label=None, mode='train'):
if label:
name = f"{mode}/ppl_{label}"
else:
name = f"{mode}/ppl"
if torch.numel(perplexity) > 1:
perplexity = perplexity.tolist()
for idx, ppl in enumerate(perplexity):
self._record_loss(f"{name}_{idx}", ppl, mode=mode)
else:
self._record_loss(name, perplexity, mode=mode)
# vq loss
def _vq_loss(self, vqloss, label=None, mode='train'):
if label:
name = f"{mode}/vqloss_{label}"
else:
name = f"{mode}/vqloss"
vqloss = torch.sum(vqloss)
vqloss *= self.config["lambda_vq_loss"]
self._record_loss(name, vqloss, mode=mode)
return vqloss
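# --- Hypothetical sketch (not part of the original file) ---
# The minimal surface a concrete trainer must implement; the real subclasses
# in trainer/vocoder.py and trainer/autoencoder.py follow this pattern and
# additionally handle discriminator updates and start steps.
class _MinimalTrainer(TrainerGAN):
    def _train_step(self, batch):
        x = batch.to(self.device)
        y_ = self.model["generator"](x)
        gen_loss = self._metric_loss(y_, x, mode='train')
        self._record_loss('generator_loss', gen_loss, mode='train')
        self._update_generator(gen_loss)
        self.steps += 1
        self.tqdm.update(1)
        self._check_train_finish()
    @torch.no_grad()
    def _eval_step(self, batch):
        x = batch.to(self.device)
        y_ = self.model["generator"](x)
        self._record_loss('generator_loss', self._metric_loss(y_, x, mode='eval'), mode='eval')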
| AudioDec-main | trainer/trainerGAN.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
"""Training flow of symmetric codec."""
import logging
import torch
from trainer.trainerGAN import TrainerVQGAN
class Trainer(TrainerVQGAN):
def __init__(
self,
steps,
epochs,
data_loader,
model,
criterion,
optimizer,
scheduler,
config,
device=torch.device("cpu"),
):
super(Trainer, self).__init__(
steps=steps,
epochs=epochs,
data_loader=data_loader,
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
config=config,
device=device,
)
self.fix_encoder = False
self.paradigm = config.get('paradigm', 'efficient')
self.generator_start = config.get('start_steps', {}).get('generator', 0)
self.discriminator_start = config.get('start_steps', {}).get('discriminator', 200000)
def _train_step(self, batch):
"""Single step of training."""
mode = 'train'
x = batch
x = x.to(self.device)
# check generator step
if self.steps < self.generator_start:
self.generator_train = False
else:
self.generator_train = True
# check discriminator step
if self.steps < self.discriminator_start:
self.discriminator_train = False
else:
self.discriminator_train = True
if (not self.fix_encoder) and (self.paradigm == 'efficient'):
            # fix encoder, projector, quantizer, and codebook
for parameter in self.model["generator"].encoder.parameters():
parameter.requires_grad = False
for parameter in self.model["generator"].projector.parameters():
parameter.requires_grad = False
for parameter in self.model["generator"].quantizer.parameters():
parameter.requires_grad = False
self.fix_encoder = True
logging.info("Encoder, projector, quantizer, and codebook are fixed")
# check codebook updating
if self.fix_encoder:
self.model["generator"].quantizer.codebook.eval()
#######################
# Generator #
#######################
if self.generator_train:
# initialize generator loss
gen_loss = 0.0
            # main generator operation
y_, zq, z, vqloss, perplexity = self.model["generator"](x)
# perplexity info
self._perplexity(perplexity, mode=mode)
# vq loss
gen_loss += self._vq_loss(vqloss, mode=mode)
# metric loss
gen_loss += self._metric_loss(y_, x, mode=mode)
# adversarial loss
if self.discriminator_train:
p_ = self.model["discriminator"](y_)
if self.config["use_feat_match_loss"]:
with torch.no_grad():
p = self.model["discriminator"](x)
else:
p = None
gen_loss += self._adv_loss(p_, p, mode=mode)
# update generator
self._record_loss('generator_loss', gen_loss, mode=mode)
self._update_generator(gen_loss)
#######################
# Discriminator #
#######################
if self.discriminator_train:
            # re-compute y_ which leads to better quality
with torch.no_grad():
y_, _, _, _, _ = self.model["generator"](x)
p = self.model["discriminator"](x)
p_ = self.model["discriminator"](y_.detach())
# discriminator loss & update discriminator
self._update_discriminator(self._dis_loss(p_, p, mode=mode))
# update counts
self.steps += 1
self.tqdm.update(1)
self._check_train_finish()
@torch.no_grad()
def _eval_step(self, batch):
"""Single step of evaluation."""
mode = 'eval'
x = batch
x = x.to(self.device)
# initialize generator loss
gen_loss = 0.0
        # main generator operation
y_, zq, z, vqloss, perplexity = self.model["generator"](x)
# perplexity info
self._perplexity(perplexity, mode=mode)
# vq_loss
gen_loss += self._vq_loss(vqloss, mode=mode)
# metric loss
gen_loss += self._metric_loss(y_, x, mode=mode)
if self.discriminator_train:
# adversarial loss
p_ = self.model["discriminator"](y_)
p = self.model["discriminator"](x)
gen_loss += self._adv_loss(p_, p, mode=mode)
# discriminator loss
self._dis_loss(p_, p, mode=mode)
# generator loss
self._record_loss('generator_loss', gen_loss, mode=mode)
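# Hedged config sketch (not part of the original file): the step schedule in
# __init__ is read from the training config via config.get(); a plausible
# YAML fragment is shown below. The key names match the calls above; the
# values are illustrative only.
#
#     paradigm: efficient          # freeze encoder/projector/quantizer early
#     start_steps:
#         generator: 0             # generator trains from step 0
#         discriminator: 200000    # adversarial loss kicks in later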
| AudioDec-main | trainer/autoencoder.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
"""Training flow of symmetric codec."""
import logging
import torch
from trainer.trainerGAN import TrainerVQGAN
class Trainer(TrainerVQGAN):
def __init__(
self,
steps,
epochs,
data_loader,
model,
criterion,
optimizer,
scheduler,
config,
device=torch.device("cpu"),
):
super(Trainer, self).__init__(
steps=steps,
epochs=epochs,
data_loader=data_loader,
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
config=config,
device=device,
)
# fix quantizer
for parameter in self.model["generator"].quantizer.parameters():
parameter.requires_grad = False
# fix decoder
for parameter in self.model["generator"].decoder.parameters():
parameter.requires_grad = False
logging.info("Quantizer, codebook, and decoder are fixed")
def _train_step(self, batch):
"""Single step of training."""
mode = 'train'
x_n, x_c = batch
x_n = x_n.to(self.device)
x_c = x_c.to(self.device)
# fix codebook
self.model["generator"].quantizer.codebook.eval()
# initialize generator loss
gen_loss = 0.0
        # main generator operation
y_nc, zq, z, vqloss, perplexity = self.model["generator"](x_n)
# perplexity info
self._perplexity(perplexity, mode=mode)
# vq loss
gen_loss += self._vq_loss(vqloss, mode=mode)
# metric loss
gen_loss += self._metric_loss(y_nc, x_c, mode=mode)
# update generator
self._record_loss('generator_loss', gen_loss, mode=mode)
self._update_generator(gen_loss)
# update counts
self.steps += 1
self.tqdm.update(1)
self._check_train_finish()
@torch.no_grad()
def _eval_step(self, batch):
"""Single step of evaluation."""
mode = 'eval'
x_n, x_c = batch
x_n = x_n.to(self.device)
x_c = x_c.to(self.device)
# initialize generator loss
gen_loss = 0.0
        # main generator operation
y_nc, zq, z, vqloss, perplexity = self.model["generator"](x_n)
# perplexity info
self._perplexity(perplexity, mode=mode)
# vq_loss
gen_loss += self._vq_loss(vqloss, mode=mode)
# metric loss
gen_loss += self._metric_loss(y_nc, x_c, mode=mode)
# generator loss
self._record_loss('generator_loss', gen_loss, mode=mode)
| AudioDec-main | trainer/denoise.py |
from .dataset import * # NOQA
from .collater import * # NOQA
from .utils import * # NOQA
| AudioDec-main | dataloader/__init__.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
"""PyTorch compatible dataset modules."""
import os
import soundfile as sf
from torch.utils.data import Dataset
from dataloader.utils import find_files
class SingleDataset(Dataset):
def __init__(
self,
files,
query="*.wav",
load_fn=sf.read,
return_utt_id=False,
subset_num=-1,
):
self.return_utt_id = return_utt_id
self.load_fn = load_fn
self.subset_num = subset_num
self.filenames = self._load_list(files, query)
self.utt_ids = self._load_ids(self.filenames)
def __getitem__(self, idx):
utt_id = self.utt_ids[idx]
data = self._data(idx)
if self.return_utt_id:
items = utt_id, data
else:
items = data
return items
def __len__(self):
return len(self.filenames)
def _read_list(self, listfile):
filenames = []
with open(listfile) as f:
for line in f:
line = line.strip()
if len(line):
filenames.append(line)
return filenames
def _load_list(self, files, query):
if isinstance(files, list):
filenames = files
else:
if os.path.isdir(files):
filenames = sorted(find_files(files, query))
elif os.path.isfile(files):
filenames = sorted(self._read_list(files))
else:
raise ValueError(f"{files} is not a list / existing folder or file!")
if self.subset_num > 0:
filenames = filenames[:self.subset_num]
assert len(filenames) != 0, f"File list in empty!"
return filenames
def _load_ids(self, filenames):
utt_ids = [
os.path.splitext(os.path.basename(f))[0] for f in filenames
]
return utt_ids
def _data(self, idx):
return self._load_data(self.filenames[idx], self.load_fn)
def _load_data(self, filename, load_fn):
if load_fn == sf.read:
data, _ = load_fn(filename, always_2d=True) # (T, C)
else:
data = load_fn(filename)
return data
class MultiDataset(SingleDataset):
def __init__(
self,
multi_files,
queries,
load_fns,
return_utt_id=False,
subset_num=-1,
):
errmsg = f"multi_files({len(multi_files)}), queries({len(queries)}), and load_fns({len(load_fns)}) are length mismatched!"
assert len(multi_files) == len(queries) == len(load_fns), errmsg
super(MultiDataset, self).__init__(
files=multi_files,
query=queries,
load_fn=load_fns,
return_utt_id=return_utt_id,
subset_num=subset_num,
)
self._check_length(self.filenames)
def _load_list(self, multi_files, queries):
multi_filenames = []
if isinstance(multi_files, list):
for files, query in zip(multi_files, queries):
multi_filenames.append(super()._load_list(files, query))
else:
raise ValueError(f"{multi_files} should be a list!")
return multi_filenames
def _load_ids(self, multi_filenames):
return super()._load_ids(multi_filenames[0])
def _data(self, idx):
filenames = [
f[idx] for f in self.filenames
]
data = []
for filename, load_fn in zip(filenames, self.load_fn):
data.append(self._load_data(filename, load_fn))
return data
def _check_length(self, multi_filenames):
errmsg = f"Not all lists have the same number of files!"
self.file_num = len(multi_filenames[0])
assert all(len(x)==self.file_num for x in multi_filenames), errmsg
def __len__(self):
return self.file_num
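# Hedged usage sketch (not part of the original file): SingleDataset over a
# folder of wavs. The directory path below is hypothetical.
#
#     dataset = SingleDataset('corpus/clean_trainset', query='*.wav',
#                             return_utt_id=True)
#     utt_id, audio = dataset[0]   # audio: np.ndarray of shape (T, C)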
| AudioDec-main | dataloader/dataset.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
"""Customized collater modules for Pytorch DataLoader."""
import torch
import numpy as np
class CollaterAudio(object):
"""Customized collater for loading single audio."""
def __init__(
self,
batch_length=9600,
):
"""
Args:
batch_length (int): The length of audio signal batch.
"""
self.batch_length = batch_length
def __call__(self, batch):
# filter short batch
xs = [b for b in batch if len(b) > self.batch_length]
# random cut
starts, ends = self._random_segment(xs)
x_batch = self._cut(xs, starts, ends)
return x_batch
def _random_segment(self, xs):
x_lengths = [len(x) for x in xs]
start_offsets = np.array(
[
np.random.randint(0, xl - self.batch_length)
for xl in x_lengths
]
)
starts = start_offsets
ends = starts + self.batch_length
return starts, ends
def _cut(self, xs, starts, ends):
x_batch = np.array([x[start:end] for x, start, end in zip(xs, starts, ends)])
x_batch = torch.tensor(x_batch, dtype=torch.float).transpose(2, 1) # (B, C, T)
return x_batch
class CollaterAudioPair(CollaterAudio):
"""Customized collater for loading audio pair."""
def __init__(
self,
batch_length=9600,
):
super().__init__(
batch_length=batch_length
)
def __call__(self, batch):
batch = [
b for b in batch if (len(b[0]) > self.batch_length) and (len(b[0]) == len(b[1]))
]
assert len(batch) > 0, f"No qualified audio pairs.!"
xs, ns = [b[0] for b in batch], [b[1] for b in batch]
# random cut
starts, ends = self._random_segment(xs)
x_batch = self._cut(xs, starts, ends)
n_batch = self._cut(ns, starts, ends)
return n_batch, x_batch # (input, output)
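if __name__ == "__main__":
    # Hedged self-test (not part of the original file): collate 4 synthetic
    # mono clips of 16000 samples into random 9600-sample segments.
    fake_batch = [np.random.randn(16000, 1) for _ in range(4)]  # (T, C) each
    collater = CollaterAudio(batch_length=9600)
    x = collater(fake_batch)
    print(x.shape)  # torch.Size([4, 1, 9600])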
| AudioDec-main | dataloader/collater.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
import os
import fnmatch
import logging
import numpy as np
def find_files(root_dir, query="*.wav", include_root_dir=True):
"""Find files recursively.
Args:
root_dir (str): Root root_dir to find.
query (str): Query to find.
include_root_dir (bool): If False, root_dir name is not included.
Returns:
list: List of found filenames.
"""
files = []
for root, dirnames, filenames in os.walk(root_dir, followlinks=True):
for filename in fnmatch.filter(filenames, query):
files.append(os.path.join(root, filename))
if not include_root_dir:
files = [file_.replace(root_dir + "/", "") for file_ in files]
return files
def load_files(data_path, query="*.wav", num_core=40):
# sort all files
file_list = sorted(find_files(data_path, query))
logging.info(f"The number of {os.path.basename(data_path)} files = {len(file_list)}.")
# divide
if num_core < len(file_list):
file_lists = np.array_split(file_list, num_core)
file_lists = [f_list.tolist() for f_list in file_lists]
else:
file_lists = [file_list]
    return file_lists
| AudioDec-main | dataloader/utils.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import argparse
import inspect
import copy
import os
import ast
import yaml
import numpy as np
def convert_to_stringval(cfg_, squeeze=None, stringify_vals=False):
out = {}
convert_to_stringval_rec([('ROOT', cfg_)], out,
squeeze=squeeze, stringify_vals=stringify_vals)
return out
def convert_to_stringval_rec(flds, output, squeeze=None, stringify_vals=False):
for k, v in flds[-1][1].items():
if isinstance(v, dict):
flds_cp = copy.deepcopy(flds)
flds_cp.append((k, v))
convert_to_stringval_rec(flds_cp, output,
squeeze=squeeze,
stringify_vals=stringify_vals)
else:
valname_full = []
for f in flds[1:]:
valname_full.append(squeeze_string(f[0], squeeze))
valname_full.append(squeeze_string(k, squeeze))
valname_full = ".".join(valname_full)
if stringify_vals:
output[valname_full] = str(v)
else:
output[valname_full] = v
def squeeze_key_string(f, squeeze_inter, squeeze_tail):
keys = f.split('.')
tail = keys[-1]
inter = keys[0:-1]
nkeys = len(keys)
if nkeys > 1:
take_from_each = int(
np.floor(float(squeeze_inter-nkeys)/float(nkeys-1)))
take_from_each = max(take_from_each, 1)
for keyi in range(nkeys-1):
s = inter[keyi]
s = s[0:min(take_from_each, len(s))]
inter[keyi] = s
tail = squeeze_string(tail, squeeze_tail)
inter.append(tail)
out = ".".join(inter)
return out
def squeeze_string(f, squeeze):
if squeeze is None or squeeze > len(f):
return f
idx = np.round(np.linspace(0, len(f)-1, squeeze))
idx = idx.astype(int).tolist()
f_short = [f[i] for i in idx]
f_short = str("").join(f_short)
return f_short
def get_default_args(C):
# returns dict of keyword args of a callable C
sig = inspect.signature(C)
kwargs = {}
for pname, defval in dict(sig.parameters).items():
if defval.default == inspect.Parameter.empty:
# print('skipping %s' % pname)
continue
else:
kwargs[pname] = defval.default
return kwargs
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def arg_as_list(s):
v = ast.literal_eval(s)
if type(v) is not list:
raise argparse.ArgumentTypeError("Argument \"%s\" is not a list" % (s))
return v
def get_arg_parser(cfg_constructor):
dargs = get_default_args(cfg_constructor)
dargs_full_name = convert_to_stringval(dargs, stringify_vals=False)
parser = argparse.ArgumentParser(
description='Auto-initialized argument parser'
)
for darg, val in dargs_full_name.items():
tp = type(val) if val is not None else str
if tp == bool:
parser.add_argument(
'--%s' % darg,
dest=darg,
help=darg,
default=val,
type=str2bool,
)
elif tp == list:
parser.add_argument(
'--%s' % darg,
type=arg_as_list,
default=val,
help=darg)
else:
parser.add_argument(
'--%s' % darg,
dest=darg,
help=darg,
default=val,
type=tp,
)
return parser
def set_config_from_config(cfg, cfg_set):
# cfg_set ... dict with nested options
cfg_dot_separated = convert_to_stringval(cfg_set, stringify_vals=False)
set_config(cfg, cfg_dot_separated)
def set_config_rec(cfg, tgt_key, val, check_only=False):
if len(tgt_key) > 1:
k = tgt_key.pop(0)
if k not in cfg:
raise ValueError('no such config key %s' % k)
set_config_rec(cfg[k], tgt_key, val, check_only=check_only)
else:
if check_only:
assert cfg[tgt_key[0]] == val
else:
cfg[tgt_key[0]] = val
def set_config(cfg, cfg_set):
# cfg_set ... dict with .-separated options
for cfg_key, cfg_val in cfg_set.items():
# print('setting %s = %s' % (cfg_key,str(cfg_val)) )
cfg_key_split = [k for k in cfg_key.split('.') if len(k) > 0]
set_config_rec(cfg, copy.deepcopy(cfg_key_split), cfg_val)
set_config_rec(cfg, cfg_key_split, cfg_val, check_only=True)
def set_config_from_file(cfg, cfg_filename):
# set config from yaml file
with open(cfg_filename, 'r') as f:
yaml_cfg = yaml.load(f, Loader=yaml.FullLoader)
set_config_from_config(cfg, yaml_cfg)
def dump_config(cfg):
cfg_filename = os.path.join(cfg.exp_dir, 'expconfig.yaml')
with open(cfg_filename, 'w') as yaml_file:
yaml.dump(cfg, yaml_file, default_flow_style=False)
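if __name__ == "__main__":
    # Hedged self-test (not part of the original file): override nested
    # defaults with dot-separated keys; keys must already exist in the config.
    cfg = {'SOLVER': {'lr': 0.001, 'max_epochs': 43}, 'seed': 0}
    set_config(cfg, {'SOLVER.lr': 0.01, 'seed': 42})
    print(cfg)  # {'SOLVER': {'lr': 0.01, 'max_epochs': 43}, 'seed': 42}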
| c3dpo_nrsfm-main | config.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from dataset.dataset_configs import STICKS
from tools.so3 import so3_exponential_map, rand_rot
from tools.functions import masked_kp_mean, \
argmin_translation, argmin_scale, \
avg_l2_huber
from tools.vis_utils import get_visdom_connection, \
show_projections, \
visdom_plot_pointclouds
from tools.utils import auto_init_args
import numpy as np
import torch.nn.functional as Fu
from torch import nn as nn
import torch
class C3DPO(torch.nn.Module):
def __init__(self, n_keypoints=17,
shape_basis_size=10,
n_fully_connected=1024,
n_layers=6,
keypoint_rescale=float(1),
keypoint_norm_type='to_mean',
projection_type='orthographic',
z_augment=True,
z_augment_rot_angle=float(np.pi)/8,
z_equivariance=True,
z_equivariance_rot_angle=float(np.pi)/8,
camera_translation=False,
camera_xy_translation=False,
argmin_translation=False,
camera_scale=False,
connectivity_setup='NONE',
huber_scaling=0.01,
reprojection_normalization='kp_total_count',
independent_phi_for_aug=False,
canonicalization={
'use': True,
'n_layers': 6,
'n_rand_samples': 4,
'rot_angle': float(np.pi),
'n_fully_connected': 1024,
},
perspective_depth_threshold=0.1,
depth_offset=0.,
replace_keypoints_with_input=True,
root_joint=0,
weight_init_std=0.01,
loss_weights={'l_reprojection': 1.,
'l_canonicalization': 1.},
log_vars=[
'objective',
'dist_reprojection',
'l_reprojection',
'l_canonicalization'],
**kwargs):
super(C3DPO, self).__init__()
# autoassign constructor params to self
auto_init_args(self)
# factorization net
self.phi = nn.Sequential(
*self.make_trunk(dim_in=self.n_keypoints * 3,
# 2 dim loc, 1 dim visibility
n_fully_connected=self.n_fully_connected,
n_layers=self.n_layers))
# shape coefficient predictor
self.alpha_layer = conv1x1(self.n_fully_connected,
self.shape_basis_size,
std=weight_init_std)
# 3D shape predictor
self.shape_layer = conv1x1(self.shape_basis_size, 3*n_keypoints,
std=weight_init_std)
# rotation predictor (predicts log-rotation)
self.rot_layer = conv1x1(self.n_fully_connected, 3,
std=weight_init_std)
if self.camera_translation:
# camera translation
self.translation_layer = conv1x1(self.n_fully_connected, 3,
std=weight_init_std)
if self.camera_scale:
# camera scale (with final sofplus to ensure positive outputs)
self.scale_layer = nn.Sequential(conv1x1(self.n_fully_connected, 3,
std=weight_init_std),
nn.Softplus())
if self.canonicalization['use']:
# canonicalization net:
self.psi = nn.Sequential(
*self.make_trunk(dim_in=self.n_keypoints*3,
n_fully_connected=self.canonicalization['n_fully_connected'],
n_layers=self.canonicalization['n_layers']))
self.alpha_layer_psi = conv1x1(self.n_fully_connected,
self.shape_basis_size,
std=weight_init_std)
def make_trunk(self,
n_fully_connected=None,
dim_in=None,
n_layers=None,
use_bn=True):
layer1 = ConvBNLayer(dim_in,
n_fully_connected,
use_bn=use_bn)
layers = [layer1]
        for _ in range(n_layers):
layers.append(ResLayer(n_fully_connected,
int(n_fully_connected/4)))
return layers
def forward(self, kp_loc=None, kp_vis=None,
class_mask=None, K=None, **kwargs):
# dictionary with outputs of the fw pass
preds = {}
# input sizes ...
ba, kp_dim, n_kp = kp_loc.shape
assert kp_dim == 2, 'bad input keypoint dim'
assert n_kp == self.n_keypoints, 'bad # of keypoints!'
if self.projection_type == 'perspective':
assert K is not None
kp_loc_cal = self.calibrate_keypoints(kp_loc, K)
else:
kp_loc_cal = kp_loc
# normalize keypoints
kp_loc_norm, kp_mean = \
self.normalize_keypoints(
kp_loc_cal, kp_vis, rescale=self.keypoint_rescale)
# save for later visualisations ...
preds['kp_loc_norm'] = kp_loc_norm
preds['kp_mean'] = kp_mean
# run the shape predictor
preds['phi'] = self.run_phi(kp_loc_norm, kp_vis, class_mask=class_mask)
if self.canonicalization['use']:
preds['l_canonicalization'], preds['psi'] = \
self.canonicalization_loss(preds['phi'],
class_mask=class_mask)
# 3D->2D project shape to camera
kp_reprojected, depth = self.camera_projection(
preds['phi']['shape_camera_coord'])
preds['kp_reprojected'] = kp_reprojected
# compute the repro loss for backpropagation
if self.reprojection_normalization == 'kp_count_per_image':
preds['l_reprojection'] = avg_l2_huber(
kp_reprojected,
kp_loc_norm,
mask=kp_vis,
                scaling=self.huber_scaling)
elif self.reprojection_normalization == 'kp_total_count':
def flatten_(x): return x.permute(
1, 2, 0).contiguous().view(1, 2, self.n_keypoints*ba)
preds['l_reprojection'] = avg_l2_huber(
flatten_(kp_reprojected),
flatten_(kp_loc_norm),
mask=kp_vis.permute(1, 0).contiguous().view(1, -1),
scaling=self.huber_scaling)
else:
raise ValueError('unknown loss normalization %s' %
                             self.reprojection_normalization)
# unnormalize the shape projections
kp_reprojected_image = \
self.unnormalize_keypoints(kp_reprojected, kp_mean,
rescale=self.keypoint_rescale)
# projections in the image coordinate frame
if self.replace_keypoints_with_input and not self.training:
# use the input points
kp_reprojected_image = \
(1-kp_vis[:, None, :]) * kp_reprojected_image + \
kp_vis[:, None, :] * kp_loc_cal
preds['kp_reprojected_image'] = kp_reprojected_image
# projected 3D shape in the image space
# = unprojection of kp_reprojected_image
shape_image_coord = self.camera_unprojection(
kp_reprojected_image, depth,
rescale=self.keypoint_rescale)
if self.projection_type == 'perspective':
preds['shape_image_coord_cal'] = shape_image_coord
shape_image_coord = \
self.uncalibrate_keypoints(shape_image_coord, K)
preds['kp_reprojected_image_uncal'], _ = \
self.camera_projection(shape_image_coord)
preds['shape_image_coord'] = shape_image_coord
# get the final loss
preds['objective'] = self.get_objective(preds)
assert np.isfinite(
preds['objective'].sum().data.cpu().numpy()), "nans!"
return preds
def camera_projection(self, shape):
depth = shape[:, 2:3, :]
if self.projection_type == 'perspective':
if self.perspective_depth_threshold > 0:
depth = torch.clamp(depth, self.perspective_depth_threshold)
projections = shape[:, 0:2, :] / depth
elif self.projection_type == 'orthographic':
projections = shape[:, 0:2, :]
else:
raise ValueError('no such projection type %s' %
self.projection_type)
return projections, depth
def camera_unprojection(self, kp_loc, depth, rescale=float(1)):
depth = depth / rescale
if self.projection_type == 'perspective':
shape = torch.cat((kp_loc * depth, depth), dim=1)
elif self.projection_type == 'orthographic':
shape = torch.cat((kp_loc, depth), dim=1)
else:
raise ValueError('no such projection type %s' %
self.projection_type)
return shape
def calibrate_keypoints(self, kp_loc, K):
# undo the projection matrix
assert K is not None
kp_loc = kp_loc - K[:, 0:2, 2:3]
focal = torch.stack((K[:, 0, 0], K[:, 1, 1]), dim=1)
kp_loc = kp_loc / focal[:, :, None]
return kp_loc
def uncalibrate_keypoints(self, kp_loc, K):
assert K is not None
kp_loc = torch.bmm(K, kp_loc)
return kp_loc
def normalize_keypoints(self,
kp_loc,
kp_vis,
rescale=1.,
K=None):
if self.keypoint_norm_type == 'to_root':
# center around the root joint
kp_mean = kp_loc[:, :, self.root_joint]
kp_loc_norm = kp_loc - kp_mean[:, :, None]
elif self.keypoint_norm_type == 'to_mean':
# calc the mean of visible points
kp_mean = masked_kp_mean(kp_loc, kp_vis)
# remove the mean from the keypoint locations
kp_loc_norm = kp_loc - kp_mean[:, :, None]
else:
raise ValueError('no such kp norm %s' %
self.keypoint_norm_type)
# rescale
kp_loc_norm = kp_loc_norm * rescale
return kp_loc_norm, kp_mean
def unnormalize_keypoints(self,
kp_loc_norm,
kp_mean,
rescale=1.,
K=None):
kp_loc = kp_loc_norm * (1. / rescale)
kp_loc = kp_loc + kp_mean[:, :, None]
return kp_loc
def run_phi(self,
kp_loc,
kp_vis,
class_mask=None,
):
preds = {}
# batch size
ba = kp_loc.shape[0]
dtype = kp_loc.type()
kp_loc_orig = kp_loc.clone()
if self.z_augment and self.training:
R_rand = rand_rot(ba,
dtype=dtype,
max_rot_angle=float(self.z_augment_rot_angle),
axes=(0, 0, 1))
kp_loc_in = torch.bmm(R_rand[:, 0:2, 0:2], kp_loc)
else:
R_rand = torch.eye(3).type(dtype)[None].repeat((ba, 1, 1))
kp_loc_in = kp_loc_orig
if self.z_equivariance and self.training:
# random xy rot
R_rand_eq = rand_rot(ba,
dtype=dtype,
max_rot_angle=float(
self.z_equivariance_rot_angle),
axes=(0, 0, 1))
kp_loc_in = torch.cat(
(kp_loc_in,
torch.bmm(R_rand_eq[:, 0:2, 0:2], kp_loc_in)
), dim=0)
kp_vis_in = kp_vis.repeat((2, 1))
else:
kp_vis_in = kp_vis
# mask kp_loc by kp_visibility
kp_loc_masked = kp_loc_in * kp_vis_in[:, None, :]
# vectorize
kp_loc_flatten = kp_loc_masked.view(-1, 2*self.n_keypoints)
# concatenate visibilities and kp locations
l1_input = torch.cat((kp_loc_flatten, kp_vis_in), dim=1)
# pass to network
if self.independent_phi_for_aug and l1_input.shape[0] == 2*ba:
feats = torch.cat([self.phi(l1_[:, :, None, None]) for
l1_ in l1_input.split(ba, dim=0)], dim=0)
else:
feats = self.phi(l1_input[:, :, None, None])
# coefficients into the linear basis
shape_coeff = self.alpha_layer(feats)[:, :, 0, 0]
if self.z_equivariance and self.training:
# use the shape coeff from the second set of preds
shape_coeff = shape_coeff[ba:]
# take the feats from the first set
feats = feats[:ba]
# shape prediction is just a linear layer implemented as a conv
shape_canonical = self.shape_layer(
shape_coeff[:, :, None, None])[:, :, 0, 0]
shape_canonical = shape_canonical.view(ba, 3, self.n_keypoints)
if self.keypoint_norm_type == 'to_root':
# make sure we fix the root at 0
root_j = shape_canonical[:, :, self.root_joint]
shape_canonical = shape_canonical - root_j[:, :, None]
# predict camera params
# ... log rotation (exponential representation)
R_log = self.rot_layer(feats)[:, :, 0, 0]
# convert from the 3D to 3x3 rot matrix
R = so3_exponential_map(R_log)
# T vector of the camera
if self.camera_translation:
T = self.translation_layer(feats)[:, :, 0, 0]
if self.camera_xy_translation: # kill the last z-dim
T = T * torch.tensor([1., 1., 0.]).type(dtype)[None, :]
else:
T = R_log.new_zeros(ba, 3)
# offset the translation vector of the camera
if self.depth_offset > 0.:
T[:, 2] = T[:, 2] + self.depth_offset
# scale of the camera
if self.camera_scale:
scale = self.scale_layer(feats)[:, 0, 0, 0]
else:
scale = R_log.new_ones(ba)
# rotated+scaled shape into the camera ( Y = sRX + T )
shape_camera_coord = self.apply_similarity_t(
shape_canonical, R, T, scale)
# undo equivariant transformation
if (self.z_equivariance or self.z_augment) and self.training:
R_rand_inv = R_rand.transpose(2, 1)
R = torch.bmm(R_rand_inv, R)
T = torch.bmm(R_rand_inv, T[:, :, None])[:, :, 0]
shape_camera_coord = torch.bmm(R_rand_inv, shape_camera_coord)
# estimate translation
if self.argmin_translation:
assert self.projection_type == 'orthographic'
projection, _ = self.camera_projection(shape_camera_coord)
T_amin = argmin_translation(projection, kp_loc_orig, v=kp_vis)
T_amin = Fu.pad(T_amin, (0, 1), 'constant', float(0))
shape_camera_coord = shape_camera_coord + T_amin[:, :, None]
T = T + T_amin
if class_mask is not None:
shape_camera_coord = shape_camera_coord * class_mask[:, None, :]
shape_canonical = shape_canonical * class_mask[:, None, :]
preds['R_log'] = R_log
preds['R'] = R
preds['scale'] = scale
preds['T'] = T
preds['shape_camera_coord'] = shape_camera_coord
preds['shape_coeff'] = shape_coeff
preds['shape_canonical'] = shape_canonical
return preds
def apply_similarity_t(self, S, R, T, s):
return torch.bmm(R, s[:, None, None] * S) + T[:, :, None]
def canonicalization_loss(self, phi_out, class_mask=None):
shape_canonical = phi_out['shape_canonical']
dtype = shape_canonical.type()
ba = shape_canonical.shape[0]
n_sample = self.canonicalization['n_rand_samples']
# rotate the canonical point cloud
# generate random rotation around all axes
R_rand = rand_rot(ba * n_sample,
dtype=dtype,
max_rot_angle=self.canonicalization['rot_angle'],
axes=(1, 1, 1))
unrotated = shape_canonical.repeat(n_sample, 1, 1)
rotated = torch.bmm(R_rand, unrotated)
psi_out = self.run_psi(rotated) # psi3( Rrand X )
a, b = psi_out['shape_canonical'], unrotated
l_canonicalization = avg_l2_huber(a, b,
scaling=self.huber_scaling,
mask=class_mask.repeat(n_sample, 1)
if class_mask is not None else None)
# reshape the outputs in the output list
psi_out = {k: v.view(
self.canonicalization['n_rand_samples'],
ba, *v.shape[1:]) for k, v in psi_out.items()}
return l_canonicalization, psi_out
def run_psi(self, shape_canonical):
preds = {}
# batch size
ba = shape_canonical.shape[0]
assert shape_canonical.shape[1] == 3, '3d inputs only please'
# reshape and pass to the network ...
l1_input = shape_canonical.view(ba, 3*self.n_keypoints)
# pass to network
feats = self.psi(l1_input[:, :, None, None])
# coefficients into the linear basis
shape_coeff = self.alpha_layer_psi(feats)[:, :, 0, 0]
preds['shape_coeff'] = shape_coeff
# use the shape_pred_layer from 2d predictor
shape_pred = self.shape_layer(
shape_coeff[:, :, None, None])[:, :, 0, 0]
shape_pred = shape_pred.view(ba, 3, self.n_keypoints)
preds['shape_canonical'] = shape_pred
return preds
def get_objective(self, preds):
losses_weighted = [preds[k] * float(w) for k, w in
self.loss_weights.items()
if k in preds]
if (not hasattr(self, '_loss_weights_printed') or
not self._loss_weights_printed) and self.training:
print('-------\nloss_weights:')
for k, w in self.loss_weights.items():
print('%20s: %1.2e' % (k, w))
print('-------')
self._loss_weights_printed = True
loss = torch.stack(losses_weighted).sum()
return loss
def visualize(self, visdom_env, trainmode,
preds, stats, clear_env=False):
viz = get_visdom_connection(server=stats.visdom_server,
port=stats.visdom_port)
if not viz.check_connection():
print("no visdom server! -> skipping batch vis")
return
if clear_env: # clear visualisations
print(" ... clearing visdom environment")
viz.close(env=visdom_env, win=None)
print('vis into env:\n %s' % visdom_env)
it = stats.it[trainmode]
epoch = stats.epoch
idx_image = 0
title = "e%d_it%d_im%d" % (epoch, it, idx_image)
# get the connectivity pattern
sticks = STICKS[self.connectivity_setup] if \
self.connectivity_setup in STICKS else None
var_kp = {'orthographic': 'kp_reprojected_image',
'perspective': 'kp_reprojected_image_uncal'
}[self.projection_type]
# show reprojections
p = np.stack(
[preds[k][idx_image].detach().cpu().numpy()
for k in (var_kp, 'kp_loc')])
v = preds['kp_vis'][idx_image].detach().cpu().numpy()
show_projections(p, visdom_env=visdom_env, v=v,
title='projections_'+title, cmap__='gist_ncar',
markersize=50, sticks=sticks,
stickwidth=1, plot_point_order=False,
image_path=preds['image_path'][idx_image],
visdom_win='projections')
# show 3d reconstruction
if True:
var3d = {'orthographic': 'shape_image_coord',
'perspective': 'shape_image_coord_cal'
}[self.projection_type]
pcl = {'pred': preds[var3d]
[idx_image].detach().cpu().numpy().copy()}
if 'kp_loc_3d' in preds:
pcl['gt'] = preds['kp_loc_3d'][idx_image].detach(
).cpu().numpy().copy()
if self.projection_type == 'perspective':
                # for perspective projections, we don't know the scale,
                # so we estimate it here ...
scale = argmin_scale(torch.from_numpy(pcl['pred'][None]),
torch.from_numpy(pcl['gt'][None]))
pcl['pred'] = pcl['pred'] * float(scale)
elif self.projection_type == 'orthographic':
# here we depth-center gt and predictions
for k in ('pred', 'gt'):
pcl_ = pcl[k].copy()
meanz = pcl_.mean(1) * np.array([0., 0., 1.])
pcl[k] = pcl_ - meanz[:, None]
else:
raise ValueError(self.projection_type)
visdom_plot_pointclouds(viz, pcl, visdom_env, '3d_'+title,
plot_legend=False, markersize=20,
sticks=sticks, win='3d')
def pytorch_ge12():
    # compare (major, minor) version numbers numerically
    major, minor = torch.__version__.split('.')[0:2]
    return (int(major), int(minor)) >= (1, 2)
def conv1x1(in_planes, out_planes, std=0.01):
"""1x1 convolution"""
cnv = nn.Conv2d(in_planes, out_planes, bias=True, kernel_size=1)
cnv.weight.data.normal_(0., std)
if cnv.bias is not None:
cnv.bias.data.fill_(0.)
return cnv
class ConvBNLayer(nn.Module):
def __init__(self, inplanes, planes, use_bn=True, stride=1, ):
super(ConvBNLayer, self).__init__()
# do a reasonable init
self.conv1 = conv1x1(inplanes, planes)
self.use_bn = use_bn
if use_bn:
self.bn1 = nn.BatchNorm2d(planes)
if pytorch_ge12():
self.bn1.weight.data.uniform_(0., 1.)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x):
out = self.conv1(x)
if self.use_bn:
out = self.bn1(out)
out = self.relu(out)
return out
class ResLayer(nn.Module):
def __init__(self, inplanes, planes, expansion=4):
super(ResLayer, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
if pytorch_ge12():
self.bn1.weight.data.uniform_(0., 1.)
self.conv2 = conv1x1(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
if pytorch_ge12():
self.bn2.weight.data.uniform_(0., 1.)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
if pytorch_ge12():
self.bn3.weight.data.uniform_(0., 1.)
self.relu = nn.ReLU(inplace=True)
self.skip = inplanes == (planes*self.expansion)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.skip:
out += residual
out = self.relu(out)
return out
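if __name__ == "__main__":
    # Hedged smoke test (not part of the original file): run a forward pass
    # of C3DPO with default (orthographic, 17-keypoint) settings on random
    # inputs; shapes follow the (batch, dim, n_keypoints) convention above.
    model = C3DPO()
    model.eval()
    kp_loc = torch.randn(2, 2, 17)   # 2D keypoint locations
    kp_vis = torch.ones(2, 17)       # all keypoints visible
    preds = model(kp_loc=kp_loc, kp_vis=kp_vis)
    print(preds['shape_image_coord'].shape)  # torch.Size([2, 3, 17])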
| c3dpo_nrsfm-main | model.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import time
import copy
import json
import numpy as np
import torch
from dataset.dataset_zoo import dataset_zoo
from dataset.eval_zoo import eval_zoo
from model import C3DPO
from config import set_config_from_file, set_config, \
get_arg_parser, dump_config, get_default_args
from tools.attr_dict import nested_attr_dict
from tools.utils import auto_init_args, get_net_input, pprint_dict
from tools.stats import Stats
from tools.vis_utils import get_visdom_env
from tools.model_io import find_last_checkpoint, purge_epoch, \
load_model, get_checkpoint, save_model
from tools.cache_preds import cache_preds
def init_model_from_dir(exp_dir):
cfg_file = os.path.join(exp_dir, 'expconfig.yaml')
if not os.path.isfile(cfg_file):
print('no config %s!' % cfg_file)
return None
exp = ExperimentConfig(cfg_file=cfg_file)
exp.cfg.exp_dir = exp_dir # !
cfg = exp.cfg
# init the model
model, _, _ = init_model(cfg, force_load=True, clear_stats=True)
if torch.cuda.is_available():
model.cuda()
model.eval()
return model, cfg
def init_model(cfg, force_load=False, clear_stats=False, add_log_vars=None):
# get the model
model = C3DPO(**cfg.MODEL)
# obtain the network outputs that should be logged
if hasattr(model, 'log_vars'):
log_vars = copy.deepcopy(model.log_vars)
else:
log_vars = ['objective']
if add_log_vars is not None:
log_vars.extend(copy.deepcopy(add_log_vars))
visdom_env_charts = get_visdom_env(cfg) + "_charts"
# init stats struct
stats = Stats(log_vars, visdom_env=visdom_env_charts,
verbose=False, visdom_server=cfg.visdom_server,
visdom_port=cfg.visdom_port)
# find the last checkpoint
if cfg.resume_epoch > 0:
model_path = get_checkpoint(cfg.exp_dir, cfg.resume_epoch)
else:
model_path = find_last_checkpoint(cfg.exp_dir)
optimizer_state = None
if model_path is not None:
print("found previous model %s" % model_path)
if force_load or cfg.resume:
print(" -> resuming")
model_state_dict, stats_load, optimizer_state = load_model(
model_path)
if not clear_stats:
stats = stats_load
else:
print(" -> clearing stats")
model.load_state_dict(model_state_dict, strict=True)
model.log_vars = log_vars
else:
print(" -> but not resuming -> starting from scratch")
# update in case it got lost during load:
stats.visdom_env = visdom_env_charts
stats.visdom_server = cfg.visdom_server
stats.visdom_port = cfg.visdom_port
stats.plot_file = os.path.join(cfg.exp_dir, 'train_stats.pdf')
stats.synchronize_logged_vars(log_vars)
return model, stats, optimizer_state
def init_optimizer(model, optimizer_state,
PARAM_GROUPS=(),
freeze_bn=False,
breed='sgd',
weight_decay=0.0005,
lr_policy='multistep',
lr=0.001,
gamma=0.1,
momentum=0.9,
betas=(0.9, 0.999),
milestones=[30, 37, ],
max_epochs=43,
):
# init the optimizer
if hasattr(model, '_get_param_groups') and model.custom_param_groups:
# use the model function
p_groups = model._get_param_groups(lr, wd=weight_decay)
else:
allprm = [prm for prm in model.parameters() if prm.requires_grad]
p_groups = [{'params': allprm, 'lr': lr}]
if breed == 'sgd':
optimizer = torch.optim.SGD(p_groups, lr=lr,
momentum=momentum,
weight_decay=weight_decay)
elif breed == 'adagrad':
optimizer = torch.optim.Adagrad(p_groups, lr=lr,
weight_decay=weight_decay)
elif breed == 'adam':
optimizer = torch.optim.Adam(p_groups, lr=lr,
betas=betas,
weight_decay=weight_decay)
else:
raise ValueError("no such solver type %s" % breed)
print(" -> solver type = %s" % breed)
if lr_policy == 'multistep':
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=milestones, gamma=gamma)
else:
raise ValueError("no such lr policy %s" % lr_policy)
# add the max epochs here!
scheduler.max_epochs = max_epochs
if optimizer_state is not None:
print(" -> setting loaded optimizer state")
optimizer.load_state_dict(optimizer_state)
optimizer.zero_grad()
return optimizer, scheduler
def run_training(cfg):
"""
run the training loops
"""
# torch gpu setup
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu_idx)
if cfg.model_zoo is not None:
os.environ["TORCH_MODEL_ZOO"] = cfg.model_zoo
# make the exp dir
os.makedirs(cfg.exp_dir, exist_ok=True)
# set the seeds
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
# set cudnn to reproducibility mode
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# dump the exp config to the exp dir
dump_config(cfg)
# setup datasets
dset_train, dset_val, dset_test = dataset_zoo(**cfg.DATASET)
# init loaders
trainloader = torch.utils.data.DataLoader(dset_train,
num_workers=cfg.num_workers,
pin_memory=True,
batch_size=cfg.batch_size,
shuffle=True)
if dset_val is not None:
valloader = torch.utils.data.DataLoader(dset_val,
num_workers=cfg.num_workers,
pin_memory=True,
batch_size=cfg.batch_size,
shuffle=False)
else:
valloader = None
# test loaders
if dset_test is not None:
testloader = torch.utils.data.DataLoader(dset_test,
num_workers=cfg.num_workers,
pin_memory=True,
batch_size=cfg.batch_size,
shuffle=False)
_, _, eval_vars = eval_zoo(cfg.DATASET.dataset_name)
else:
testloader = None
eval_vars = None
# init the model
model, stats, optimizer_state = init_model(cfg, add_log_vars=eval_vars)
start_epoch = stats.epoch + 1
# move model to gpu
if torch.cuda.is_available():
model.cuda()
# init the optimizer
optimizer, scheduler = init_optimizer(
model, optimizer_state=optimizer_state, **cfg.SOLVER)
# loop through epochs
scheduler.last_epoch = start_epoch
for epoch in range(start_epoch, cfg.SOLVER.max_epochs):
with stats: # automatic new_epoch and plotting at every epoch start
print("scheduler lr = %1.2e" % float(scheduler.get_lr()[-1]))
# train loop
trainvalidate(model, stats, epoch, trainloader, optimizer, False,
visdom_env_root=get_visdom_env(cfg), **cfg)
# val loop
if valloader is not None:
trainvalidate(model, stats, epoch, valloader, optimizer, True,
visdom_env_root=get_visdom_env(cfg), **cfg)
# eval loop (optional)
if testloader is not None:
eval_result = run_eval(cfg, model, testloader, stats=stats)
dump_eval_result(cfg, eval_result)
assert stats.epoch == epoch, "inconsistent stats!"
# delete previous models if required
if cfg.store_checkpoints_purge > 0 and cfg.store_checkpoints:
for prev_epoch in range(epoch-cfg.store_checkpoints_purge):
purge_epoch(cfg.exp_dir, prev_epoch)
# save model
if cfg.store_checkpoints:
outfile = get_checkpoint(cfg.exp_dir, epoch)
save_model(model, stats, outfile, optimizer=optimizer)
scheduler.step()
# the final eval
if testloader is not None:
eval_result = run_eval(cfg, model, testloader, stats=None)
dump_eval_result(cfg, eval_result)
return eval_result
else:
return None
def trainvalidate(model,
stats,
epoch,
loader,
optimizer,
validation,
bp_var='objective',
metric_print_interval=5,
visualize_interval=100,
visdom_env_root='trainvalidate',
**kwargs):
if validation:
model.eval()
trainmode = 'val'
else:
model.train()
trainmode = 'train'
t_start = time.time()
# clear the visualisations on the first run in the epoch
clear_visualisations = True
# get the visdom env name
visdom_env_imgs = visdom_env_root + "_images_" + trainmode
n_batches = len(loader)
for it, batch in enumerate(loader):
last_iter = it == n_batches-1
# move to gpu where possible
net_input = get_net_input(batch)
# the forward pass
if (not validation):
optimizer.zero_grad()
preds = model(**net_input)
else:
with torch.no_grad():
preds = model(**net_input)
# make sure we dont overwrite something
assert not any(k in preds for k in net_input.keys())
preds.update(net_input) # merge everything into one big dict
# update the stats logger
stats.update(preds, time_start=t_start, stat_set=trainmode)
assert stats.it[trainmode] == it, "inconsistent stat iteration number!"
# print textual status update
if (it % metric_print_interval) == 0 or last_iter:
stats.print(stat_set=trainmode, max_it=n_batches)
# visualize results
if (visualize_interval > 0) and (it % visualize_interval) == 0:
model.visualize(visdom_env_imgs, trainmode,
preds, stats, clear_env=clear_visualisations)
clear_visualisations = False
# optimizer step
if (not validation):
loss = preds[bp_var]
loss.backward()
optimizer.step()
def dump_eval_result(cfg, results):
# dump results of eval to cfg.exp_dir
resfile = os.path.join(cfg.exp_dir, 'eval_results.json')
with open(resfile, 'w') as f:
json.dump(results, f)
def run_eval(cfg, model, loader, stats=None):
eval_script, cache_vars, eval_vars = eval_zoo(cfg.DATASET.dataset_name)
cached_preds = cache_preds(
model, loader, stats=stats, cache_vars=cache_vars)
results, _ = eval_script(cached_preds, eval_vars=eval_vars)
if stats is not None:
stats.update(results, stat_set='test')
stats.print(stat_set='test')
return results
class ExperimentConfig(object):
def __init__(self,
cfg_file=None,
model_zoo='./data/torch_zoo/',
exp_name='test',
exp_idx=0,
exp_dir='./data/exps/default/',
gpu_idx=0,
resume=True,
seed=0,
resume_epoch=-1,
store_checkpoints=True,
store_checkpoints_purge=3,
batch_size=256,
num_workers=8,
visdom_env='',
visdom_server='http://localhost',
visdom_port=8097,
metric_print_interval=5,
visualize_interval=0,
SOLVER=get_default_args(init_optimizer),
DATASET=get_default_args(dataset_zoo),
MODEL=get_default_args(C3DPO),
):
self.cfg = get_default_args(ExperimentConfig)
if cfg_file is not None:
set_config_from_file(self.cfg, cfg_file)
else:
auto_init_args(self, tgt='cfg', can_overwrite=True)
self.cfg = nested_attr_dict(self.cfg)
def run_experiment_from_cfg_file(cfg_file):
if not os.path.isfile(cfg_file):
print('no config %s!' % cfg_file)
return None
exp = ExperimentConfig(cfg_file=cfg_file)
results = run_training(exp.cfg)
return results
if __name__ == '__main__':
exp = ExperimentConfig()
parser = get_arg_parser(type(exp))
parsed = vars(parser.parse_args())
if parsed['cfg_file'] is not None:
print('setting config from cfg file %s' % parsed['cfg_file'])
set_config_from_file(exp.cfg, parsed['cfg_file'])
defaults = vars(parser.parse_args(''))
rest = {k: v for k, v in parsed.items() if defaults[k] != parsed[k]}
print('assigning remaining args: %s' % str(list(rest.keys())))
set_config(exp.cfg, rest)
else:
print('setting config from argparser')
set_config(exp.cfg, parsed)
pprint_dict(exp.cfg)
run_training(exp.cfg)
else:
pass
| c3dpo_nrsfm-main | experiment.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from dataset.dataset_zoo import dataset_zoo
from dataset.eval_zoo import eval_zoo
from experiment import init_model_from_dir
from tools.model_io import download_model
from tools.cache_preds import cache_preds
from tabulate import tabulate
def eval_model(dataset_name):
model_dir = download_model(dataset_name, force_download=False)
model, _ = init_model_from_dir(model_dir)
model.eval()
_, _, dataset_test = dataset_zoo(
dataset_name=dataset_name, sets_to_load=('val',),
force_download=False)
loader_test = torch.utils.data.DataLoader(dataset_test,
num_workers=8, pin_memory=True,
batch_size=1024, shuffle=False)
eval_script, cache_vars, eval_vars = eval_zoo(dataset_name)
cached_preds = cache_preds(model, loader_test, cache_vars=cache_vars)
results, _ = eval_script(cached_preds, eval_vars=eval_vars)
return results
if __name__ == '__main__':
results = {}
for dataset in ('h36m', 'h36m_hourglass', 'pascal3d_hrnet',
'pascal3d', 'up3d_79kp'):
results[dataset] = eval_model(dataset)
print('\n\nRESULTS:')
tab_rows = []
for dataset, result in results.items():
tab_row = [dataset]
tab_row.extend([result[m] for m in ('EVAL_MPJPE_best', 'EVAL_stress')])
tab_rows.append(tab_row)
print(tabulate(tab_rows, headers=['dataset', 'MPJPE', 'Stress']))
# RESULTS:
# dataset MPJPE Stress
# -------------- ----------- ----------
# h36m 95.6338 41.5864
# h36m_hourglass 145.021 84.693
# pascal3d_hrnet 56.8909 40.1775
# pascal3d 36.6413 31.0768
# up3d_79kp 0.0672771 0.0406902
| c3dpo_nrsfm-main | evaluate.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import torch
from PIL import Image
from dataset.dataset_configs import STICKS
from experiment import init_model_from_dir
from tools.model_io import download_model
from tools.utils import get_net_input
from tools.vis_utils import show_projections
from visuals.rotating_shape_video import rotating_3d_video
def run_demo():
net_input = get_net_input(get_test_h36m_sample())
model_dir = download_model('h36m')
model, _ = init_model_from_dir(model_dir)
model.eval()
preds = model(**net_input)
# input keypoints
kp_loc = net_input['kp_loc'][0]
# predicted 3d keypoints in camera coords
kp_pred_3d = preds['shape_image_coord'][0]
sticks = STICKS['h36m']
# viz = get_visdom_connection()
im_proj = show_projections(
kp_loc[None].detach().cpu().numpy(),
visdom_env='demo_h36m',
visdom_win='input_keypoints',
title='input_keypoints',
cmap__='rainbow',
markersize=40,
sticks=sticks,
stickwidth=2,
)
im_proj = Image.fromarray(im_proj)
im_proj_path = os.path.join(model_dir, 'demo_projections.png')
print('Saving input keypoints to %s' % im_proj_path)
im_proj.save(im_proj_path)
video_path = os.path.join(model_dir, 'demo_shape.mp4')
rotating_3d_video(kp_pred_3d.detach().cpu(),
video_path=video_path,
sticks=sticks,
title='rotating 3d',
cmap='rainbow',
visdom_env='demo_h36m',
visdom_win='3d_shape',
get_frames=7, )
def get_test_h36m_sample():
kp_loc = \
[[0.0000, 0.2296, 0.1577, 0.1479, -0.2335, -0.1450, 0.0276,
0.0090, 0.0065, -0.0022, 0.0566, -0.3193, -0.4960, -0.4642,
0.3650, 0.8939, 1.3002],
[0.0000, -0.0311, 0.8875, 1.8011, 0.0319, 0.9565, 1.8620,
-0.5053, -1.0108, -1.2185, -1.4179, -0.9106, -0.3406, 0.1310,
-0.9744, -0.7978, -0.8496]]
kp_vis = [1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1.]
kp_loc, kp_vis = [torch.FloatTensor(a) for a in (kp_loc, kp_vis)]
return {'kp_loc': kp_loc[None], 'kp_vis': kp_vis[None]}
if __name__ == '__main__':
run_demo()
| c3dpo_nrsfm-main | demo.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
def masked_kp_mean(kp_loc, kp_vis):
visibility_mass = torch.clamp(kp_vis.sum(1), 1e-4)
kp_mean = (kp_loc*kp_vis[:, None, :]).sum(2)
kp_mean = kp_mean / visibility_mass[:, None]
return kp_mean
def huber(dfsq, scaling=0.03):
loss = (safe_sqrt(1+dfsq/(scaling*scaling), eps=1e-4)-1) * scaling
return loss
def avg_l2_huber(x, y, mask=None, scaling=0.03):
diff = x - y
dist = (diff*diff).sum(1)
dist = huber(dist, scaling=float(scaling))
if mask is not None:
dist = (dist*mask).sum(1) / \
torch.clamp(mask.sum(1), 1.)
else:
if len(dist.shape) == 2 and dist.shape[1] > 1:
dist = dist.mean(1)
dist = dist.mean()
return dist
def avg_l2_dist(x, y, squared=False, mask=None, eps=1e-4):
diff = x - y
dist = (diff*diff).sum(1)
if not squared:
dist = safe_sqrt(dist, eps=eps)
if mask is not None:
dist = (dist*mask).sum(1) / \
torch.clamp(mask.sum(1), 1.)
else:
if len(dist.shape) == 2 and dist.shape[1] > 1:
dist = dist.mean(1)
dist = dist.mean()
return dist
def argmin_translation(x, y, v=None):
    # find translation "T" st. x + T = y
    if v is not None:
        vmass = torch.clamp(v.sum(1, keepdim=True), 1e-4)
        x_mu = (v[:, None, :]*x).sum(2) / vmass
        y_mu = (v[:, None, :]*y).sum(2) / vmass
    else:
        # unweighted means when no visibility mask is given
        x_mu = x.mean(2)
        y_mu = y.mean(2)
    T = y_mu - x_mu
    return T
def argmin_scale(x, y, v=None):
# find scale "s" st.: sx=y
if v is not None: # mask invisible
x = x * v[:, None, :]
y = y * v[:, None, :]
xtx = (x*x).sum(1).sum(1)
xty = (x*y).sum(1).sum(1)
s = xty / torch.clamp(xtx, 1e-4)
return s
def safe_sqrt(A, eps=float(1e-4)):
"""
performs safe differentiable sqrt
"""
return (torch.clamp(A, float(0))+eps).sqrt()
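if __name__ == "__main__":
    # Hedged self-test (not part of the original file): recover a known
    # translation and scale between batched point clouds of shape (B, 3, N).
    x = torch.randn(2, 3, 17)
    T_gt = torch.tensor([[1., -2., 0.5], [0., 3., 1.]])
    print(argmin_translation(x, x + T_gt[:, :, None]))  # ~= T_gt
    s_gt = torch.tensor([2.0, 0.5])
    print(argmin_scale(x, s_gt[:, None, None] * x))     # ~= s_gt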
| c3dpo_nrsfm-main | tools/functions.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from PIL import Image
import tempfile
import numpy as np
import os
import matplotlib
import matplotlib.pyplot as plt
class VideoWriter:
def __init__(self,
ffmpeg_bin='ffmpeg',
out_path='/tmp/video.mp4',
fps=20,
output_format='visdom'):
print("video writer for %s" % out_path)
self.output_format = output_format
self.fps = fps
self.out_path = out_path
self.ffmpeg_bin = ffmpeg_bin
self.frames = []
self.regexp = 'frame_%08d.png'
self.frame_num = 0
self.temp_dir = tempfile.TemporaryDirectory()
self.cache_dir = self.temp_dir.name
def __del__(self):
self.temp_dir.cleanup()
def write_frame(self, frame, resize=None):
outfile = os.path.join(self.cache_dir, self.regexp % self.frame_num)
ftype = type(frame)
if ftype == matplotlib.figure.Figure:
plt.savefig(outfile)
im = None
elif ftype == np.array or ftype == np.ndarray:
im = Image.fromarray(frame)
elif ftype == Image.Image:
im = frame
elif ftype == str:
im = Image.open(frame).convert('RGB')
else:
raise ValueError('cant convert type %s' % str(ftype))
if im is not None:
if resize is not None:
if type(resize) in (float,):
resize = [int(resize*s) for s in im.size]
else:
resize = [int(resize[1]), int(resize[0])]
resize[0] += resize[0] % 2
resize[1] += resize[1] % 2
im = im.resize(resize, Image.ANTIALIAS)
im.save(outfile)
self.frames.append(outfile)
self.frame_num += 1
def get_video(self, silent=True):
regexp = os.path.join(self.cache_dir, self.regexp)
if self.output_format == 'visdom': # works for ppt too
ffmcmd_ = "%s -r %d -i %s -vcodec h264 -f mp4 \
-y -b 2000k -pix_fmt yuv420p %s" % \
(self.ffmpeg_bin, self.fps, regexp, self.out_path)
else:
raise ValueError('no such output type %s' %
str(self.output_format))
print(ffmcmd_)
if silent:
ffmcmd_ += ' > /dev/null 2>&1'
os.system(ffmcmd_)
return self.out_path
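if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): write 10 random
    # frames to /tmp/demo.mp4; assumes an ffmpeg binary is on the PATH.
    writer = VideoWriter(out_path='/tmp/demo.mp4', fps=5)
    for _ in range(10):
        frame = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
        writer.write_frame(frame)
    print(writer.get_video())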
| c3dpo_nrsfm-main | tools/video_writer.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
def nested_attr_dict(dct):
if type(dct) in (dict, AttrDict):
dct = AttrDict(dct)
for k, v in dct.items():
dct[k] = nested_attr_dict(v)
return dct
class AttrDict(dict):
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
elif name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
else:
self[name] = value
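if __name__ == "__main__":
    # Hedged self-test (not part of the original file): nested dicts become
    # attribute-accessible recursively; attribute writes update the dict.
    cfg = nested_attr_dict({'MODEL': {'n_keypoints': 17}, 'lr': 1e-3})
    print(cfg.MODEL.n_keypoints)        # 17
    cfg.MODEL.n_keypoints = 34
    print(cfg['MODEL']['n_keypoints'])  # 34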
| c3dpo_nrsfm-main | tools/attr_dict.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pickle
import torch
import glob
import os
import yaml
def load_stats(flstats):
    try:
        with open(flstats, 'rb') as f:
            stats, _ = pickle.load(f)  # don't load the config
    except Exception:
        # print("Cant load stats! %s" % flstats)
        stats = None
    return stats
def get_model_path(fl):
fl = os.path.splitext(fl)[0]
flmodel = "%s.pth" % fl
return flmodel
def get_optimizer_path(fl):
fl = os.path.splitext(fl)[0]
flopt = "%s_opt.pth" % fl
return flopt
def get_stats_path(fl):
fl = os.path.splitext(fl)[0]
flstats = "%s_stats.pkl" % fl
return flstats
def save_model(model, stats, fl, optimizer=None, cfg=None):
flstats = get_stats_path(fl)
flmodel = get_model_path(fl)
print("saving model to %s" % flmodel)
torch.save(model.state_dict(), flmodel)
if optimizer is not None:
flopt = get_optimizer_path(fl)
print("saving optimizer to %s" % flopt)
torch.save(optimizer.state_dict(), flopt)
print("saving model stats and cfg to %s" % flstats)
pickle.dump((stats, cfg), open(flstats, 'wb'))
def load_model(fl):
flstats = get_stats_path(fl)
flmodel = get_model_path(fl)
flopt = get_optimizer_path(fl)
model_state_dict = torch.load(flmodel, map_location='cpu')
stats = load_stats(flstats)
if os.path.isfile(flopt):
optimizer = torch.load(flopt, map_location='cpu')
else:
optimizer = None
return model_state_dict, stats, optimizer
def get_checkpoint(exp_dir, epoch):
fl = os.path.join(exp_dir, 'model_epoch_%08d.pth' % epoch)
return fl
def find_last_checkpoint(exp_dir, any_path=False):
if any_path:
exts = ['.pth', '_stats.pkl', '_opt.pth']
else:
exts = ['.pth']
for ext in exts:
fls = sorted(glob.glob(os.path.join(
exp_dir, 'model_epoch_'+'[0-9]'*8+ext)))
if len(fls) > 0:
break
if len(fls) == 0:
fl = None
else:
fl = fls[-1][0:-len(ext)] + '.pth'
return fl
def purge_epoch(exp_dir, epoch):
model_path = get_checkpoint(exp_dir, epoch)
to_kill = [model_path,
get_optimizer_path(model_path),
get_stats_path(model_path)]
for k in to_kill:
if os.path.isfile(k):
print('deleting %s' % k)
os.remove(k)
def download_model(model_name, force_download=False):
import urllib.request
from dataset.dataset_configs import MODEL_URL, MODEL_MD5, EXP_ROOT
from tools.utils import md5
exp_name = 'pretrained_%s' % model_name
outdir = os.path.join(EXP_ROOT, exp_name)
if os.path.isdir(outdir) and not force_download:
return outdir
os.makedirs(outdir, exist_ok=True)
url = MODEL_URL[model_name]
print('downloading model %s from %s' % (model_name, url))
model_file = get_checkpoint(outdir, 0)
    try:
        urllib.request.urlretrieve(url, model_file)
    except Exception:
        if os.path.isfile(model_file):
            os.remove(model_file)
        raise RuntimeError("can't download %s" % model_file)
assert md5(model_file) == MODEL_MD5[model_name], 'bad md5!'
# copy the yaml config from our ./cfgs/
cfg_file_src = './cfgs/%s.yaml' % model_name
with open(cfg_file_src, 'r') as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
# overwrite some important fields
cfg['exp_dir'] = outdir
cfg['exp_name'] = exp_name
cfg_file_dst = os.path.join(outdir, 'expconfig.yaml')
print('dumping to %s' % cfg_file_dst)
with open(cfg_file_dst, 'w') as f:
yaml.dump(cfg, f)
assert os.path.isfile(cfg_file_dst)
return outdir
| c3dpo_nrsfm-main | tools/model_io.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
try:
    from torch._six import container_abcs, string_classes, int_classes
except ImportError:  # torch >= 1.9 no longer ships torch._six
    import collections.abc as container_abcs
    string_classes = str
    int_classes = int
import re
import time
import sys
import torch
from tqdm import tqdm
from tools.utils import has_method, get_net_input
def cache_preds(model, loader, cache_vars=None, stats=None, n_extract=None):
print("caching model predictions: %s" % str(cache_vars))
model.eval()
trainmode = 'test'
t_start = time.time()
cached_preds = []
cache_size = 0. # in GB ... counts only cached tensor sizes
n_batches = len(loader)
if n_extract is not None:
n_batches = n_extract
with tqdm(total=n_batches, file=sys.stdout) as pbar:
for it, batch in enumerate(loader):
last_iter = it == n_batches-1
# move to gpu and cast to Var
net_input = get_net_input(batch)
with torch.no_grad():
preds = model(**net_input)
assert not any(k in preds for k in net_input.keys())
preds.update(net_input) # merge everything into one big dict
if stats is not None:
stats.update(preds, time_start=t_start, stat_set=trainmode)
assert stats.it[trainmode] == it, \
"inconsistent stat iteration number!"
# restrict the variables to cache
if cache_vars is not None:
preds = {k: preds[k] for k in cache_vars if k in preds}
# ... gather and log the size of the cache
preds, preds_size = gather_all(preds)
cache_size += preds_size
cached_preds.append(preds)
pbar.set_postfix(cache_size="%1.2f GB" % cache_size)
pbar.update(1)
if last_iter and n_extract is not None:
break
cached_preds_cat = concatenate_cache(cached_preds)
return cached_preds_cat
def gather_all(preds):
cache_size = 0
for k in preds:
if has_method(preds[k], 'cuda'):
preds[k] = preds[k].data.cpu()
cache_size += preds[k].numpy().nbytes / 1e9
elif type(preds[k]) == dict:
preds[k], size_now = gather_all(preds[k])
cache_size += size_now
return preds, cache_size
# cache concatenation - largely taken from pytorch default_collate()
np_str_obj_array_pattern = re.compile(r'[SaUO]')
error_msg_fmt = "batch must contain tensors, numbers, dicts or lists; found {}"
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
def concatenate_cache(batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
elem_type = type(batch[0])
if isinstance(batch[0], torch.Tensor):
out = None
return torch.cat(batch, 0, out=out) # the main difference is here
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(error_msg_fmt.format(elem.dtype))
return concatenate_cache([torch.from_numpy(b) for b in batch])
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(batch[0], int_classes):
return torch.tensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], container_abcs.Mapping):
return {key: concatenate_cache([d[key] for d in batch])
for key in batch[0]}
elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'):
return type(batch[0])(*(concatenate_cache(samples)
for samples in zip(*batch)))
elif isinstance(batch[0], container_abcs.Sequence): # also some diffs here
# just unpack
return [s_ for s in batch for s_ in s]
raise TypeError((error_msg_fmt.format(type(batch[0]))))
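# Minimal usage sketch of concatenate_cache (an addition for documentation,
# not part of the original file; the tensor shapes below are hypothetical).
# Per-batch dicts are merged into a single dict whose tensors are
# concatenated along the batch dimension and whose lists are chained.
def _demo_concatenate_cache():
    batches = [{"pred": torch.zeros(2, 3), "path": ["a", "b"]},
               {"pred": torch.ones(2, 3), "path": ["c", "d"]}]
    merged = concatenate_cache(batches)
    assert merged["pred"].shape == (4, 3)
    assert merged["path"] == ["a", "b", "c", "d"]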
| c3dpo_nrsfm-main | tools/cache_preds.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import math
import torch.nn.functional as Fu
def rand_rot(N, dtype=None, max_rot_angle=float(math.pi),
axes=(1, 1, 1), get_ss=False):
rand_axis = torch.zeros((N, 3)).type(dtype).normal_()
# apply the axes mask
axes = torch.Tensor(axes).type(dtype)
rand_axis = axes[None, :] * rand_axis
rand_axis = Fu.normalize(rand_axis, dim=1, p=2)
rand_angle = torch.ones(N).type(dtype).uniform_(0, max_rot_angle)
R_ss_rand = rand_axis * rand_angle[:, None]
R_rand = so3_exponential_map(R_ss_rand)
if get_ss:
return R_rand, R_ss_rand
else:
return R_rand
def so3_exponential_map(log_rot: torch.Tensor, eps: float = 0.0001):
"""
Convert a batch of logarithmic representations of rotation matrices
`log_rot` to a batch of 3x3 rotation matrices using Rodrigues formula.
The conversion has a singularity around 0 which is handled by clamping
controlled with the `eps` argument.
Args:
log_rot: batch of vectors of shape `(minibatch , 3)`
eps: a float constant handling the conversion singularity around 0
Returns:
batch of rotation matrices of shape `(minibatch , 3 , 3)`
Raises:
ValueError if `log_rot` is of incorrect shape
"""
_, dim = log_rot.shape
if dim != 3:
raise ValueError('Input tensor shape has to be Nx3.')
nrms = (log_rot * log_rot).sum(1)
phis = torch.clamp(nrms, 0.).sqrt()
phisi = 1. / (phis+eps)
fac1 = phisi * phis.sin()
fac2 = phisi * phisi * (1. - phis.cos())
ss = hat(log_rot)
R = fac1[:, None, None] * ss + \
fac2[:, None, None] * torch.bmm(ss, ss) + \
torch.eye(3, dtype=log_rot.dtype, device=log_rot.device)[None]
return R
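# Illustrative sanity check for so3_exponential_map (added for documentation;
# `_demo_so3_exponential_map` is not part of the original module). A log-map
# vector of magnitude pi/2 along z should rotate the x-axis onto the y-axis;
# tolerances are loose because of the eps regularization above.
def _demo_so3_exponential_map():
    log_rot = torch.tensor([[0., 0., math.pi / 2]])
    R = so3_exponential_map(log_rot)[0]
    assert torch.allclose(R @ R.t(), torch.eye(3), atol=1e-3)  # orthonormal
    assert torch.allclose(R @ torch.tensor([1., 0., 0.]),
                          torch.tensor([0., 1., 0.]), atol=1e-3)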
def hat(v: torch.Tensor):
"""
Compute the Hat operator [1] of a batch of 3D vectors.
Args:
v: batch of vectors of shape `(minibatch , 3)`
Returns:
batch of skew-symmetric matrices of shape `(minibatch, 3, 3)`
Raises:
ValueError if `v` is of incorrect shape
[1] https://en.wikipedia.org/wiki/Hat_operator
"""
N, dim = v.shape
if dim != 3:
raise ValueError('Input vectors have to be 3-dimensional.')
h = v.new_zeros(N, 3, 3)
x, y, z = v[:, 0], v[:, 1], v[:, 2]
h[:, 0, 1] = -z
h[:, 0, 2] = y
h[:, 1, 0] = z
h[:, 1, 2] = -x
h[:, 2, 0] = -y
h[:, 2, 1] = x
return h
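# Illustrative check of the hat operator (added; `_demo_hat` is not part of
# the original module): hat(v) @ u equals the cross product v x u.
def _demo_hat():
    v = torch.tensor([[1., 2., 3.]])
    u = torch.tensor([4., 5., 6.])
    assert torch.allclose(hat(v)[0] @ u, torch.cross(v[0], u, dim=0))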
| c3dpo_nrsfm-main | tools/so3.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from tools.attr_dict import AttrDict
import inspect
import time
import numpy as np
import hashlib
import torch
def pprint_dict(d, indent=3):
for key, value in d.items():
print(' ' * indent + str(key), end='', flush=True)
if isinstance(value, AttrDict):
print("")
pprint_dict(value, indent+1)
else:
print(' = ' + str(value))
def has_method(ob, m):
obcls = ob.__class__
return hasattr(obcls, m) and callable(getattr(obcls, m))
def get_net_input(batch):
# move to gpu and cast to Var
net_input = {}
for k in batch:
if has_method(batch[k], 'cuda') and torch.cuda.is_available():
net_input[k] = batch[k].cuda()
else:
net_input[k] = batch[k]
return net_input
def auto_init_args(obj, tgt=None, can_overwrite=False):
# autoassign constructor arguments
frame = inspect.currentframe().f_back # the frame above
params = frame.f_locals
nparams = frame.f_code.co_argcount
paramnames = frame.f_code.co_varnames[1:nparams]
if tgt is not None:
if not can_overwrite:
assert not hasattr(obj, tgt)
setattr(obj, tgt, AttrDict())
tgt_attr = getattr(obj, tgt)
else:
tgt_attr = obj
for name in paramnames:
# print('autosetting %s -> %s' % (name,str(params[name])) )
setattr(tgt_attr, name, params[name])
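# Illustrative usage of auto_init_args (added for documentation; the tiny
# class below is hypothetical): constructor arguments are auto-assigned to
# attributes of the same name, mirroring how KeypointsDataset uses it.
def _demo_auto_init_args():
    class _Cfg:
        def __init__(self, lr=0.1, batch_size=32):
            auto_init_args(self)
    cfg = _Cfg(lr=0.5)
    assert cfg.lr == 0.5 and cfg.batch_size == 32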
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
class NumpySeedFix(object):
def __init__(self, seed=0):
self.rstate = None
self.seed = seed
def __enter__(self):
self.rstate = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, type, value, traceback):
        if type is not None and issubclass(type, Exception):
print("error inside 'with' block")
return
np.random.set_state(self.rstate)
class Timer:
def __init__(self, name="timer", quiet=False):
self.name = name
self.quiet = quiet
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.interval = self.end - self.start
if not self.quiet:
print("%20s: %1.6f sec" % (self.name, self.interval))
| c3dpo_nrsfm-main | tools/utils.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import time
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
from itertools import cycle
from collections.abc import Iterable
from tools.vis_utils import get_visdom_connection
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.history = []
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1, epoch=0):
# make sure the history is of the same len as epoch
while len(self.history) <= epoch:
self.history.append([])
self.history[epoch].append(val / n)
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def get_epoch_averages(self, epoch=-1):
if len(self.history) == 0: # no stats here
return None
elif epoch == -1:
return [float(np.array(x).mean()) for x in self.history]
else:
return float(np.array(self.history[epoch]).mean())
def get_all_values(self):
all_vals = [np.array(x) for x in self.history]
all_vals = np.concatenate(all_vals)
return all_vals
def get_epoch(self):
return len(self.history)
class Stats(object):
"""
stats logging object useful for gathering statistics of training
a deep net in pytorch
Example:
# init stats structure that logs statistics 'objective' and 'top1e'
stats = Stats( ('objective','top1e') )
        network = init_net() # init a pytorch module (=neural network)
dataloader = init_dataloader() # init a dataloader
for epoch in range(10):
# start of epoch -> call new_epoch
stats.new_epoch()
# iterate over batches
for batch in dataloader:
# run network and save into a dict of output variables "output"
output = network(batch)
# stats.update() automatically parses the 'objective' and 'top1e'
# from the "output" dict and stores this into the db
stats.update(output)
stats.print() # prints the averages over given epoch
# stores the training plots into '/tmp/epoch_stats.pdf'
# and plots into a visdom server running at localhost (if running)
stats.plot_stats(plot_file='/tmp/epoch_stats.pdf')
"""
def __init__(self, log_vars, verbose=False,
epoch=-1, visdom_env='main',
do_plot=True, plot_file=None,
visdom_server='http://localhost',
visdom_port=8097):
self.verbose = verbose
self.log_vars = log_vars
self.visdom_env = visdom_env
self.visdom_server = visdom_server
self.visdom_port = visdom_port
self.plot_file = plot_file
self.do_plot = do_plot
self.hard_reset(epoch=epoch)
# some sugar to be used with "with stats:" at the beginning of the epoch
def __enter__(self):
if self.do_plot and self.epoch >= 0:
self.plot_stats(self.visdom_env)
self.new_epoch()
def __exit__(self, type, value, traceback):
        iserr = type is not None and issubclass(type, Exception)
        iserr = iserr or (type is KeyboardInterrupt)
if iserr:
print("error inside 'with' block")
return
if self.do_plot:
self.plot_stats(self.visdom_env)
def reset(self): # to be called after each epoch
stat_sets = list(self.stats.keys())
if self.verbose:
print("stats: epoch %d - reset" % self.epoch)
self.it = {k: -1 for k in stat_sets}
for stat_set in stat_sets:
for stat in self.stats[stat_set]:
self.stats[stat_set][stat].reset()
def hard_reset(self, epoch=-1): # to be called during object __init__
self.epoch = epoch
if self.verbose:
print("stats: epoch %d - hard reset" % self.epoch)
self.stats = {}
# reset
self.reset()
def new_epoch(self):
if self.verbose:
print("stats: new epoch %d" % (self.epoch+1))
self.epoch += 1
self.reset() # zero the stats + increase epoch counter
def gather_value(self, val):
if type(val) == float:
pass
else:
val = val.data.cpu().numpy()
val = float(val.sum())
return val
def update(self, preds, time_start=None,
freeze_iter=False, stat_set='train'):
if self.epoch == -1: # uninitialized
            print(
                "warning: epoch==-1 means uninitialized stats structure "
                "-> new_epoch() called")
self.new_epoch()
if stat_set not in self.stats:
self.stats[stat_set] = {}
self.it[stat_set] = -1
if not freeze_iter:
self.it[stat_set] += 1
epoch = self.epoch
it = self.it[stat_set]
for stat in self.log_vars:
if stat not in self.stats[stat_set]:
self.stats[stat_set][stat] = AverageMeter()
if stat == 'sec/it': # compute speed
if time_start is None:
elapsed = 0.
else:
elapsed = time.time() - time_start
time_per_it = float(elapsed) / float(it+1)
val = time_per_it
else:
if stat in preds:
                    try:
                        val = self.gather_value(preds[stat])
                    except Exception as e:
                        raise ValueError(
                            "could not extract prediction %s "
                            "from the prediction dictionary" % stat) from e
else:
val = None
if val is not None:
self.stats[stat_set][stat].update(val, epoch=epoch, n=1)
def get_epoch_averages(self, epoch=None):
stat_sets = list(self.stats.keys())
if epoch is None:
epoch = self.epoch
if epoch == -1:
epoch = list(range(self.epoch))
outvals = {}
for stat_set in stat_sets:
outvals[stat_set] = {'epoch': epoch,
'it': self.it[stat_set],
'epoch_max': self.epoch}
for stat in self.stats[stat_set].keys():
if self.stats[stat_set][stat].count == 0:
continue
if isinstance(epoch, Iterable):
avgs = self.stats[stat_set][stat].get_epoch_averages()
avgs = [avgs[e] for e in epoch]
else:
avgs = self.stats[stat_set][stat].get_epoch_averages(
epoch=epoch)
outvals[stat_set][stat] = avgs
return outvals
def print(self, max_it=None, stat_set='train',
vars_print=None, get_str=False):
epoch = self.epoch
stats = self.stats
str_out = ""
it = self.it[stat_set]
stat_str = ""
stats_print = sorted(stats[stat_set].keys())
for stat in stats_print:
if stats[stat_set][stat].count == 0:
continue
stat_str += " {0:.12}: {1:1.3f} |".format(
stat, stats[stat_set][stat].avg)
head_str = "[%s] | epoch %3d | it %5d" % (stat_set, epoch, it)
if max_it:
head_str += "/ %d" % max_it
str_out = "%s | %s" % (head_str, stat_str)
if get_str:
return str_out
else:
print(str_out)
def plot_stats(self, visdom_env=None, plot_file=None,
visdom_server=None, visdom_port=None):
# use the cached visdom env if none supplied
if visdom_env is None:
visdom_env = self.visdom_env
if visdom_server is None:
visdom_server = self.visdom_server
if visdom_port is None:
visdom_port = self.visdom_port
if plot_file is None:
plot_file = self.plot_file
stat_sets = list(self.stats.keys())
print("printing charts to visdom env '%s' (%s:%d)" %
(visdom_env, visdom_server, visdom_port))
novisdom = False
viz = get_visdom_connection(server=visdom_server, port=visdom_port)
if not viz.check_connection():
print("no visdom server! -> skipping visdom plots")
novisdom = True
lines = []
# plot metrics
if not novisdom:
viz.close(env=visdom_env, win=None)
for stat in self.log_vars:
vals = []
stat_sets_now = []
for stat_set in stat_sets:
val = self.stats[stat_set][stat].get_epoch_averages()
if val is None:
continue
else:
val = np.array(val)[:, None]
stat_sets_now.append(stat_set)
vals.append(val)
if len(vals) == 0:
continue
vals = np.concatenate(vals, axis=1)
x = np.arange(vals.shape[0])
lines.append((stat_sets_now, stat, x, vals,))
if not novisdom:
for idx, (tmodes, stat, x, vals) in enumerate(lines):
title = "%s" % stat
opts = dict(title=title, legend=list(tmodes))
if vals.shape[1] == 1:
vals = vals[:, 0]
viz.line(Y=vals, X=x, env=visdom_env, opts=opts)
if plot_file:
print("exporting stats to %s" % plot_file)
ncol = 3
nrow = int(np.ceil(float(len(lines))/ncol))
matplotlib.rcParams.update({'font.size': 5})
color = cycle(plt.cm.tab10(np.linspace(0, 1, 10)))
fig = plt.figure(1)
plt.clf()
for idx, (tmodes, stat, x, vals) in enumerate(lines):
c = next(color)
plt.subplot(nrow, ncol, idx+1)
for vali, vals_ in enumerate(vals.T):
c_ = c * (1. - float(vali) * 0.3)
plt.plot(x, vals_, c=c_, linewidth=1)
plt.ylabel(stat)
plt.xlabel("epoch")
plt.gca().yaxis.label.set_color(c[0:3]*0.75)
plt.legend(tmodes)
gcolor = np.array(mcolors.to_rgba('lightgray'))
                plt.grid(True, which='major', color=gcolor,
                         linestyle='-', linewidth=0.4)
                plt.grid(True, which='minor', color=gcolor,
                         linestyle='--', linewidth=0.2)
plt.minorticks_on()
plt.tight_layout()
plt.show()
fig.savefig(plot_file)
def synchronize_logged_vars(self, log_vars, default_val=float('NaN')):
stat_sets = list(self.stats.keys())
# remove the additional log_vars
for stat_set in stat_sets:
for stat in self.stats[stat_set].keys():
if stat not in log_vars:
print("additional stat %s:%s -> removing" %
(stat_set, stat))
self.stats[stat_set] = {
stat: v for stat, v in self.stats[stat_set].items()
if stat in log_vars
}
self.log_vars = log_vars # !!!
for stat_set in stat_sets:
reference_stat = list(self.stats[stat_set].keys())[0]
for stat in log_vars:
if stat not in self.stats[stat_set]:
print("missing stat %s:%s -> filling with default values (%1.2f)" %
(stat_set, stat, default_val))
elif len(self.stats[stat_set][stat].history) != self.epoch+1:
h = self.stats[stat_set][stat].history
if len(h) == 0: # just never updated stat ... skip
continue
else:
print("incomplete stat %s:%s -> reseting with default values (%1.2f)" %
(stat_set, stat, default_val))
else:
continue
self.stats[stat_set][stat] = AverageMeter()
self.stats[stat_set][stat].reset()
lastep = self.epoch+1
for ep in range(lastep):
self.stats[stat_set][stat].update(
default_val, n=1, epoch=ep)
epoch_self = self.stats[stat_set][reference_stat].get_epoch()
epoch_generated = self.stats[stat_set][stat].get_epoch()
assert epoch_self == epoch_generated, \
"bad epoch of synchronized log_var! %d vs %d" % \
(epoch_self, epoch_generated)
| c3dpo_nrsfm-main | tools/stats.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import io
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
from PIL import Image
from visdom import Visdom
from tools.utils import NumpySeedFix
# the visdom connection handle
viz = None
def get_visdom_env(cfg):
if len(cfg.visdom_env) == 0:
visdom_env = cfg.exp_dir
else:
visdom_env = cfg.visdom_env
return visdom_env
def get_visdom_connection(server='http://localhost', port=8097):
global viz
if viz is None:
viz = Visdom(server=server, port=port)
return viz
def denorm_image_trivial(im):
im = im - im.min()
im = im / (im.max()+1e-7)
return im
def ensure_im_width(img, basewidth):
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
    img = img.resize((basewidth, hsize), Image.LANCZOS)  # LANCZOS == old ANTIALIAS
return img
def fig2data(fig, size=None):
"""
Convert a Matplotlib figure to a numpy array
"""
buf = io.BytesIO()
plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
buf.seek(0)
im = Image.open(buf).convert('RGB')
if size:
im = im.resize(size)
return np.array(im)
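# Illustrative usage of fig2data (added; not part of the original module,
# and it assumes a working matplotlib backend): render a small figure and
# convert it into an H x W x 3 uint8 array.
def _demo_fig2data():
    fig = plt.figure()
    plt.plot([0, 1], [0, 1])
    im = fig2data(fig, size=(64, 64))
    plt.close(fig)
    assert im.shape == (64, 64, 3)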
def show_projections(p,
visdom_env=None,
visdom_win=None,
v=None,
image_path=None,
image=None,
title='projs',
cmap__='gist_ncar',
markersize=None,
sticks=None,
stickwidth=2,
stick_color=None,
plot_point_order=False,
bbox=None,
):
if image is None:
try:
im = Image.open(image_path).convert('RGB')
im = np.array(im).transpose(2, 0, 1)
        except Exception:
            im = None
            print('cannot load image %s' % image_path)
else:
im = image
nkp = int(p.shape[2])
pid = np.linspace(0., 1., nkp)
if v is not None:
okp = v > 0
else:
okp = np.ones(nkp) == 1
possible_markers = ['o', 'x', 'd']
markers = [possible_markers[i % len(possible_markers)]
for i in range(len(p))]
if markersize is None:
msz = 50
if nkp > 40:
msz = 5
markersizes = [msz]*nkp
else:
markersizes = [markersize]*nkp
fig = plt.figure(figsize=[11, 11])
if im is not None:
plt.imshow(im.transpose((1, 2, 0)))
plt.axis('off')
if sticks is not None:
if stick_color is not None:
linecol = stick_color
else:
linecol = [0., 0., 0.]
for p_ in p:
for stick in sticks:
if v is not None:
if v[stick[0]] > 0 and v[stick[1]] > 0:
linestyle = '-'
else:
continue
else:
linestyle = '-'
plt.plot(p_[0, stick], p_[1, stick], linestyle,
color=linecol, linewidth=stickwidth, zorder=1)
for p_, marker, msz in zip(p, markers, markersizes):
plt.scatter(p_[0, okp], p_[1, okp], msz, pid[okp],
cmap=cmap__, linewidths=2, marker=marker, zorder=2,
vmin=0., vmax=1.)
if plot_point_order:
for ii in np.where(okp)[0]:
plt.text(p_[0, ii], p_[1, ii], '%d' %
ii, fontsize=int(msz*0.25))
if bbox is not None:
import matplotlib.patches as patches
# Create a Rectangle patch
rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3],
linewidth=1, edgecolor='r', facecolor='none')
plt.gca().add_patch(rect)
if im is None:
plt.gca().invert_yaxis()
plt.axis('equal')
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
# plt.gca().set_frame_on(False)
plt.gca().set_axis_off()
else: # remove all margins
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
plt.gca().set_frame_on(False)
plt.gca().set_axis_off()
# return fig
improj = np.array(fig2data(fig))
if visdom_env is not None:
viz = get_visdom_connection()
viz.image(np.array(improj).transpose(2, 0, 1),
env=visdom_env, opts={'title': title}, win=visdom_win)
plt.close(fig)
return improj
def extend_to_3d_skeleton_simple(ptcloud, sticks, line_resol=10, rgb=None):
H36M_TO_MPII_PERM = [3, 2, 1, 4, 5, 6,
0, 8, 9, 10, 16, 15, 14, 11, 12, 13]
    # inputs come in as (3, N); work on (N, 3) rows internally (the .T pairs)
    rgb_now = rgb.T if rgb is not None else None
    ptcloud_now = ptcloud.T
    ptcloud = ptcloud.T
    rgb = rgb.T if rgb is not None else rgb
if ptcloud_now.shape[1] == 16: # MPII
sticks_new = []
for stick in sticks:
if stick[0] in H36M_TO_MPII_PERM and stick[1] in H36M_TO_MPII_PERM:
s1 = H36M_TO_MPII_PERM.index(int(stick[0]))
s2 = H36M_TO_MPII_PERM.index(int(stick[1]))
sticks_new.append([s1, s2])
sticks = sticks_new
for sticki, stick in enumerate(sticks):
alpha = np.linspace(0, 1, line_resol)[:, None]
linepoints = ptcloud[stick[0], :][None, :] * alpha + \
ptcloud[stick[1], :][None, :] * (1. - alpha)
ptcloud_now = np.concatenate((ptcloud_now, linepoints), axis=0)
if rgb is not None:
linergb = rgb[stick[0], :][None, :] * alpha + \
rgb[stick[1], :][None, :] * (1.-alpha)
rgb_now = np.concatenate(
(rgb_now, linergb.astype(np.int32)), axis=0)
if rgb is not None:
rgb_now = rgb_now.T
return ptcloud_now.T, rgb_now
def visdom_plot_pointclouds(viz, pcl, visdom_env, title,
plot_legend=True, markersize=2,
nmax=5000, sticks=None, win=None):
if sticks is not None:
pcl = {k: extend_to_3d_skeleton_simple(v, sticks)[0]
for k, v in pcl.items()}
legend = list(pcl.keys())
cmap = 'tab10'
npcl = len(pcl)
rgb = (cm.get_cmap(cmap)(np.linspace(0, 1, 10))
[:, :3]*255.).astype(np.int32).T
rgb = np.tile(rgb, (1, int(np.ceil(npcl/10))))[:, 0:npcl]
rgb_cat = {k: np.tile(rgb[:, i:i+1], (1, p.shape[1])) for
i, (k, p) in enumerate(pcl.items())}
rgb_cat = np.concatenate(list(rgb_cat.values()), axis=1)
pcl_cat = np.concatenate(list(pcl.values()), axis=1)
if pcl_cat.shape[1] > nmax:
with NumpySeedFix():
prm = np.random.permutation(
pcl_cat.shape[1])[0:nmax]
pcl_cat = pcl_cat[:, prm]
rgb_cat = rgb_cat[:, prm]
win = viz.scatter(pcl_cat.T, env=visdom_env,
opts={'title': title, 'markersize': markersize,
'markercolor': rgb_cat.T}, win=win)
# legend
if plot_legend:
dummy_vals = np.tile(
np.arange(npcl)[:, None], (1, 2)).astype(np.float32)
title = "%s_%s" % (title, legend)
opts = dict(title=title, legend=legend, width=400, height=400)
viz.line(dummy_vals.T, env=visdom_env, opts=opts)
return win
def matplot_plot_point_cloud(ptcloud, pointsize=20, azim=90, elev=90,
figsize=(8, 8), title=None, sticks=None, lim=None,
cmap='gist_ncar', ax=None, subsample=None,
flip_y=False):
if lim is None:
lim = np.abs(ptcloud).max()
nkp = int(ptcloud.shape[1])
pid = np.linspace(0., 1., nkp)
rgb = (cm.get_cmap(cmap)(pid)[:, :3]*255.).astype(np.int32)
if subsample is not None:
with NumpySeedFix():
prm = np.random.permutation(nkp)[0:subsample]
pid = pid[prm]
rgb = rgb[prm, :]
ptcloud = ptcloud[:, prm]
if flip_y:
ptcloud[1, :] = -ptcloud[1, :]
if ax is not None:
fig = None
else:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection='3d')
ax.view_init(elev=elev, azim=azim)
if sticks is not None:
for stick in sticks:
line = ptcloud[:, [stick[0], stick[1]]]
xs, ys, zs = line
ax.plot(xs, ys, zs, color='black')
xs, ys, zs = ptcloud
ax.scatter(xs, ys, zs, s=pointsize, c=pid, marker='.', cmap=cmap)
ax.set_xlim(-lim, lim)
ax.set_ylim(-lim, lim)
ax.set_zlim(-lim, lim)
ax.set_zticklabels([])
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.axis('off')
if title is not None:
ax.set_title(title)
plt.show()
return fig
# old functions to replace:
def enlarge_box(box, perc, imsz):
boxw, boxh = box[2]-box[0], box[3]-box[1]
box[0] -= boxw*perc
box[1] -= boxh*perc
box[2] += boxw*perc
box[3] += boxh*perc
imh, imw = imsz
box = np.maximum(np.minimum(box, np.array([imw, imh, imw, imh])), 0.)
return box
| c3dpo_nrsfm-main | tools/vis_utils.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import copy
from dataset.dataset_configs import IMAGE_ROOTS, DATASET_ROOT
from dataset.keypoints_dataset import KeypointsDataset
def dataset_zoo(dataset_name='h36m',
sets_to_load=('train', 'val'),
force_download=False,
TRAIN={'rand_sample': -1,
'limit_to': -1},
VAL={'rand_sample': -1,
'limit_to': -1},
TEST={'rand_sample': -1,
'limit_to': -1},
**kwargs):
assert dataset_name in ['h36m', 'h36m_hourglass',
'pascal3d', 'pascal3d_hrnet', 'up3d_79kp',
'cub_birds', 'cub_birds_hrnet']
main_root = DATASET_ROOT
json_train = os.path.join(main_root, dataset_name + '_train.json')
if dataset_name == 'up3d_79kp':
# for up3d we eval on test set ...
json_val = os.path.join(main_root, dataset_name + '_test.json')
else:
json_val = os.path.join(main_root, dataset_name + '_val.json')
image_roots = copy.deepcopy(IMAGE_ROOTS)
image_roots = image_roots[dataset_name] \
if dataset_name in image_roots else None
if image_roots is not None:
if len(image_roots) == 2:
image_root_train, image_root_val = image_roots
elif len(image_roots) == 1:
image_root_train = image_root_val = image_roots[0]
        else:
            raise ValueError(
                'expected 1 or 2 image roots, got %d' % len(image_roots))
else:
image_root_train = image_root_val = None
    # auto-download the dataset file if it doesn't exist
for json_file in (json_train, json_val):
if not os.path.isfile(json_file) or force_download:
download_dataset_json(json_file)
dataset_train = None
dataset_val = None
dataset_test = None
if 'train' in sets_to_load:
dataset_train = KeypointsDataset(
image_root=image_root_train,
jsonfile=json_train, train=True, **TRAIN)
if 'val' in sets_to_load:
dataset_val = KeypointsDataset(
image_root=image_root_val,
jsonfile=json_val, train=False, **VAL)
dataset_test = dataset_val
return dataset_train, dataset_val, dataset_test
def download_dataset_json(json_file):
import urllib.request
import json
from dataset.dataset_configs import DATASET_URL, DATASET_MD5
from tools.utils import md5
json_dir = '/'.join(json_file.split('/')[0:-1])
json_name = json_file.split('/')[-1].split('.')[0]
os.makedirs(json_dir, exist_ok=True)
url = DATASET_URL[json_name]
print('downloading dataset json %s from %s' % (json_name, url))
try:
urllib.request.urlretrieve(url, json_file)
    except Exception:
        if os.path.isfile(json_file):
            os.remove(json_file)
        raise
print('checking dataset %s' % json_name)
assert md5(json_file) == DATASET_MD5[json_name], 'bad md5!'
with open(json_file, 'r') as f:
dt = json.load(f)
assert dt['dataset'] == json_name
| c3dpo_nrsfm-main | dataset/dataset_zoo.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import copy
import numpy as np
from tabulate import tabulate
from tqdm import tqdm
def eval_zoo(dataset_name, include_debug_vars=False):
if dataset_name in ('h36m', 'h36m_hourglass'):
eval_script = eval_h36m
cache_vars = ['kp_loc_3d', 'h36m_info', 'shape_image_coord']
eval_vars = ['EVAL_MPJPE_orig', 'EVAL_MPJPE_best', 'EVAL_stress']
elif dataset_name in ('pascal3d', 'pascal3d_hrnet'):
eval_script = eval_pascal3d
cache_vars = ['kp_loc_3d', 'p3d_info', 'class_mask',
'shape_image_coord', 'kp_defined', 'kp_vis']
eval_vars = ['EVAL_MPJPE_orig', 'EVAL_MPJPE_best', 'EVAL_stress']
    elif dataset_name in ('up3d_79kp',):  # trailing comma: tuple, not substring match
eval_script = eval_up3d_79kp
cache_vars = ['kp_loc_3d', 'shape_image_coord']
eval_vars = ['EVAL_MPJPE_orig', 'EVAL_MPJPE_best', 'EVAL_stress']
elif dataset_name in ('cub_birds', 'cub_birds_hrnet'):
eval_script = eval_dummy
cache_vars = ['shape_image_coord']
eval_vars = ['EVAL_dummy']
else:
assert False, ("no such dataset eval %s" % dataset_name)
return eval_script, cache_vars, eval_vars
def eval_dummy(cached_preds, eval_vars=None):
return {'EVAL_dummy': 0.}, None
def eval_pascal3d(cached_preds, eval_vars=None,
N_CLS=12, N_KP=124, N_ENTRIES=1950):
"""
evaluates 3d error metrics on pascal3d
"""
from dataset.dataset_configs import P3D_CLASSES, P3D_NUM_IMAGES
print('PAS 3D evaluation ...')
gt = np.array(cached_preds['kp_loc_3d'])
pred = np.array(cached_preds['shape_image_coord'])
classes = np.array(cached_preds['p3d_info']['p3d_class'])
class_mask = np.array(cached_preds['class_mask'])
kp_defined = np.array(cached_preds['kp_defined'])
eval_mask = class_mask * kp_defined
assert pred.shape[2] == N_KP
for arr in (gt, pred, classes, class_mask, kp_defined, eval_mask):
assert len(arr) == N_ENTRIES, 'wrong n of predictions!'
results = calc_3d_errs(pred, gt, fix_mean_depth=True,
scale=float(1), mask=eval_mask)
metrics = list(results.keys())
# check that eval_vars are all evaluated
if eval_vars is not None:
for m in eval_vars:
assert m in metrics, "missing metric %s!" % m
print("eval vars checks ok!")
all_avg_results, avg_class_results = \
eval_results_per_class(classes, results, P3D_CLASSES,
N_PER_CLASS=P3D_NUM_IMAGES['val'])
print_results_per_class(avg_class_results, all_avg_results)
aux_out = {}
aux_out['per_sample_err'] = results
aux_out['per_class_err'] = avg_class_results
return all_avg_results, aux_out
def eval_up3d_79kp(cached_preds,
eval_vars=None,
N_ENTRIES=15000):
print('UP3D evaluation ... (tgt n entries = %d)' % N_ENTRIES)
gt = np.array(cached_preds['kp_loc_3d'])
pred = np.array(cached_preds['shape_image_coord'])
for arr in (gt, pred):
assert len(arr) == N_ENTRIES, 'wrong n of predictions!'
results = calc_3d_errs(pred, gt, fix_mean_depth=True)
metrics = list(results.keys())
# check that eval_vars are all evaluated
if eval_vars is not None:
for m in eval_vars:
assert m in metrics, "missing metric %s!" % m
all_avg_results = {}
for metric in metrics:
all_avg_results[metric] = float(np.array(results[metric]).mean())
print("%20s: %20s" % (metric, "%1.4f" % all_avg_results[metric]))
aux_out = {}
aux_out['per_sample_err'] = results
return all_avg_results, aux_out
def eval_h36m(cached_preds,
eval_vars=None,
N_ENTRIES=109556,
norm_to_hip=True):
from dataset.dataset_configs import H36M_ACTIONS
print('H36M evaluation ... (tgt n entries = %d)' % N_ENTRIES)
gt = np.array(cached_preds['kp_loc_3d'])
pred = np.array(cached_preds['shape_image_coord'])
scale = np.array(cached_preds['h36m_info']['scale'])
action_names = cached_preds['h36m_info']['action_name']
for arr in (gt, pred, scale, action_names):
assert len(arr) == N_ENTRIES, 'wrong n of predictions!'
if norm_to_hip:
pred = pred - pred[:, :, 0:1]
results = calc_3d_errs(pred, gt,
fix_mean_depth=False,
scale=scale)
metrics = list(results.keys())
# check that eval_vars are all evaluated
if eval_vars is not None:
for m in eval_vars:
assert m in metrics, "missing metric %s!" % m
# print("eval vars checks ok!")
all_avg_results, avg_action_results = \
eval_results_per_class(action_names, results, H36M_ACTIONS)
print_results_per_class(avg_action_results, all_avg_results)
aux_out = {}
aux_out['per_sample_err'] = results
aux_out['per_class_err'] = avg_action_results
return all_avg_results, aux_out
def eval_results_per_class(classes, results, CLASS_LIST, N_PER_CLASS=None):
metrics = list(results.keys())
avg_cls_results = {}
for cls_ in CLASS_LIST:
ok_cls = [ei for ei, _ in enumerate(classes) if classes[ei] == cls_]
cls_results = {k: v[ok_cls] for k, v in results.items()}
if N_PER_CLASS is not None:
assert len(ok_cls) == N_PER_CLASS[cls_]
if True: # asserts ...
for k, v in cls_results.items():
assert v.size == len(ok_cls)
avg_cls_results[cls_] = {k: np.array(
v).mean() for k, v in cls_results.items()}
all_avg_results = {}
for metric in metrics:
avgmetric = [v[metric] for _, v in avg_cls_results.items()]
all_avg_results[metric] = float(np.array(avgmetric).mean())
return all_avg_results, avg_cls_results
def print_results_per_class(avg_cls_results, all_avg_results):
metrics = list(all_avg_results.keys())
# result printing
avg_results_print = copy.deepcopy(avg_cls_results)
avg_results_print['== Mean =='] = all_avg_results
tab_rows = []
for cls_, cls_metrics in avg_results_print.items():
tab_row = [cls_]
for metric in metrics:
val = cls_metrics[metric]
tab_row.append("%1.3f" % val)
tab_rows.append(tab_row)
headers = ['classes']
headers.extend(copy.deepcopy(metrics))
print(tabulate(tab_rows, headers=headers))
def calc_3d_errs(pred, gt,
fix_mean_depth=False,
get_best_scale=False,
scale=float(1), mask=None):
pred_flip = np.copy(pred)
pred_flip[:, 2, :] = -pred_flip[:, 2, :]
pairs_compare = {'EVAL_MPJPE_orig': pred,
'EVAL_MPJPE_flip': pred_flip}
results = {}
for metric, pred_compare in pairs_compare.items():
results[metric] = calc_dist_err(gt, pred_compare,
fix_mean_depth=fix_mean_depth,
get_best_scale=get_best_scale,
scale=scale,
mask=mask)
results['EVAL_MPJPE_best'] = np.minimum(results['EVAL_MPJPE_orig'],
results['EVAL_MPJPE_flip'])
results['EVAL_stress'] = calc_stress_err(gt, pred, mask=mask, scale=scale)
return results
def calc_stress_err(gt, pred, scale=1., mask=None, get_best_scale=False):
assert pred.shape[1] == 3
assert gt.shape[1] == 3
assert pred.shape[0] == gt.shape[0]
assert pred.shape[2] == gt.shape[2]
if get_best_scale:
argmin_scale = compute_best_scale(pred, gt, v=mask)
pred = pred.copy() * argmin_scale[:, None, None]
errs = []
nkp = gt.shape[2]
if mask is not None:
tridx_cache = [np.triu_indices(k, k=1)
for k in range(nkp+1)]
assert mask.shape[1] == pred.shape[2]
assert mask.shape[0] == pred.shape[0]
else:
tridx = np.triu_indices(nkp, k=1)
assert len(tridx[0]) == (nkp*(nkp-1))/2
print('stress eval:')
with tqdm(total=len(gt)) as tq:
for ii, (g_, p_) in enumerate(zip(gt, pred)):
if mask is not None:
mask_ = mask[ii]
else:
mask_ = None
edm_g = calc_edm(g_, squared=False, mask=mask_)
edm_p = calc_edm(p_, squared=False, mask=mask_)
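            # stress = mean absolute difference between the ground-truth and
            # predicted pairwise 3D distances (upper triangle only, below)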
stress = np.abs(edm_g - edm_p)
if mask_ is not None:
nkp_now = edm_g.shape[0]
assert mask_.sum() == nkp_now
tridx = tridx_cache[nkp_now]
mstress = stress[tridx[0], tridx[1]]
mstress = mstress.mean()
# if True:
# triu_mask_ = np.triu(np.ones(nkp),k=1)
# mstress_ = (stress * triu_mask_).sum() / triu_mask_.sum()
# assert np.abs(mstress - mstress_) <= 1e-3
errs.append(mstress)
tq.update(1)
errs = np.array(errs) * scale
return errs
def calc_dist_err(gt, pred, scale=1.,
fix_mean_depth=False,
get_best_scale=False,
mask=None):
assert pred.shape[1] == 3
assert gt.shape[1] == 3
assert pred.shape[0] == gt.shape[0]
assert pred.shape[2] == gt.shape[2]
if fix_mean_depth:
# print('setting mean depth = 0')
pred = set_mean_depth_to_0(pred, mask=mask)
gt = set_mean_depth_to_0(gt, mask=mask)
if get_best_scale:
argmin_scale = compute_best_scale(pred, gt, v=mask)
pred = pred.copy() * argmin_scale[:, None, None]
df = pred - gt
errs = np_safe_sqrt((df*df).sum(1))
if True:
errs_ = np.sqrt((df*df).sum(1))
df__ = np.max(np.abs(errs-errs_))
assert df__ <= 1e-5
# print('err diff = %1.2e' % df__)
if mask is not None:
assert mask.shape[0] == pred.shape[0]
assert mask.shape[1] == pred.shape[2]
assert len(mask.shape) == 2
errs = (mask*errs).sum(1) / mask.sum(1)
else:
errs = errs.mean(1)
errs = errs * scale
return errs
def set_mean_depth_to_0(x, mask=None):
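    # shift each shape so its (mask-weighted) mean z-coordinate becomes zero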
x = x.copy()
if mask is not None:
x = x * mask[:, None, :]
mu_depth = (x.sum(2)/mask.sum(1)[:, None])[:, 2]
else:
mu_depth = x.mean(2)[:, 2]
x[:, 2, :] = x[:, 2, :] - mu_depth[:, None]
if mask is not None:
x = x * mask[:, None, :]
return x
def np_safe_sqrt(x):
y = np.zeros_like(x)
assert (x > -1e-5).all()
x_good = x > 0
y[x_good] = np.sqrt(x[x_good])
return y
def calc_edm(x, squared=True, mask=None):
if mask is not None:
x = x.copy()[:, mask == 1]
xx = x.T @ x
x2 = (x*x).sum(0)
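    # squared EDM via the identity ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 <x_i, x_j>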
edm = x2[:, None]+x2[None, :]-2.*xx
edm = np.maximum(edm, 0.)
if not squared:
edm = np_safe_sqrt(edm)
# edm = np.sqrt(edm)
# if True:
# import scipy
# import scipy.spatial
# edm_ = scipy.spatial.distance.cdist(x.T,x.T)
# df = np.abs(edm-edm_).max()
# assert df <= edm.mean()/200., '%1.3e' % df
# # print('df = %1.3f' % df)
# # scipy.spatial.distance.pdist(x.T)
return edm
def compute_best_scale(pred, gt, v=None):
    # signature matches the calls above (get_best_scale=True paths);
    # the actual least-squares scale fit was never implemented
    raise NotImplementedError('not yet finished')
| c3dpo_nrsfm-main | dataset/eval_zoo.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import json
import copy
import numpy as np
import torch
import pickle
from torch.utils import data
from tools.utils import NumpySeedFix, auto_init_args
class KeypointsDataset(data.Dataset):
"""
This is a generalized class suitable
for storing object keypoint annotations
The input jsonfile needs to be a list of dictionaries
(one dictionary per pose annotation) of the form:
{
# REQUIRED FIELDS #
"kp_loc" : 2 x N list of keypoints
"kp_vis" : 1 x N list of 1/0 boolean indicators
# OPTIONAL FIELDS #
"file_name": name of file from image_root
"kp_loc_3d": 3 x N list of 3D GT keypoint locations in camera coords
}
"""
def __init__(self,
jsonfile=None,
train=True,
limit_to=0,
rand_sample=0,
image_root=None,
refresh_db=False,
):
auto_init_args(self)
self.load_db_file()
has_classes = 'class_mask' in self.db[0]
if has_classes:
self.class_db = self.get_class_db()
else:
self.class_db = None
def load_db_file(self):
print("loading data from %s" % self.jsonfile)
ext = self.jsonfile.split('.')[-1]
if ext == 'json':
with open(self.jsonfile, 'r') as data_file:
db = json.load(data_file)
elif ext == 'pkl':
with open(self.jsonfile, 'rb') as data_file:
db = pickle.load(data_file)
else:
raise ValueError('bad extension %s' % ext)
# the gdrive-downloaded jsons have a slightly different format:
if 'dataset' in db:
db = db['data']
print("data train=%d , n frames = %d" % (self.train, len(db)))
self.db = db
self.restrict_images()
def get_class_db(self):
print('parsing class db ...')
masks = np.stack([np.array(e['class_mask']) for e in self.db])
unq_masks = np.unique(masks, axis=0)
class_db = {tuple(m.tolist()): [] for m in unq_masks}
for ei, e in enumerate(self.db):
class_db[tuple(e['class_mask'])].append(ei)
class_db = list(class_db.values())
for eis in class_db: # sanity check
cls_array = np.stack([self.db[ei]['class_mask'] for ei in eis])
assert ((cls_array - cls_array[0:1, :])**2).sum() <= 1e-6
return class_db
def restrict_images(self):
if self.limit_to > 0:
tgtnum = min(self.limit_to, len(self.db))
with NumpySeedFix():
prm = np.random.permutation(
len(self.db))[0:tgtnum]
print("limitting dataset to %d samples" % tgtnum)
self.db = [self.db[i] for i in prm]
def __len__(self):
if self.rand_sample > 0:
return self.rand_sample
else:
return len(self.db)
def __getitem__(self, index):
if self.rand_sample > 0:
if self.class_db is not None:
                # in case we have classes, sample a random class first
                # and then an image index within that class
cls_index = np.random.randint(len(self.class_db))
index = np.random.choice(self.class_db[cls_index])
else:
index = np.random.randint(len(self.db))
entry = copy.deepcopy(self.db[index])
# convert to torch Tensors where possible
for fld in ('kp_loc', 'kp_vis', 'kp_loc_3d',
'class_mask', 'kp_defined'):
if fld in entry:
entry[fld] = torch.FloatTensor(entry[fld])
if self.image_root is not None and 'image_path' in entry:
entry['image_path'] = os.path.join(
self.image_root, entry['image_path'])
else:
entry['image_path'] = '<NONE>'
        if 'p3d_info' in entry:  # filter out keypoints that fall outside the bbox
bbox = torch.FloatTensor(entry['p3d_info']['bbox'])
bbox_vis, bbox_err = bbox_kp_visibility(
bbox, entry['kp_loc'], entry['kp_vis'])
entry['kp_vis'] = entry['kp_vis'] * bbox_vis.float()
# mask out invisible
entry['kp_loc'] = entry['kp_loc'] * entry['kp_vis'][None]
return entry
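# Illustrative database entry consumed by KeypointsDataset (added for
# documentation; all numbers are hypothetical, see the class docstring for
# the required/optional fields):
#
# {
#     "kp_loc": [[10.0, 15.0], [20.0, 25.0]],     # 2 x N (here N = 2)
#     "kp_vis": [1.0, 0.0],                        # 1 x N
#     "image_path": "images/0001.jpg",
#     "kp_loc_3d": [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],  # 3 x N
# }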
def bbox_kp_visibility(bbox, keypoints, vis):
bx, by, bw, bh = bbox
x = keypoints[0]
y = keypoints[1]
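    # allow a 10% context margin around the bbox when testing visibility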
ctx_ = 0.1
in_box = (x >= bx-ctx_*bw) * (x <= bx+bw*(1+ctx_)) * \
(y >= by-ctx_*bh) * (y <= by+bh*(1+ctx_))
in_box = in_box * (vis == 1)
err = torch.stack([(bx-ctx_*bw)-x,
x-(bx+bw*(1+ctx_)),
(by-ctx_*bh)-y,
y-(by+bh*(1+ctx_))])
err = torch.relu(err) * vis[None].float()
err = torch.stack((torch.max(err[0], err[1]),
torch.max(err[2], err[3]))).max(dim=1)[0]
return in_box, err
| c3dpo_nrsfm-main | dataset/keypoints_dataset.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
EXP_ROOT = './data/exps/c3dpo/'
DATASET_ROOT = './data/datasets/c3dpo/'
DATASET_URL = {
'pascal3d_val': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/pascal3d_val.json',
'pascal3d_train': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/pascal3d_train.json',
'pascal3d_hrnet_val': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/pascal3d_hrnet_val.json',
'pascal3d_hrnet_train': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/pascal3d_hrnet_train.json',
'h36m_val': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/h36m_val.json',
'h36m_train': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/h36m_train.json',
'h36m_hourglass_val': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/h36m_hourglass_val.json',
'h36m_hourglass_train': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/h36m_hourglass_train.json',
'cub_birds_val': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/cub_birds_val.json',
'cub_birds_train': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/cub_birds_train.json',
'cub_birds_hrnet_val': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/cub_birds_hrnet_val.json',
'cub_birds_hrnet_train': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/cub_birds_hrnet_train.json',
'up3d_79kp_train': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/up3d_79kp_train.json',
'up3d_79kp_val': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/up3d_79kp_val.json',
'up3d_79kp_test': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/up3d_79kp_test.json',
}
DATASET_MD5 = {
"h36m_train": "454e2aee4cad761499265f858fe2e0ff",
"h36m_val": "d2347fc651e7f704ce3a4da880852fff",
"h36m_hourglass_train": "d2ffcaf4ce9e49712a65e2b1932814a3",
"h36m_hourglass_val": "9996a703cb3b24da3b5563baa09da2bd",
"pascal3d_hrnet_train": "c145b879e7462f8942a258f7c6dcbee4",
"pascal3d_hrnet_val": "5cb55986b1c19253f0b8213e47688443",
"pascal3d_train": "a78048a101ef56bc371b01f66c19178b",
"pascal3d_val": "0128817c43eaa1eff268d5295700c8ad",
"up3d_79kp_train": "fde2aee038ecd0f145181559eff59c9f",
"up3d_79kp_test": "7d8bf3405ec085394e9257440e8bcb18",
}
MODEL_URL = {
'pascal3d': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/c3dpo_pretrained/pretrained_pascal3d/model_epoch_00000000.pth',
'pascal3d_hrnet': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/c3dpo_pretrained/pretrained_pascal3d_hrnet/model_epoch_00000000.pth',
'h36m': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/c3dpo_pretrained/pretrained_h36m/model_epoch_00000000.pth',
'h36m_hourglass': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/c3dpo_pretrained/pretrained_h36m_hourglass/model_epoch_00000000.pth',
# 'cub_birds': '', TODO(dnovotny)
'up3d_79kp': 'https://dl.fbaipublicfiles.com/c3dpo_nrsfm/c3dpo_pretrained/pretrained_up3d_79kp/model_epoch_00000000.pth',
}
MODEL_MD5 = {
"h36m": "280bce4d1074e1140a0cc23806bcf8cf",
"h36m_hourglass": "4dd849bf6d3b0b6e5d93afbed9cad187",
"pascal3d_hrnet": "464163c58f2827b45def014135870844",
"pascal3d": "464163c58f2827b45def014135870844",
"up3d_79kp": "2de88ac68f0fbb0763dcbce039d74610",
}
# list of root folders containing the dataset images
IMAGE_ROOTS = {}
# ----- connectivity patterns for visualizing the stick-men
STICKS = {
'pose_track': [[2, 0], [0, 1], [1, 5], [5, 7],
[9, 7], [1, 6], [6, 8], [10, 8],
[1, 12], [12, 11], [11, 1], [14, 12],
[11, 13], [15, 13], [16, 14]],
'h36m': [[10, 9], [9, 8], [8, 14],
[14, 15], [15, 16], [8, 11],
[11, 12], [12, 13], [8, 7],
[7, 0], [1, 0], [1, 2],
[2, 3], [0, 4], [4, 5], [5, 6]],
'cub_birds': [[1, 5], [5, 4], [4, 9],
[9, 0], [0, 13], [0, 12],
[0, 8], [12, 13], [1, 14],
[14, 3], [3, 2], [2, 7],
[1, 10], [1, 6], [2, 11],
[2, 7], [8, 13]],
'coco': [[13, 15], [14, 16], [12, 14], [11, 12, ], [11, 13],
[0, 12], [0, 11], [8, 10], [6, 8],
[7, 9], [5, 7], [0, 5], [0, 6],
[0, 3], [0, 4], [0, 2], [0, 1]],
'freicars': [[0, 8], [0, 4], [4, 10], [8, 10],
[10, 9], [9, 11], [8, 11],
[11, 6], [9, 2], [2, 6],
[4, 1], [5, 1], [0, 5], [5, 7], [1, 3],
[7, 3], [3, 2], [7, 6]],
'pascal3d': {
'car': [[0, 8], [0, 4], [4, 10], [8, 10],
[10, 9], [9, 11], [8, 11],
[11, 6], [9, 2], [2, 6],
[4, 1], [5, 1], [0, 5], [5, 7], [1, 3],
[7, 3], [3, 2], [7, 6]],
'aeroplane': [[2, 5], [1, 4], [5, 3], [3, 7],
[7, 0], [0, 5], [5, 7], [5, 6],
[6, 0], [6, 3], [2, 4], [2, 1]],
'motorbike': [[6, 2],
[2, 9],
[2, 3],
[3, 8],
[5, 8],
[3, 5],
[2, 1],
[1, 0],
[0, 7],
[0, 4],
[4, 7],
[1, 4],
[1, 7],
[1, 5],
[1, 8]],
'sofa': [[1, 5],
[5, 4],
[4, 6],
[6, 2],
[2, 0],
[1, 0],
[0, 4],
[1, 3],
[7, 5],
[2, 3],
[3, 7],
[9, 7],
[7, 6],
[6, 8],
[8, 9]],
'chair': [[7, 3],
[6, 2],
[9, 5],
[8, 4],
[7, 9],
[8, 6],
[6, 7],
[9, 8],
[9, 1],
[8, 0],
[1, 0]],
},
}
STICKS['cub_birds_hrnet'] = STICKS['cub_birds']
H36M_ACTIONS = ['Directions', 'Discussion', 'Eating', 'Greeting',
'Phoning', 'Photo', 'Posing', 'Purchases', 'Sitting',
'SittingDown', 'Smoking', 'Waiting', 'WalkDog',
'Walking', 'WalkTogether']
P3D_NUM_KEYPOINTS = {
'aeroplane': 8,
'car': 12,
'tvmonitor': 8,
'sofa': 10,
'motorbike': 10,
'diningtable': 12,
'chair': 10,
'bus': 12,
'bottle': 7,
'boat': 7,
'bicycle': 11,
'train': 17}
P3D_CLASSES = list(P3D_NUM_KEYPOINTS.keys())
P3D_NUM_IMAGES = {
'train': {"aeroplane": 1953, "car": 5627,
"tvmonitor": 1374, "sofa": 669,
"motorbike": 725, "diningtable": 751,
"chair": 1186, "bus": 1185,
"bottle": 1601, "boat": 2046,
"bicycle": 904, "train": 1113, },
'val': {"aeroplane": 269, "car": 294,
"tvmonitor": 206, "sofa": 37,
"motorbike": 116, "diningtable": 12,
"chair": 227, "bus": 153,
"bottle": 249, "boat": 163,
"bicycle": 115, "train": 109}}
| c3dpo_nrsfm-main | dataset/dataset_configs.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import matplotlib.pyplot as plt
import torch
import numpy as np
from tools.video_writer import VideoWriter
from tools.vis_utils import matplot_plot_point_cloud, get_visdom_connection
from tools.so3 import so3_exponential_map
def rotating_3d_video(
shape,
video_path='/tmp/video.mp4',
fps=10,
vlen=4,
sticks=None,
title='rotating 3d',
cmap='rainbow',
visdom_env=None,
visdom_win=None,
get_frames=0,
):
# center
mean = shape.sum(1) / shape.shape[1]
shape = shape - mean[:, None]
lim = float(torch.topk(shape.view(-1), int(0.95*shape.numel()))[0][0])
axis = torch.FloatTensor([0, 1, 0])
angles = torch.linspace(0, np.pi*2, fps*vlen)
log_rots = axis[None, :] * angles[:, None]
Rs = so3_exponential_map(log_rots)
shape_rot = torch.bmm(Rs, shape[None].repeat(len(Rs), 1, 1))
extract_frames = []
if get_frames > 0:
extract_frames = np.round(np.linspace(0, len(Rs)-1, get_frames))
vw = VideoWriter(out_path=video_path)
for ii, shape_rot_ in enumerate(shape_rot):
fig = matplot_plot_point_cloud(shape_rot_.numpy(),
pointsize=300, azim=-90, elev=90,
figsize=(8, 8), title=title,
sticks=sticks, lim=lim,
cmap=cmap, ax=None, subsample=None,
flip_y=True)
vw.write_frame(fig)
if ii in extract_frames:
framefile = os.path.splitext(video_path)[0] + '_%04d.png' % ii
print('exporting %s' % framefile)
plt.savefig(framefile)
plt.close(fig)
vidpath = vw.get_video(silent=True)
if visdom_env is not None:
viz = get_visdom_connection()
viz.video(videofile=vidpath, opts={'title': title},
env=visdom_env, win=visdom_win)
return vidpath
| c3dpo_nrsfm-main | visuals/rotating_shape_video.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements_file(path):
with open(path) as f:
reqs = []
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip blanks and comment lines
            reqs.append(line.split("==")[0])
return reqs
reqs_main = parse_requirements_file("requirements/main.txt")
reqs_dev = parse_requirements_file("requirements/dev.txt")
setuptools.setup(
name="active-mri-acquisition",
version="0.1.0",
author="Facebook AI Research",
description="A reinforcement learning environment for active MRI acquisition.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/facebookresearch/active-mri-acquisition/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence :: Medical Imaging",
],
python_requires=">=3.7",
install_requires=reqs_main,
extras_require={"dev": reqs_main + reqs_dev},
)
| active-mri-acquisition-main | setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import nox
@nox.session()
def lint(session):
session.install("--upgrade", "setuptools", "pip")
session.install("-r", "requirements/dev.txt")
session.run("flake8", "activemri")
# session.run("black", "--check", "activemri")
@nox.session()
def mypy(session):
session.install("--upgrade", "setuptools", "pip")
session.install("-r", "requirements/dev.txt")
session.run("mypy", "activemri")
@nox.session()
def pytest(session) -> None:
session.install("--upgrade", "setuptools", "pip")
session.install("torch")
session.install("torchvision")
session.install("-e", ".")
session.run("pytest", "tests/core")
| active-mri-acquisition-main | noxfile.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import data, envs, experimental
__all__ = ["data", "envs", "experimental"]
| active-mri-acquisition-main | activemri/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import cvpr19_models
__all__ = ["cvpr19_models"]
| active-mri-acquisition-main | activemri/experimental/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import data, models, options, util
__all__ = ["data", "models", "options", "util"]
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ignite.engine
import logging
import os
import tempfile
import types
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.metrics import Loss
from tensorboardX import SummaryWriter
from typing import Any, Dict, Tuple
import activemri.experimental.cvpr19_models.data as data
import activemri.experimental.cvpr19_models.models as models
import activemri.experimental.cvpr19_models.options as options
import activemri.experimental.cvpr19_models.util as util
def run_validation_and_update_best_checkpoint(
engine: ignite.engine.Engine,
val_engine: ignite.engine.Engine = None,
progress_bar: ignite.contrib.handlers.ProgressBar = None,
val_loader: torch.utils.data.DataLoader = None,
trainer: "Trainer" = None,
):
val_engine.run(val_loader)
metrics = val_engine.state.metrics
if trainer.options.use_evaluator:
progress_bar.log_message(
f"Validation Results - Epoch: {engine.state.epoch} "
f"MSE: {metrics['mse']:.3f} SSIM: {metrics['ssim']:.3f} loss_D: "
f"{metrics['loss_D']:.3f}"
)
else:
progress_bar.log_message(
f"Validation Results - Epoch: {engine.state.epoch} "
f"MSE: {metrics['mse']:.3f} SSIM: {metrics['ssim']:.3f}"
)
trainer.completed_epochs += 1
score = -metrics["loss_D"] if trainer.options.only_evaluator else -metrics["mse"]
if score > trainer.best_validation_score:
trainer.best_validation_score = score
full_path = save_checkpoint_function(trainer, "best_checkpoint")
progress_bar.log_message(
f"Saved best checkpoint to {full_path}. Score: {score}. "
f"Iteration: {engine.state.iteration}"
)
def save_checkpoint_function(trainer: "Trainer", filename: str) -> str:
# Ensures atomic checkpoint save to avoid corrupted files if preempted during a save operation
tmp_filename = tempfile.NamedTemporaryFile(
delete=False, dir=trainer.options.checkpoints_dir
)
try:
torch.save(trainer.create_checkpoint(), tmp_filename)
except BaseException:
tmp_filename.close()
os.remove(tmp_filename.name)
raise
else:
tmp_filename.close()
full_path = os.path.join(trainer.options.checkpoints_dir, filename + ".pth")
os.rename(tmp_filename.name, full_path)
return full_path
def save_regular_checkpoint(
engine: ignite.engine.Engine,
trainer: "Trainer" = None,
progress_bar: ignite.contrib.handlers.ProgressBar = None,
):
full_path = save_checkpoint_function(trainer, "regular_checkpoint")
progress_bar.log_message(
f"Saved regular checkpoint to {full_path}. Epoch: {trainer.completed_epochs}, "
f"Iteration: {engine.state.iteration}"
)
class Trainer:
def __init__(self, options: types.SimpleNamespace):
self.reconstructor: torch.nn.Module = None
self.evaluator: torch.nn.Module = None
self.options = options
self.best_validation_score = -float("inf")
self.completed_epochs = 0
self.updates_performed = 0
criterion_gan = models.fft_utils.GANLossKspace(
use_mse_as_energy=options.use_mse_as_disc_energy,
grad_ctx=options.grad_ctx,
gamma=options.gamma,
options=self.options,
).to(options.device)
self.losses = {
"GAN": criterion_gan,
"NLL": models.fft_utils.gaussian_nll_loss,
}
if self.options.only_evaluator:
self.options.checkpoints_dir = os.path.join(
self.options.checkpoints_dir,
"evaluator",
)
if not os.path.exists(self.options.checkpoints_dir):
os.makedirs(self.options.checkpoints_dir)
def create_checkpoint(self) -> Dict[str, Any]:
return {
"reconstructor": self.reconstructor.state_dict(),
"evaluator": self.evaluator.state_dict()
if self.options.use_evaluator
else None,
"options": self.options,
"optimizer_G": self.optimizers["G"].state_dict(),
"optimizer_D": self.optimizers["D"].state_dict()
if self.options.use_evaluator
else None,
"completed_epochs": self.completed_epochs,
"best_validation_score": self.best_validation_score,
"updates_performed": self.updates_performed,
}
def get_loaders(
self,
) -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader]:
train_data_loader, val_data_loader = data.create_data_loaders(self.options)
return train_data_loader, val_data_loader
def inference(self, batch):
self.reconstructor.eval()
with torch.no_grad():
(
zero_filled_image,
ground_truth,
mask,
) = models.fft_utils.preprocess_inputs(
batch, self.options.dataroot, self.options.device
)
# Get reconstructor output
reconstructed_image, uncertainty_map, mask_embedding = self.reconstructor(
zero_filled_image, mask
)
reconstructor_eval = None
ground_truth_eval = None
if self.evaluator is not None:
self.evaluator.eval()
reconstructor_eval = self.evaluator(
reconstructed_image, mask_embedding, mask
)
ground_truth_eval = self.evaluator(ground_truth, mask_embedding, mask)
# Compute magnitude (for val losses and plots)
zero_filled_image_magnitude = models.fft_utils.to_magnitude(
zero_filled_image
)
reconstructed_image_magnitude = models.fft_utils.to_magnitude(
reconstructed_image
)
ground_truth_magnitude = models.fft_utils.to_magnitude(ground_truth)
if self.options.dataroot == "KNEE_RAW": # crop data
reconstructed_image_magnitude = models.fft_utils.center_crop(
reconstructed_image_magnitude, [320, 320]
)
ground_truth_magnitude = models.fft_utils.center_crop(
ground_truth_magnitude, [320, 320]
)
zero_filled_image_magnitude = models.fft_utils.center_crop(
zero_filled_image_magnitude, [320, 320]
)
uncertainty_map = models.fft_utils.center_crop(
uncertainty_map, [320, 320]
)
return {
"ground_truth": ground_truth,
"zero_filled_image": zero_filled_image,
"reconstructed_image": reconstructed_image,
"ground_truth_magnitude": ground_truth_magnitude,
"zero_filled_image_magnitude": zero_filled_image_magnitude,
"reconstructed_image_magnitude": reconstructed_image_magnitude,
"uncertainty_map": uncertainty_map,
"mask": mask,
"reconstructor_eval": reconstructor_eval,
"ground_truth_eval": ground_truth_eval,
}
def load_from_checkpoint_if_present(self):
if not os.path.exists(self.options.checkpoints_dir):
return
self.logger.info(f"Checkpoint folder found at {self.options.checkpoints_dir}")
files = os.listdir(self.options.checkpoints_dir)
for filename in files:
if "regular_checkpoint" in filename:
self.logger.info(f"Loading checkpoint {filename}.pth")
checkpoint = torch.load(
os.path.join(self.options.checkpoints_dir, filename)
)
self.reconstructor.load_state_dict(checkpoint["reconstructor"])
if self.options.use_evaluator:
self.evaluator.load_state_dict(checkpoint["evaluator"])
self.optimizers["D"].load_state_dict(checkpoint["optimizer_D"])
self.optimizers["G"].load_state_dict(checkpoint["optimizer_G"])
self.completed_epochs = checkpoint["completed_epochs"]
self.best_validation_score = checkpoint["best_validation_score"]
self.updates_performed = checkpoint["updates_performed"]
def load_weights_from_given_checkpoint(self):
if self.options.weights_checkpoint is None:
return
elif not os.path.exists(self.options.weights_checkpoint):
raise FileNotFoundError("Specified weights checkpoint do not exist!")
self.logger.info(
f"Loading weights from checkpoint found at {self.options.weights_checkpoint}."
)
checkpoint = torch.load(self.options.weights_checkpoint)
self.reconstructor.load_state_dict(checkpoint["reconstructor"])
if (
self.options.use_evaluator
and "evaluator" in checkpoint
and checkpoint["evaluator"] is not None
):
self.evaluator.load_state_dict(checkpoint["evaluator"])
else:
self.logger.info("Evaluator was not loaded.")
def update(self, batch):
if not self.options.only_evaluator:
self.reconstructor.train()
(zero_filled_image, target, mask,) = models.fft_utils.preprocess_inputs(
batch, self.options.dataroot, self.options.device
)
# Get reconstructor output
reconstructed_image, uncertainty_map, mask_embedding = self.reconstructor(
zero_filled_image, mask
)
# ------------------------------------------------------------------------
# Update evaluator and compute generator GAN Loss
# ------------------------------------------------------------------------
loss_G_GAN = 0
loss_D = torch.tensor(0.0)
if self.evaluator is not None:
self.evaluator.train()
self.optimizers["D"].zero_grad()
fake = reconstructed_image
detached_fake = fake.detach()
if self.options.mask_embed_dim != 0:
mask_embedding = mask_embedding.detach()
output = self.evaluator(
detached_fake,
mask_embedding,
mask if self.options.add_mask_eval else None,
)
loss_D_fake = self.losses["GAN"](
output, False, mask, degree=0, pred_and_gt=(detached_fake, target)
)
real = target
output = self.evaluator(
real, mask_embedding, mask if self.options.add_mask_eval else None
)
loss_D_real = self.losses["GAN"](
output, True, mask, degree=1, pred_and_gt=(detached_fake, target)
)
loss_D = loss_D_fake + loss_D_real
loss_D.backward(retain_graph=True)
self.optimizers["D"].step()
if not self.options.only_evaluator:
output = self.evaluator(
fake, mask_embedding, mask if self.options.add_mask_eval else None
)
loss_G_GAN = self.losses["GAN"](
output,
True,
mask,
degree=1,
updateG=True,
pred_and_gt=(fake, target),
)
loss_G_GAN *= self.options.lambda_gan
# ------------------------------------------------------------------------
# Update reconstructor
# ------------------------------------------------------------------------
loss_G = torch.tensor(0.0)
if not self.options.only_evaluator:
self.optimizers["G"].zero_grad()
loss_G = self.losses["NLL"](
reconstructed_image, target, uncertainty_map, self.options
).mean()
loss_G += loss_G_GAN
loss_G.backward()
self.optimizers["G"].step()
self.updates_performed += 1
return {"loss_D": loss_D.item(), "loss_G": loss_G.item()}
def discriminator_loss(
self,
reconstructor_eval,
target_eval,
reconstructed_image=None,
target=None,
mask=None,
):
if self.evaluator is None:
return 0
with torch.no_grad():
loss_D_fake = self.losses["GAN"](
reconstructor_eval,
False,
mask,
degree=0,
pred_and_gt=(reconstructed_image, target),
)
loss_D_real = self.losses["GAN"](
target_eval,
True,
mask,
degree=1,
pred_and_gt=(reconstructed_image, target),
)
return loss_D_fake + loss_D_real
def __call__(self) -> float:
self.logger = logging.getLogger()
if self.options.debug:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
fh = logging.FileHandler(
os.path.join(self.options.checkpoints_dir, "trainer.log")
)
formatter = logging.Formatter(
"%(asctime)s - %(threadName)s - %(levelname)s: %(message)s"
)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
self.logger.info("Creating trainer with the following options:")
for key, value in vars(self.options).items():
if key == "device":
value = value.type
elif key == "gpu_ids":
value = "cuda : " + str(value) if torch.cuda.is_available() else "cpu"
self.logger.info(f" {key:>25}: {'None' if value is None else value:<30}")
# Create Reconstructor Model
self.reconstructor = models.reconstruction.ReconstructorNetwork(
number_of_cascade_blocks=self.options.number_of_cascade_blocks,
n_downsampling=self.options.n_downsampling,
number_of_filters=self.options.number_of_reconstructor_filters,
number_of_layers_residual_bottleneck=self.options.number_of_layers_residual_bottleneck,
mask_embed_dim=self.options.mask_embed_dim,
dropout_probability=self.options.dropout_probability,
img_width=self.options.image_width,
use_deconv=self.options.use_deconv,
)
if self.options.device.type == "cuda":
self.reconstructor = torch.nn.DataParallel(self.reconstructor).to(
self.options.device
)
self.optimizers = {
"G": optim.Adam(
self.reconstructor.parameters(),
lr=self.options.lr,
betas=(self.options.beta1, 0.999),
)
}
# Create Evaluator Model
if self.options.use_evaluator:
self.evaluator = models.evaluator.EvaluatorNetwork(
number_of_filters=self.options.number_of_evaluator_filters,
number_of_conv_layers=self.options.number_of_evaluator_convolution_layers,
use_sigmoid=False,
width=self.options.image_width,
height=640 if self.options.dataroot == "KNEE_RAW" else None,
mask_embed_dim=self.options.mask_embed_dim,
)
self.evaluator = torch.nn.DataParallel(self.evaluator).to(
self.options.device
)
self.optimizers["D"] = optim.Adam(
self.evaluator.parameters(),
lr=self.options.lr,
betas=(self.options.beta1, 0.999),
)
train_loader, val_loader = self.get_loaders()
self.load_from_checkpoint_if_present()
self.load_weights_from_given_checkpoint()
writer = SummaryWriter(self.options.checkpoints_dir)
# Training engine and handlers
train_engine = Engine(lambda engine, batch: self.update(batch))
val_engine = Engine(lambda engine, batch: self.inference(batch))
validation_mse = Loss(
loss_fn=F.mse_loss,
output_transform=lambda x: (
x["reconstructed_image_magnitude"],
x["ground_truth_magnitude"],
),
)
validation_mse.attach(val_engine, name="mse")
validation_ssim = Loss(
loss_fn=util.common.compute_ssims,
output_transform=lambda x: (
x["reconstructed_image_magnitude"],
x["ground_truth_magnitude"],
),
)
validation_ssim.attach(val_engine, name="ssim")
if self.options.use_evaluator:
validation_loss_d = Loss(
loss_fn=self.discriminator_loss,
output_transform=lambda x: (
x["reconstructor_eval"],
x["ground_truth_eval"],
{
"reconstructed_image": x["reconstructed_image"],
"target": x["ground_truth"],
"mask": x["mask"],
},
),
)
validation_loss_d.attach(val_engine, name="loss_D")
progress_bar = ProgressBar()
progress_bar.attach(train_engine)
train_engine.add_event_handler(
Events.EPOCH_COMPLETED,
run_validation_and_update_best_checkpoint,
val_engine=val_engine,
progress_bar=progress_bar,
val_loader=val_loader,
trainer=self,
)
# Tensorboard Plots
@train_engine.on(Events.ITERATION_COMPLETED)
def plot_training_loss(engine):
writer.add_scalar(
"training/generator_loss",
engine.state.output["loss_G"],
self.updates_performed,
)
if "loss_D" in engine.state.output:
writer.add_scalar(
"training/discriminator_loss",
engine.state.output["loss_D"],
self.updates_performed,
)
@train_engine.on(Events.EPOCH_COMPLETED)
def plot_validation_loss(_):
writer.add_scalar(
"validation/MSE", val_engine.state.metrics["mse"], self.completed_epochs
)
writer.add_scalar(
"validation/SSIM",
val_engine.state.metrics["ssim"],
self.completed_epochs,
)
if "loss_D" in val_engine.state.metrics:
writer.add_scalar(
"validation/loss_D",
val_engine.state.metrics["loss_D"],
self.completed_epochs,
)
@train_engine.on(Events.EPOCH_COMPLETED)
def plot_validation_images(_):
ground_truth = val_engine.state.output["ground_truth_magnitude"]
zero_filled_image = val_engine.state.output["zero_filled_image_magnitude"]
reconstructed_image = val_engine.state.output[
"reconstructed_image_magnitude"
]
uncertainty_map = val_engine.state.output["uncertainty_map"]
difference = torch.abs(ground_truth - reconstructed_image)
# Create plots
ground_truth = util.common.create_grid_from_tensor(ground_truth)
writer.add_image(
"validation_images/ground_truth", ground_truth, self.completed_epochs
)
zero_filled_image = util.common.create_grid_from_tensor(zero_filled_image)
writer.add_image(
"validation_images/zero_filled_image",
zero_filled_image,
self.completed_epochs,
)
reconstructed_image = util.common.create_grid_from_tensor(
reconstructed_image
)
writer.add_image(
"validation_images/reconstructed_image",
reconstructed_image,
self.completed_epochs,
)
uncertainty_map = util.common.gray2heatmap(
util.common.create_grid_from_tensor(uncertainty_map.exp()),
cmap="jet",
)
writer.add_image(
"validation_images/uncertainty_map",
uncertainty_map,
self.completed_epochs,
)
difference = util.common.create_grid_from_tensor(difference)
difference = util.common.gray2heatmap(difference, cmap="gray")
writer.add_image(
"validation_images/difference", difference, self.completed_epochs
)
mask = util.common.create_grid_from_tensor(
val_engine.state.output["mask"].repeat(
1, 1, val_engine.state.output["mask"].shape[3], 1
)
)
writer.add_image(
"validation_images/mask_image", mask, self.completed_epochs
)
train_engine.add_event_handler(
Events.EPOCH_COMPLETED,
save_regular_checkpoint,
trainer=self,
progress_bar=progress_bar,
)
train_engine.run(train_loader, self.options.max_epochs - self.completed_epochs)
writer.close()
return self.best_validation_score
if __name__ == "__main__":
options_ = options.train_options.TrainOptions().parse()
options_.device = (
torch.device("cuda:{}".format(options_.gpu_ids[0]))
if options_.gpu_ids
else torch.device("cpu")
)
options_.checkpoints_dir = os.path.join(options_.checkpoints_dir, options_.name)
if not os.path.exists(options_.checkpoints_dir):
os.makedirs(options_.checkpoints_dir)
trainer_ = Trainer(options_)
trainer_()
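# --- Usage sketch (illustrative; not part of the original pipeline) ----------
# A hypothetical command-line invocation of this trainer; all paths and the
# experiment name below are placeholders:
#
#   python trainer.py --dataset_dir /path/to/knee_dicom --dataroot KNEE \
#       --mask_type basic --name my_experiment --batchSize 8 --max_epochs 100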
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import base_options
class TrainOptions(base_options.BaseOptions):
def initialize(self, parser):
parser = base_options.BaseOptions.initialize(self, parser)
parser.add_argument(
"--beta1", type=float, default=0.5, help="momentum term of adam"
)
parser.add_argument(
"--lr", type=float, default=0.0002, help="initial learning rate for adam"
)
parser.add_argument(
"--mask_type",
type=str,
choices=[
"basic",
"symmetric_basic",
"low_to_high",
"grid",
"symmetric_grid",
"basic_rnl",
"symmetric_basic_rnl",
"low_to_high_rnl",
],
help="The type of mask to use.",
)
parser.add_argument(
"--rnl_params",
type=str,
default=None,
help="Characterizes the distribution of initial masks (when these are sampled, see "
"--train_with_fixed_initial_mask). "
"Format is min_lowf_lines,max_lowf_lines,highf_beta_alpha,highf_beta_beta. "
"Mask have a random number of low frequency lines active, uniform between "
"min_lowf_lines and max_lowf_lines. The remaining number of lines is determined by "
"a Beta(highf_beta_alpha, highf_beta_beta) distribution, which indicates the "
"proportion of the remaining lines to sample.",
)
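        # Example (hypothetical values): "--rnl_params 10,12,1,5" samples the
        # number of active low-frequency lines uniformly between 10 and 12 and
        # draws the proportion of remaining lines from a Beta(1, 5) distribution.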
parser.add_argument(
"--debug", action="store_true", help="Activates debug level messages."
)
parser.add_argument(
"--add_mask_eval",
action="store_true",
help="Sum mask values to observation in evaluator model.",
)
parser.add_argument("--weights_checkpoint", type=str, default=None)
# parser.add_argument("--validation_train_split_ratio", type=float, default=0.9)
parser.add_argument(
"--max_epochs",
type=int,
default=100,
help="number of epochs to train (default: 5)",
)
# parser.add_argument("--save_freq", type=int, default=200)
# Options for Reconstruction Model
parser.add_argument("--number_of_reconstructor_filters", type=int, default=128)
parser.add_argument("--dropout_probability", type=float, default=0)
parser.add_argument("--number_of_cascade_blocks", type=int, default=3)
parser.add_argument(
"--number_of_layers_residual_bottleneck", type=int, default=6
)
parser.add_argument("--n_downsampling", type=int, default=3)
parser.add_argument("--use_deconv", type=bool, default=True)
# Options for Evaluator Model
parser.add_argument(
"--no_evaluator", dest="use_evaluator", action="store_false"
)
parser.add_argument("--number_of_evaluator_filters", type=int, default=128)
parser.add_argument(
"--number_of_evaluator_convolution_layers", type=int, default=4
)
# Options for both Reconstructor and Evaluator Model
parser.add_argument("--mask_embed_dim", type=int, default=6)
parser.add_argument("--image_width", type=int, default=128)
# Options moved from old model file
parser.add_argument(
"--use_mse_as_disc_energy",
action="store_true",
help="use MSE as evaluator energy",
)
parser.add_argument(
"--grad_ctx",
action="store_true",
help="GAN criterion computes adversarial loss signal at provided k-space lines.",
)
parser.add_argument(
"--lambda_gan",
type=float,
default=0.01,
help="Weight for reconstruction loss.",
)
parser.add_argument("--gamma", type=int, default=100)
parser.add_argument(
"--only_evaluator", dest="only_evaluator", action="store_true"
)
return parser
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/options/train_options.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import base_options, train_options # noqa:F401
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/options/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
class BaseOptions:
def __init__(self):
self.initialized = False
self.parser = None
def initialize(self, parser):
parser.add_argument(
"--dataset_dir", required=True, help="Path to fastmri dataset."
)
parser.add_argument(
"--dataroot",
required=True,
help="Path to images (should have subfolders trainA, trainB, valA, valB, etc)",
)
parser.add_argument(
"--batchSize", type=int, default=1, help="Input batch size."
)
parser.add_argument(
"--gpu_ids",
type=str,
default="0",
help="GPU IDs: e.g. 0 0,1,2, 0,2. use -1 for CPU.",
)
parser.add_argument(
"--name",
type=str,
default="experiment_name",
help="Name of the experiment. It determines the sub folder where results are stored.",
)
parser.add_argument(
"--nThreads", default=4, type=int, help="Number of threads for data loader."
)
parser.add_argument(
"--checkpoints_dir",
type=str,
default="./checkpoints",
help="Root directory to save results and model checkpoints.",
)
parser.add_argument(
"--init_type",
type=str,
choices=["normal", "xavier", "kaiming", "orthogonal"],
default="normal",
help="Network weights initialization type.",
)
parser.add_argument(
"--num_volumes_train",
type=int,
default=None,
help="Number of MRI volumes to use for training.",
)
parser.add_argument(
"--num_volumes_val",
type=int,
default=None,
help="Number of MRI volumes to use for validation.",
)
self.initialized = True
return parser
def gather_options(self):
        # Initialize the parser with basic options (only on the first call;
        # the original re-created a None parser and crashed if called twice)
        if not self.initialized:
            parser = argparse.ArgumentParser(
                formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                allow_abbrev=False,
            )
            self.parser = self.initialize(parser)
        return self.parser.parse_args()
def print_options(self, opt):
message = ""
message += "----------------- Options ---------------\n"
for k, v in sorted(vars(opt).items()):
comment = ""
default = self.parser.get_default(k)
if v != default:
comment = "\t[default: %s]" % str(default)
message += "{:>25}: {:<30}{}\n".format(str(k), str(v), comment)
message += "----------------- End -------------------"
print(message)
def parse(self, silent=True):
opt = self.gather_options()
# set gpu ids
str_ids = opt.gpu_ids.split(",")
opt.gpu_ids = []
# for str_id in str_ids:
for str_id in range(len(str_ids)):
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
opt.batchSize *= len(opt.gpu_ids)
print(
f"Use multiple GPUs, batchSize are increased by {len(opt.gpu_ids)} "
f"times to {opt.batchSize}"
)
if not silent:
self.print_options(opt)
return opt
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/options/base_options.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import common
__all__ = ["common"]
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/util/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from typing import Dict, Optional
import matplotlib.pyplot as plt
import numpy as np
import skimage.measure
import torch
import torchvision.utils as tvutil
def load_checkpoint(checkpoint_path: str) -> Optional[Dict]:
if os.path.isfile(checkpoint_path):
logging.info(f"Found checkpoint at {checkpoint_path}.")
return torch.load(checkpoint_path)
logging.info(f"No checkpoint found at {checkpoint_path}.")
return None
def compute_ssims(xs, ys):
ssims = []
for i in range(xs.shape[0]):
ssim = skimage.measure.compare_ssim(
xs[i, 0].cpu().numpy(),
ys[i, 0].cpu().numpy(),
data_range=ys[i, 0].cpu().numpy().max(),
)
ssims.append(ssim)
return np.array(ssims).mean()
def compute_psnrs(xs, ys):
psnrs = []
for i in range(xs.shape[0]):
psnr = skimage.measure.compare_psnr(
xs[i, 0].cpu().numpy(),
ys[i, 0].cpu().numpy(),
data_range=ys[i, 0].cpu().numpy().max(),
)
psnrs.append(psnr)
return np.array(psnrs).mean()
def compute_mse(xs, ys):
return np.mean((ys.cpu().numpy() - xs.cpu().numpy()) ** 2)
def compute_nmse(xs, ys):
ys_numpy = ys.cpu().numpy()
return (
np.linalg.norm(ys_numpy - xs.cpu().numpy()) ** 2 / np.linalg.norm(ys_numpy) ** 2
)
# Converts a Tensor into an image array (numpy)
# |imtype|: the desired type of the converted numpy array
def tensor2im(input_image, imtype=np.uint8, renormalize=True):
if isinstance(input_image, torch.Tensor):
image_tensor = input_image.data
else:
return input_image
    # Do the normalization first; since we are working in Fourier space, we need to clamp
if renormalize:
image_tensor.add_(1).div_(2)
image_tensor.mul_(255).clamp_(0, 255)
if len(image_tensor.shape) == 4:
image_numpy = image_tensor[0].cpu().float().numpy()
else:
image_numpy = image_tensor.cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
return image_numpy.astype(imtype)
def create_grid_from_tensor(tensor_of_images, num_rows=4):
# take norm over real-imaginary dimension
# tensor_of_images = tensor_of_images.norm(dim=1, keepdim=True)
# make image grid
tensor_grid = tvutil.make_grid(
tensor_of_images, nrow=num_rows, normalize=True, scale_each=False
)
numpy_grid = tensor2im(tensor_grid, renormalize=False)
return numpy_grid
def gray2heatmap(grayimg, cmap="jet"):
cmap = plt.get_cmap(cmap)
rgba_img = cmap(grayimg)
# rgb_img = np.delete(rgba_img, 3, 2) * 255.0
rgb_img = rgba_img[:, :, :, 0] * 255.0
rgb_img = rgb_img.astype(np.uint8)
return rgb_img
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/util/common.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
cvpr19_models.models.reconstruction.py
======================================
MRI Reconstruction model as described in `Zhang, Zizhao, et al. "Reducing uncertainty in
undersampled mri reconstruction with active acquisition." Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition. 2019.`
"""
import functools
import torch
import torch.nn as nn
from . import fft_utils
def get_norm_layer(norm_type="instance"):
if norm_type == "batch":
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == "instance":
norm_layer = functools.partial(
nn.InstanceNorm2d, affine=False, track_running_stats=False
)
elif norm_type == "none":
norm_layer = None
else:
raise NotImplementedError("normalization layer [%s] is not found" % norm_type)
return norm_layer
def init_func(m):
init_type = "normal"
gain = 0.02
classname = m.__class__.__name__
if hasattr(m, "weight") and (
classname.find("Conv") != -1 or classname.find("Linear") != -1
):
if init_type == "normal":
torch.nn.init.normal_(m.weight.data, 0.0, gain)
elif init_type == "xavier":
torch.nn.init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == "kaiming":
torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode="fan_in")
elif init_type == "orthogonal":
torch.nn.init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError(
"initialization method [%s] is not implemented" % init_type
)
if hasattr(m, "bias") and m.bias is not None:
torch.nn.init.constant_(m.bias.data, 0.0)
elif classname.find("BatchNorm2d") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, gain)
torch.nn.init.constant_(m.bias.data, 0.0)
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, dropout_probability, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(
dim, padding_type, norm_layer, dropout_probability, use_bias
)
def build_conv_block(
self, dim, padding_type, norm_layer, dropout_probability, use_bias
):
conv_block = []
p = 0
if padding_type == "reflect":
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == "zero":
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [
nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True),
]
if dropout_probability > 0:
conv_block += [nn.Dropout(dropout_probability)]
p = 0
if padding_type == "reflect":
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == "zero":
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [
nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class ReconstructorNetwork(nn.Module):
"""Reconstructor network used in Zhang et al., CVPR'19.
Args:
number_of_encoder_input_channels(int): Number of input channels to the
reconstruction model.
number_of_decoder_output_channels(int): Number of output channels
of the reconstruction model.
number_of_filters(int): Number of convolutional filters.\n
dropout_probability(float): Dropout probability.
number_of_layers_residual_bottleneck (int): Number of residual
blocks in each model between two consecutive down-
or up-sampling operations.
number_of_cascade_blocks (int): Number of times the entire architecture is
replicated.
mask_embed_dim(int): Dimensionality of the mask embedding.
padding_type(str): Convolution operation padding type.
n_downsampling(int): Number of down-sampling operations.
img_width(int): The width of the image.
use_deconv(binary): Whether to use deconvolution in the up-sampling.
"""
def __init__(
self,
number_of_encoder_input_channels=2,
number_of_decoder_output_channels=3,
number_of_filters=128,
dropout_probability=0.0,
number_of_layers_residual_bottleneck=6,
number_of_cascade_blocks=3,
mask_embed_dim=6,
padding_type="reflect",
n_downsampling=3,
img_width=128,
use_deconv=True,
):
super(ReconstructorNetwork, self).__init__()
self.number_of_encoder_input_channels = number_of_encoder_input_channels
self.number_of_decoder_output_channels = number_of_decoder_output_channels
self.number_of_filters = number_of_filters
self.use_deconv = use_deconv
norm_layer = functools.partial(
nn.InstanceNorm2d, affine=False, track_running_stats=False
)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.number_of_cascade_blocks = number_of_cascade_blocks
self.use_mask_embedding = True if mask_embed_dim > 0 else False
if self.use_mask_embedding:
number_of_encoder_input_channels += mask_embed_dim
print("[Reconstructor Network] -> use masked embedding condition")
# Lists of encoder, residual bottleneck and decoder blocks for all cascade blocks
self.encoders_all_cascade_blocks = nn.ModuleList()
self.residual_bottlenecks_all_cascade_blocks = nn.ModuleList()
self.decoders_all_cascade_blocks = nn.ModuleList()
# Architecture for the Cascade Blocks
for iii in range(1, self.number_of_cascade_blocks + 1):
# Encoder for iii_th cascade block
encoder = [
nn.ReflectionPad2d(1),
nn.Conv2d(
number_of_encoder_input_channels,
number_of_filters,
kernel_size=3,
stride=2,
padding=0,
bias=use_bias,
),
norm_layer(number_of_filters),
nn.ReLU(True),
]
for i in range(1, n_downsampling):
mult = 2 ** i
encoder += [
nn.ReflectionPad2d(1),
nn.Conv2d(
number_of_filters * mult // 2,
number_of_filters * mult,
kernel_size=3,
stride=2,
padding=0,
bias=use_bias,
),
norm_layer(number_of_filters * mult),
nn.ReLU(True),
]
self.encoders_all_cascade_blocks.append(nn.Sequential(*encoder))
# Bottleneck for iii_th cascade block
residual_bottleneck = []
mult = 2 ** (n_downsampling - 1)
for i in range(number_of_layers_residual_bottleneck):
residual_bottleneck += [
ResnetBlock(
number_of_filters * mult,
padding_type=padding_type,
norm_layer=norm_layer,
dropout_probability=dropout_probability,
use_bias=use_bias,
)
]
self.residual_bottlenecks_all_cascade_blocks.append(
nn.Sequential(*residual_bottleneck)
)
# Decoder for iii_th cascade block
decoder = []
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - 1 - i)
if self.use_deconv:
decoder += [
nn.ConvTranspose2d(
number_of_filters * mult,
int(number_of_filters * mult / 2),
kernel_size=4,
stride=2,
padding=1,
bias=use_bias,
),
norm_layer(int(number_of_filters * mult / 2)),
nn.ReLU(True),
]
else:
decoder += [nn.Upsample(scale_factor=2), nn.ReflectionPad2d(1)] + [
nn.Conv2d(
number_of_filters * mult,
int(number_of_filters * mult / 2),
kernel_size=3,
stride=1,
padding=0,
bias=use_bias,
),
norm_layer(int(number_of_filters * mult / 2)),
nn.ReLU(True),
]
decoder += [
nn.Conv2d(
number_of_filters // 2,
number_of_decoder_output_channels,
kernel_size=1,
padding=0,
bias=False,
)
            ]  # 1x1 output convolution
self.decoders_all_cascade_blocks.append(nn.Sequential(*decoder))
if self.use_mask_embedding:
self.mask_embedding_layer = nn.Sequential(
nn.Conv2d(img_width, mask_embed_dim, 1, 1)
)
self.apply(init_func)
def data_consistency(self, x, input, mask):
ft_x = fft_utils.fft(x)
fuse = (
fft_utils.ifft(
torch.where((1 - mask).byte(), ft_x, torch.tensor(0.0).to(ft_x.device))
)
+ input
)
return fuse
def embed_mask(self, mask):
b, c, h, w = mask.shape
mask = mask.view(b, w, 1, 1)
cond_embed = self.mask_embedding_layer(mask)
return cond_embed
# noinspection PyUnboundLocalVariable
def forward(self, zero_filled_input, mask):
"""Generates reconstructions given images with partial k-space info.
Args:
zero_filled_input(torch.Tensor): Image obtained from zero-filled reconstruction
of partial k-space scans.
mask(torch.Tensor): Mask used in creating the zero filled image from ground truth
image.
Returns:
tuple(torch.Tensor, torch.Tensor, torch.Tensor): Contains:\n
* Reconstructed high resolution image.
* Uncertainty map.
* Mask_embedding.
"""
if self.use_mask_embedding:
mask_embedding = self.embed_mask(mask)
mask_embedding = mask_embedding.repeat(
1, 1, zero_filled_input.shape[2], zero_filled_input.shape[3]
)
encoder_input = torch.cat([zero_filled_input, mask_embedding], 1)
else:
encoder_input = zero_filled_input
mask_embedding = None
residual_bottleneck_output = None
for cascade_block, (encoder, residual_bottleneck, decoder) in enumerate(
zip(
self.encoders_all_cascade_blocks,
self.residual_bottlenecks_all_cascade_blocks,
self.decoders_all_cascade_blocks,
)
):
encoder_output = encoder(encoder_input)
if cascade_block > 0:
# Skip connection from previous residual block
encoder_output = encoder_output + residual_bottleneck_output
residual_bottleneck_output = residual_bottleneck(encoder_output)
decoder_output = decoder(residual_bottleneck_output)
reconstructed_image = self.data_consistency(
decoder_output[:, :-1, ...], zero_filled_input, mask
)
uncertainty_map = decoder_output[:, -1:, :, :]
if self.use_mask_embedding:
encoder_input = torch.cat([reconstructed_image, mask_embedding], 1)
else:
encoder_input = reconstructed_image
return reconstructed_image, uncertainty_map, mask_embedding
def init_from_checkpoint(self, checkpoint):
if not isinstance(self, nn.DataParallel):
self.load_state_dict(
{
# This assumes that environment code runs in a single GPU
key.replace("module.", ""): val
for key, val in checkpoint["reconstructor"].items()
}
)
else:
self.load_state_dict(checkpoint["reconstructor"])
return checkpoint["options"]
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/models/reconstruction.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import evaluator, fft_utils, reconstruction # noqa: F401
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
def roll(x, shift, dim):
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
# Note: for the IFFT we do not use irfft; ifft() below returns two channels,
# where the first one (the real part) is the image-space signal.
def ifftshift(x, dim=None):
if dim is None:
dim = tuple(range(x.dim()))
        shift = [(d + 1) // 2 for d in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
def fftshift(x, dim=None):
if dim is None:
dim = tuple(range(x.dim()))
        shift = [d // 2 for d in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifft(x, normalized=False, ifft_shift=False):
x = x.permute(0, 2, 3, 1)
y = torch.ifft(x, 2, normalized=normalized)
if ifft_shift:
y = ifftshift(y, dim=(1, 2))
return y.permute(0, 3, 1, 2)
def rfft(x, normalized=False):
# x is in gray scale and has 1-d in the 1st dimension
x = x.squeeze(1)
y = torch.rfft(x, 2, onesided=False, normalized=normalized)
return y.permute(0, 3, 1, 2)
def fft(x, normalized=False, shift=False):
x = x.permute(0, 2, 3, 1)
if shift:
x = fftshift(x, dim=(1, 2))
y = torch.fft(x, 2, normalized=normalized)
return y.permute(0, 3, 1, 2)
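# --- Example (illustrative; added for documentation) -------------------------
# A minimal round trip through fft/ifft above, assuming the pre-1.8 PyTorch
# signal API (torch.fft/torch.ifft as functions) that this module targets.
# Tensors are (batch, 2, height, width), channel 0 = real, channel 1 = imaginary:
#
#   x = torch.randn(1, 2, 8, 8)
#   assert torch.allclose(ifft(fft(x)), x, atol=1e-5)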
def center_crop(x, shape):
assert 0 < shape[0] <= x.shape[-2]
assert 0 < shape[1] <= x.shape[-1]
w_from = (x.shape[-1] - shape[0]) // 2
h_from = (x.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
x = x[..., h_from:h_to, w_from:w_to]
return x
def to_magnitude(tensor):
tensor = (tensor[:, 0, :, :] ** 2 + tensor[:, 1, :, :] ** 2) ** 0.5
return tensor.unsqueeze(1)
def dicom_to_0_1_range(tensor):
return (tensor.clamp(-3, 3) + 3) / 6
def gaussian_nll_loss(reconstruction, target, logvar, options):
reconstruction = to_magnitude(reconstruction)
target = to_magnitude(target)
if options.dataroot == "KNEE_RAW":
reconstruction = center_crop(reconstruction, [320, 320])
target = center_crop(target, [320, 320])
logvar = center_crop(logvar, [320, 320])
l2 = F.mse_loss(reconstruction, target, reduce=False)
# Clip logvar to make variance in [0.0001, 5], for numerical stability
logvar = logvar.clamp(-9.2, 1.609)
one_over_var = torch.exp(-logvar)
assert len(l2) == len(logvar)
return 0.5 * (one_over_var * l2 + logvar)
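# Note: with logvar = log(sigma^2), the value returned above is the per-pixel
# negative log-likelihood of target under N(reconstruction, sigma^2), up to an
# additive constant: 0.5 * (log sigma^2 + (target - reconstruction)^2 / sigma^2).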
def preprocess_inputs(batch, dataroot, device, prev_reconstruction=None):
mask = batch[0].to(device)
target = batch[1].to(device)
if dataroot == "KNEE_RAW":
k_space = batch[2].permute(0, 3, 1, 2).to(device)
# alter mask to always include the highest frequencies that include padding
mask = torch.where(
to_magnitude(k_space).sum(2).unsqueeze(2) == 0.0,
torch.tensor(1.0).to(device),
mask,
)
if prev_reconstruction is None:
masked_true_k_space = torch.where(
mask.byte(), k_space, torch.tensor(0.0).to(device)
)
else:
prev_reconstruction = prev_reconstruction.clone()
prev_reconstruction[:, :, :160, :] = 0
prev_reconstruction[:, :, -160:, :] = 0
prev_reconstruction[:, :, :, :24] = 0
prev_reconstruction[:, :, :, -24:] = 0
ft_x = fft(prev_reconstruction, shift=True)
masked_true_k_space = torch.where(mask.byte(), k_space, ft_x)
reconstructor_input = ifft(masked_true_k_space, ifft_shift=True)
target = target.permute(0, 3, 1, 2)
else:
fft_target = fft(target)
if prev_reconstruction is None:
masked_true_k_space = torch.where(
mask.byte(), fft_target, torch.tensor(0.0).to(device)
)
else:
ft_x = fft(prev_reconstruction)
masked_true_k_space = torch.where(mask.byte(), fft_target, ft_x)
reconstructor_input = ifft(masked_true_k_space)
return reconstructor_input, target, mask
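# --- Example (illustrative; added for documentation) -------------------------
# A hypothetical call for the DICOM ("KNEE") branch, with dummy tensors shaped
# like the data loaders produce (shapes here are an assumption): the mask is
# (B, 1, 1, W) and the target is a two-channel real/imaginary image (B, 2, H, W):
#
#   mask = torch.ones(4, 1, 1, 128)
#   target = torch.randn(4, 2, 128, 128)
#   zero_filled, target, mask = preprocess_inputs(
#       (mask, target), "KNEE", torch.device("cpu")
#   )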
class GANLossKspace(nn.Module):
def __init__(
self,
use_lsgan=True,
use_mse_as_energy=False,
grad_ctx=False,
gamma=100,
options=None,
):
super(GANLossKspace, self).__init__()
# self.register_buffer('real_label', torch.ones(imSize, imSize))
# self.register_buffer('fake_label', torch.zeros(imSize, imSize))
self.grad_ctx = grad_ctx
self.options = options
if use_lsgan:
self.loss = nn.MSELoss(size_average=False)
else:
self.loss = nn.BCELoss(size_average=False)
self.use_mse_as_energy = use_mse_as_energy
if use_mse_as_energy:
self.gamma = gamma
self.bin = 5
def get_target_tensor(self, input, target_is_real, degree, mask, pred_and_gt=None):
if target_is_real:
target_tensor = torch.ones_like(input)
target_tensor[:] = degree
else:
target_tensor = torch.zeros_like(input)
if not self.use_mse_as_energy:
if degree != 1:
target_tensor[:] = degree
else:
pred, gt = pred_and_gt
if self.options.dataroot == "KNEE_RAW":
gt = center_crop(gt, [368, 320])
pred = center_crop(pred, [368, 320])
w = gt.shape[2]
ks_gt = fft(gt, normalized=True)
ks_input = fft(pred, normalized=True)
ks_row_mse = F.mse_loss(ks_input, ks_gt, reduce=False).sum(
1, keepdim=True
).sum(2, keepdim=True).squeeze() / (2 * w)
energy = torch.exp(-ks_row_mse * self.gamma)
target_tensor[:] = energy
        # force the observed k-space lines to always be labeled realistic (target = 1)
for i in range(mask.shape[0]):
idx = torch.nonzero(mask[i, 0, 0, :])
target_tensor[i, idx] = 1
return target_tensor
def __call__(
self, input, target_is_real, mask, degree=1, updateG=False, pred_and_gt=None
):
        # input: [B, imSize]
        # `degree` is the target realism degree for the output
        # set `updateG` to True when training the generator
target_tensor = self.get_target_tensor(
input, target_is_real, degree, mask, pred_and_gt
)
b, w = target_tensor.shape
if updateG and not self.grad_ctx:
mask_ = mask.squeeze()
# maskout the observed part loss
masked_input = torch.where(
(1 - mask_).byte(), input, torch.tensor(0.0).to(input.device)
)
masked_target = torch.where(
(1 - mask_).byte(), target_tensor, torch.tensor(0.0).to(input.device)
)
return self.loss(masked_input, masked_target) / (1 - mask_).sum()
else:
return self.loss(input, target_tensor) / (b * w)
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/models/fft_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
cvpr19_models.models.evaluator.py
=================================
Active acquisition model as described in `Zhang, Zizhao, et al. "Reducing uncertainty in
undersampled mri reconstruction with active acquisition." Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition. 2019.`
"""
import functools
from typing import Optional
import torch
import torch.nn as nn
from . import fft_utils, reconstruction
class SimpleSequential(nn.Module):
def __init__(self, net1, net2):
super(SimpleSequential, self).__init__()
self.net1 = net1
self.net2 = net2
def forward(self, x, mask):
output = self.net1(x, mask)
return self.net2(output, mask)
class SpectralMapDecomposition(nn.Module):
def __init__(self):
super(SpectralMapDecomposition, self).__init__()
def forward(self, reconstructed_image, mask_embedding, mask):
batch_size = reconstructed_image.shape[0]
height = reconstructed_image.shape[2]
width = reconstructed_image.shape[3]
# create spectral maps in kspace
kspace = fft_utils.fft(reconstructed_image)
kspace = kspace.unsqueeze(1).repeat(1, width, 1, 1, 1)
# separate image into spectral maps
separate_mask = torch.zeros([1, width, 1, 1, width], dtype=torch.float32)
for i in range(width):
separate_mask[0, i, 0, 0, i] = 1
separate_mask = separate_mask.to(reconstructed_image.device)
masked_kspace = torch.where(
separate_mask.byte(), kspace, torch.tensor(0.0).to(kspace.device)
)
masked_kspace = masked_kspace.view(batch_size * width, 2, height, width)
# convert spectral maps to image space
separate_images = fft_utils.ifft(masked_kspace)
# result is (batch, [real_M0, img_M0, real_M1, img_M1, ...], height, width]
separate_images = separate_images.contiguous().view(
batch_size, 2, width, height, width
)
# add mask information as a summation -- might not be optimal
if mask is not None:
separate_images = (
separate_images + mask.permute(0, 3, 1, 2).unsqueeze(1).detach()
)
separate_images = separate_images.contiguous().view(
batch_size, 2 * width, height, width
)
# concatenate mask embedding
if mask_embedding is not None:
spectral_map = torch.cat([separate_images, mask_embedding], dim=1)
else:
spectral_map = separate_images
return spectral_map
class EvaluatorNetwork(nn.Module):
"""Evaluator network used in Zhang et al., CVPR'19.
Args:
number_of_filters(int): Number of filters used in convolutions. Defaults to 256. \n
number_of_conv_layers(int): Depth of the model defined as a number of
convolutional layers. Defaults to 4.
use_sigmoid(bool): Whether the sigmoid non-linearity is applied to the
output of the network. Defaults to False.
width(int): The width of the image. Defaults to 128 (corresponds to DICOM).
height(Optional[int]): The height of the image. If ``None`` the value of ``width``.
is used. Defaults to ``None``.
mask_embed_dim(int): Dimensionality of the mask embedding.
num_output_channels(Optional[int]): The dimensionality of the output. If ``None``,
the value of ``width`` is used. Defaults to ``None``.
"""
def __init__(
self,
number_of_filters: int = 256,
number_of_conv_layers: int = 4,
use_sigmoid: bool = False,
width: int = 128,
height: Optional[int] = None,
mask_embed_dim: int = 6,
num_output_channels: Optional[int] = None,
):
print(f"[EvaluatorNetwork] -> n_layers = {number_of_conv_layers}")
super(EvaluatorNetwork, self).__init__()
self.spectral_map = SpectralMapDecomposition()
self.mask_embed_dim = mask_embed_dim
if height is None:
height = width
number_of_input_channels = 2 * width + mask_embed_dim
norm_layer = functools.partial(
nn.InstanceNorm2d, affine=False, track_running_stats=False
)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
sequence = [
nn.Conv2d(
number_of_input_channels,
number_of_filters,
kernel_size=4,
stride=2,
padding=1,
),
nn.LeakyReLU(0.2, True),
]
in_channels = number_of_filters
for n in range(1, number_of_conv_layers):
if n < number_of_conv_layers - 1:
if n <= 4:
out_channels = in_channels * 2
else:
out_channels = in_channels // 2
else:
out_channels = in_channels
sequence += [
nn.Conv2d(
in_channels,
out_channels,
kernel_size=4,
stride=2,
padding=1,
bias=use_bias,
),
norm_layer(out_channels),
nn.LeakyReLU(0.2, True),
]
in_channels = out_channels
kernel_size_width = width // 2 ** number_of_conv_layers
kernel_size_height = height // 2 ** number_of_conv_layers
sequence += [nn.AvgPool2d(kernel_size=(kernel_size_height, kernel_size_width))]
if num_output_channels is None:
num_output_channels = width
sequence += [
nn.Conv2d(
in_channels, num_output_channels, kernel_size=1, stride=1, padding=0
)
]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
self.apply(reconstruction.init_func)
def forward(
self,
input_tensor: torch.Tensor,
mask_embedding: Optional[torch.Tensor] = None,
mask: Optional[torch.Tensor] = None,
):
"""Computes scores for each k-space column.
Args:
input_tensor(torch.Tensor): Batch of reconstructed images,
as produced by :class:`models.reconstruction.ReconstructorNetwork`.
mask_embedding(Optional[torch.Tensor]): Corresponding batch of mask embeddings
produced by :class:`models.reconstruction.ReconstructorNetwork`, if needed.
mask(Optional[torch.Tensor]): Corresponding masks arrays, if needed.
Returns:
torch.Tensor: Evaluator score for each k-space column in each image in the batch.
"""
spectral_map_and_mask_embedding = self.spectral_map(
input_tensor, mask_embedding, mask
)
return self.model(spectral_map_and_mask_embedding).squeeze(3).squeeze(2)
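# --- Usage sketch (illustrative; added for documentation, not used by the
# training pipeline). Assumes the pre-1.8 PyTorch FFT API relied on by
# fft_utils; the shapes below follow the defaults (width=128, mask_embed_dim=6).
if __name__ == "__main__":
    evaluator = EvaluatorNetwork(width=128)
    images = torch.randn(2, 2, 128, 128)     # two-channel reconstructions
    embedding = torch.randn(2, 6, 128, 128)  # mask embedding tiled to image size
    scores = evaluator(images, embedding)
    print(scores.shape)  # expected: (2, 128), one score per k-space column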
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/models/evaluator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import numpy as np
import torch
import torch.utils.data
import activemri.experimental.cvpr19_models.models.fft_utils as fft_utils
class Slice(torch.utils.data.Dataset):
def __init__(
self,
transform,
dicom_root,
which="train",
resolution=320,
scan_type=None,
num_volumes=None,
num_rand_slices=None,
):
self.transform = transform
self.dataset = _DicomDataset(
dicom_root / str(resolution) / which, scan_type, num_volumes=num_volumes
)
self.num_slices = self.dataset.metadata["num_slices"]
self.num_rand_slices = num_rand_slices
self.rng = np.random.RandomState()
def __getitem__(self, i):
i = int(i)
if self.num_rand_slices is None:
volume_i, slice_i = divmod(i, self.num_slices)
else:
volume_i = (i * self.num_slices // self.num_rand_slices) // self.num_slices
slice_ids = list(range(self.num_slices))
self.rng.seed(seed=volume_i)
self.rng.shuffle(slice_ids)
slice_i = slice_ids[i % self.num_rand_slices]
volume, volume_metadata = self.dataset[volume_i]
slice = volume[slice_i : slice_i + 1]
slice = slice.astype(np.float32)
return self.transform(slice, volume_metadata["mean"], volume_metadata["std"])
def __len__(self):
if self.num_rand_slices is None:
return len(self.dataset) * self.num_slices
else:
return len(self.dataset) * self.num_rand_slices
class DicomDataTransform:
    # If `fixed_seed` is None and `seed_per_image` is True, masks will be generated
    # with a unique seed per image, computed as `seed = int(1009 * image.sum().abs())`.
def __init__(self, mask_func, fixed_seed=None, seed_per_image=False):
self.mask_func = mask_func
self.fixed_seed = fixed_seed
self.seed_per_image = seed_per_image
def __call__(self, image, mean, std):
image = (image - mean) / (std + 1e-12)
image = torch.from_numpy(image)
image = fft_utils.dicom_to_0_1_range(image)
shape = np.array(image.shape)
seed = (
int(1009 * image.sum().abs())
if self.fixed_seed is None and self.seed_per_image
else self.fixed_seed
)
mask = self.mask_func(shape, seed) if self.mask_func is not None else None
image = torch.cat([image, torch.zeros_like(image)], dim=0)
return mask, image
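# Note: with seed_per_image=True and fixed_seed=None, the mask seed is derived
# deterministically from the image content, so a given slice receives the same
# mask every epoch while different slices receive different masks.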
class _DicomDataset:
def __init__(self, root, scan_type=None, num_volumes=None):
        with open(os.path.join(root, "metadata.json")) as f:
            self.metadata = json.load(f)
shape = (
len(self.metadata["volumes"]),
self.metadata["num_slices"],
self.metadata["resolution"],
self.metadata["resolution"],
)
self.volumes = np.memmap(
os.path.join(root, "data.bin"), self.metadata["dtype"], "r"
).reshape(shape)
volume_ids = []
for i, volume in enumerate(self.metadata["volumes"]):
if scan_type == "all" or volume["scan_type"] == scan_type:
volume_ids.append(i)
if num_volumes is not None:
rng = np.random.RandomState(1234)
rng.shuffle(volume_ids)
volume_ids = volume_ids[:num_volumes]
self.volume_ids = {i: id for i, id in enumerate(volume_ids)}
def __getitem__(self, i):
""" returns (data: 4d array, metadata: dict) """
id = self.volume_ids[i]
return self.volumes[id], self.metadata["volumes"][id]
def __len__(self):
return len(self.volume_ids)
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/data/dicom_data_loader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pathlib
import numpy as np
import torch
import torch.utils.data
from . import dicom_data_loader, masking_utils, raw_data_loader
def get_train_valid_loader(
dataset_dir,
batch_size,
num_workers=4,
pin_memory=False,
which_dataset="KNEE",
mask_type="basic",
rnl_params=None,
num_volumes_train=None,
num_volumes_val=None,
):
if which_dataset == "KNEE":
mask_func = masking_utils.get_mask_func(
mask_type, which_dataset, rnl_params=rnl_params
)
dicom_root = pathlib.Path(dataset_dir)
data_transform = dicom_data_loader.DicomDataTransform(
mask_func, fixed_seed=None, seed_per_image=True
)
train_data = dicom_data_loader.Slice(
data_transform,
dicom_root,
which="train",
resolution=128,
scan_type="all",
num_volumes=num_volumes_train,
num_rand_slices=None,
)
valid_data = dicom_data_loader.Slice(
data_transform,
dicom_root,
which="val",
resolution=128,
scan_type="all",
num_volumes=num_volumes_val,
num_rand_slices=None,
)
elif which_dataset == "KNEE_RAW":
mask_func = masking_utils.get_mask_func(
mask_type, which_dataset, rnl_params=rnl_params
)
raw_root = dataset_dir
if not os.path.isdir(raw_root):
            raise ImportError(raw_root + " does not exist. Change to the right path.")
data_transform = raw_data_loader.RawDataTransform(
mask_func, fixed_seed=None, seed_per_image=False
)
train_data = raw_data_loader.RawSliceData(
os.path.join(raw_root, "knee_singlecoil_train"),
transform=data_transform,
num_cols=368,
num_volumes=num_volumes_train,
)
data_transform = raw_data_loader.RawDataTransform(
mask_func, fixed_seed=None, seed_per_image=True
)
valid_data = raw_data_loader.RawSliceData(
os.path.join(raw_root, "knee_singlecoil_val"),
transform=data_transform,
num_cols=368,
num_volumes=num_volumes_val,
custom_split="val",
)
else:
raise ValueError
def init_fun(_):
return np.random.seed(None)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=batch_size,
sampler=None,
shuffle=True,
num_workers=num_workers,
worker_init_fn=init_fun,
pin_memory=pin_memory,
drop_last=True,
)
valid_loader = torch.utils.data.DataLoader(
valid_data,
batch_size=batch_size,
sampler=None,
shuffle=True,
num_workers=num_workers,
worker_init_fn=init_fun,
pin_memory=pin_memory,
drop_last=True,
)
return train_loader, valid_loader
def get_test_loader(
dataset_dir,
batch_size,
num_workers=2,
pin_memory=False,
which_dataset="KNEE",
mask_type="basic",
rnl_params=None,
):
if which_dataset == "KNEE":
mask_func = masking_utils.get_mask_func(
mask_type, which_dataset, rnl_params=rnl_params
)
dicom_root = pathlib.Path(dataset_dir)
data_transform = dicom_data_loader.DicomDataTransform(
mask_func, fixed_seed=None, seed_per_image=True
)
test_data = dicom_data_loader.Slice(
data_transform,
dicom_root,
which="public_leaderboard",
resolution=128,
scan_type="all",
num_volumes=None,
num_rand_slices=None,
)
def init_fun(_):
return np.random.seed()
data_loader = torch.utils.data.DataLoader(
test_data,
batch_size=batch_size,
sampler=None,
shuffle=False,
num_workers=num_workers,
worker_init_fn=init_fun,
pin_memory=pin_memory,
drop_last=True,
)
elif which_dataset == "KNEE_RAW":
mask_func = masking_utils.get_mask_func(
mask_type, which_dataset, rnl_params=rnl_params
)
raw_root = dataset_dir
if not os.path.isdir(raw_root):
            raise ImportError(raw_root + " does not exist. Change to the right path.")
data_transform = raw_data_loader.RawDataTransform(
mask_func, fixed_seed=None, seed_per_image=True
)
test_data = raw_data_loader.RawSliceData(
raw_root + "/knee_singlecoil_val",
transform=data_transform,
num_cols=368,
custom_split="test",
)
def init_fun(_):
return np.random.seed(None)
data_loader = torch.utils.data.DataLoader(
test_data,
batch_size=batch_size,
sampler=None,
shuffle=False,
num_workers=num_workers,
worker_init_fn=init_fun,
pin_memory=pin_memory,
drop_last=True,
)
else:
raise ValueError
return data_loader
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/data/base_data_loader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import base_data_loader
def create_data_loaders(options, is_test=False):
if not is_test:
train_loader, valid_loader = base_data_loader.get_train_valid_loader(
options.dataset_dir,
batch_size=options.batchSize,
num_workers=options.nThreads,
pin_memory=True,
which_dataset=options.dataroot,
mask_type=options.mask_type,
rnl_params=options.rnl_params,
num_volumes_train=options.num_volumes_train,
num_volumes_val=options.num_volumes_val,
)
return train_loader, valid_loader
else:
test_loader = base_data_loader.get_test_loader(
options.dataset_dir,
batch_size=options.batchSize,
num_workers=0,
pin_memory=True,
which_dataset=options.dataroot,
mask_type=options.mask_type,
rnl_params=options.rnl_params,
)
return test_loader
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/data/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
def get_mask_func(mask_type, which_dataset, rnl_params=None):
# Whether the number of lines is random or not
random_num_lines = mask_type[-4:] == "_rnl"
if "symmetric_basic" in mask_type:
logging.info(
f"Mask is symmetric uniform choice with random_num_lines={random_num_lines}."
)
return SymmetricUniformChoiceMaskFunc(
[0.125],
[4],
which_dataset,
random_num_lines=random_num_lines,
rnl_params=rnl_params,
)
if "basic" in mask_type:
# First two parameters are ignored if `random_num_lines` is True
logging.info(
f"Mask is fixed acceleration mask with random_num_lines={random_num_lines}."
)
return BasicMaskFunc(
[0.125],
[4],
which_dataset,
random_num_lines=random_num_lines,
rnl_params=rnl_params,
)
if "low_to_high" in mask_type:
logging.info(
f"Mask is symmetric low to high with random_num_lines={random_num_lines}."
)
return SymmetricLowToHighMaskFunc(
[0.125],
[4],
which_dataset,
random_num_lines=random_num_lines,
rnl_params=rnl_params,
)
if "symmetric_grid" in mask_type:
logging.info("Mask is symmetric grid.")
return SymmetricUniformGridMaskFunc(
[], [], which_dataset, random_num_lines=True, rnl_params=rnl_params
)
if "grid" in mask_type:
logging.info("Mask is grid (not symmetric).")
return UniformGridMaskFunc(
[], [], which_dataset, random_num_lines=True, rnl_params=rnl_params
)
raise ValueError(f"Invalid mask type: {mask_type}.")
class MaskFunc:
def __init__(
self,
center_fractions,
accelerations,
which_dataset,
random_num_lines=False,
rnl_params=None,
):
if len(center_fractions) != len(accelerations):
raise ValueError(
"Number of center fractions should match number of accelerations"
)
self.center_fractions = center_fractions
self.accelerations = accelerations
self.random_num_lines = random_num_lines
if rnl_params is None:
# The lines below give approx. 4x acceleration on average.
self.min_lowf_lines = 10 if which_dataset != "KNEE_RAW" else 30
self.max_lowf_lines = 12 if which_dataset != "KNEE_RAW" else 32
self.highf_beta_alpha = 1
self.highf_beta_beta = 5
else:
params = [int(x) for x in rnl_params.split(",")]
assert len(params) == 4
self.min_lowf_lines = params[0]
self.max_lowf_lines = params[1]
self.highf_beta_alpha = params[2]
self.highf_beta_beta = params[3]
self.rng = np.random.RandomState()
def __call__(self, shape, seed=None):
if len(shape) < 3:
raise ValueError("Shape should have 3 or more dimensions")
self.rng.seed(seed)
num_cols = shape[-2]
# Determine number of low and high frequency lines to scan
if self.random_num_lines:
# These are guaranteed to be an even number (useful for symmetric masks)
num_low_freqs = self.rng.choice(
range(self.min_lowf_lines, self.max_lowf_lines, 2)
)
num_high_freqs = (
int(
self.rng.beta(self.highf_beta_alpha, self.highf_beta_beta)
* (num_cols - num_low_freqs)
// 2
)
* 2
)
else:
choice = self.rng.randint(0, len(self.accelerations))
center_fraction = self.center_fractions[choice]
acceleration = self.accelerations[choice]
num_low_freqs = int(round(num_cols * center_fraction))
num_high_freqs = int(num_cols // acceleration - num_low_freqs)
# Create the mask
mask = self.create_lf_focused_mask(num_cols, num_high_freqs, num_low_freqs)
# Reshape the mask
mask_shape = [1 for _ in shape]
mask_shape[-1] = num_cols
mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
return mask
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
p = num_high_freqs / (num_cols - num_low_freqs)
mask = self.rng.uniform(size=num_cols) < p
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad : pad + num_low_freqs] = True
return mask
class BasicMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
hf_cols = self.rng.choice(
np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False
)
hf_cols[hf_cols >= (num_cols - num_low_freqs + 1) // 2] += num_low_freqs
mask[hf_cols] = True
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad : pad + num_low_freqs] = True
mask = np.fft.ifftshift(mask, axes=0)
return mask
class SymmetricUniformChoiceMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
num_cols //= 2
num_low_freqs //= 2
num_high_freqs //= 2
hf_cols = self.rng.choice(
np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False
)
mask[hf_cols] = True
pad = num_cols - num_low_freqs
mask[pad:num_cols] = True
mask[: -(num_cols + 1) : -1] = mask[:num_cols]
mask = np.fft.ifftshift(mask, axes=0)
return mask
class UniformGridMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
acceleration = self.rng.choice([4, 8, 16])
hf_cols = np.arange(acceleration, num_cols, acceleration)
mask[hf_cols] = True
mask[: num_low_freqs // 2] = mask[-(num_low_freqs // 2) :] = True
return mask
class SymmetricLowToHighMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
num_cols //= 2
num_low_freqs //= 2
num_high_freqs //= 2
num_low_freqs += num_high_freqs
pad = num_cols - num_low_freqs
mask[pad:num_cols] = True
mask[: -(num_cols + 1) : -1] = mask[:num_cols]
mask = np.fft.ifftshift(mask, axes=0)
return mask
class SymmetricUniformGridMaskFunc(MaskFunc):
def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):
mask = np.zeros([num_cols])
acceleration = self.rng.choice([4, 8, 16])
num_cols //= 2
num_low_freqs //= 2
hf_cols = np.arange(acceleration, num_cols, acceleration)
mask[hf_cols] = True
mask[:num_low_freqs] = True
mask[: -(num_cols + 1) : -1] = mask[:num_cols]
return mask
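# --- Usage sketch (illustrative; added for documentation, not used by the
# training pipeline). Builds a mask function and samples one mask; note that
# MaskFunc reads the number of k-space columns from shape[-2].
if __name__ == "__main__":
    mask_func = get_mask_func("basic", "KNEE")
    mask = mask_func((1, 128, 128), seed=0)
    print(mask.shape)  # expected: torch.Size([1, 1, 128])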
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/data/masking_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pathlib
import h5py
import numpy as np
import torch
import torch.utils.data
def ifftshift(x, dim=None):
if dim is None:
dim = tuple(range(x.dim()))
        shift = [(d + 1) // 2 for d in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
def fftshift(x, dim=None):
if dim is None:
dim = tuple(range(x.dim()))
        shift = [d // 2 for d in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def roll(x, shift, dim):
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
class RawSliceData(torch.utils.data.Dataset):
def __init__(
self,
root,
transform,
num_cols=None,
num_volumes=None,
num_rand_slices=None,
custom_split=None,
):
self.transform = transform
self.examples = []
self.num_rand_slices = num_rand_slices
self.rng = np.random.RandomState(1234)
files = []
        for fname in list(pathlib.Path(root).iterdir()):
            # Close each HDF5 file promptly instead of leaking the handle
            with h5py.File(fname, "r") as data:
                if num_cols is not None and data["kspace"].shape[2] != num_cols:
                    continue
            files.append(fname)
if custom_split is not None:
split_info = []
with open(f"data/splits/raw_{custom_split}.txt") as f:
for line in f:
split_info.append(line.rsplit("\n")[0])
files = [f for f in files if f.name in split_info]
if num_volumes is not None:
self.rng.shuffle(files)
files = files[:num_volumes]
        for volume_i, fname in enumerate(sorted(files)):
            with h5py.File(fname, "r") as data:
                num_slices = data["kspace"].shape[0]
            if num_rand_slices is None:
                self.examples += [(fname, slice) for slice in range(num_slices)]
            else:
                slice_ids = list(range(num_slices))
                self.rng.seed(seed=volume_i)
                self.rng.shuffle(slice_ids)
                self.examples += [
                    (fname, slice) for slice in slice_ids[:num_rand_slices]
                ]
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
fname, slice = self.examples[i]
with h5py.File(fname, "r") as data:
kspace = data["kspace"][slice]
return self.transform(kspace, data.attrs)
class RawDataTransform:
def __init__(self, mask_func, fixed_seed=None, seed_per_image=False):
self.mask_func = mask_func
self.fixed_seed = fixed_seed
self.seed_per_image = seed_per_image
def __call__(self, kspace, attrs):
kspace = torch.from_numpy(np.stack([kspace.real, kspace.imag], axis=-1))
kspace = ifftshift(kspace, dim=(0, 1))
image = torch.ifft(kspace, 2, normalized=False)
image = ifftshift(image, dim=(0, 1))
# norm = torch.sqrt(image[..., 0] ** 2 + image[..., 1] ** 2).max()
# 5.637766165023095e-08, 7.072103529760345e-07, 5.471710210258607e-06
# normalize by the mean norm of training images.
image /= 7.072103529760345e-07
kspace /= 7.072103529760345e-07
shape = np.array(kspace.shape)
seed = (
int(1009 * image.sum().abs())
if self.fixed_seed is None and self.seed_per_image
else self.fixed_seed
)
mask = self.mask_func(shape, seed)
return mask, image, kspace
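# Hedged usage sketch (editor's addition): how the pieces above are typically
# wired together. The dataset root is a placeholder path, and mask_func is any
# callable with the (shape, seed) interface that RawDataTransform expects.
def _demo_raw_data_pipeline(mask_func):
    transform = RawDataTransform(mask_func, fixed_seed=None, seed_per_image=True)
    dataset = RawSliceData("/path/to/singlecoil_train", transform, num_volumes=1)
    mask, image, kspace = dataset[0]  # one transformed slice
    return mask, image, kspace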
| active-mri-acquisition-main | activemri/experimental/cvpr19_models/data/raw_data_loader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import fastmri.models
import torch
import activemri.models
# noinspection PyAbstractClass
class Unet(activemri.models.Reconstructor):
def __init__(
self, in_chans=2, out_chans=2, chans=32, num_pool_layers=4, drop_prob=0.0
):
super().__init__()
self.unet = fastmri.models.Unet(
in_chans,
out_chans,
chans=chans,
num_pool_layers=num_pool_layers,
drop_prob=drop_prob,
)
def forward( # type: ignore
self, image: torch.Tensor, mean: torch.Tensor, std: torch.Tensor
) -> Dict[str, Any]:
output = self.unet(image).squeeze(1)
std = std.unsqueeze(1).unsqueeze(2)
mean = mean.unsqueeze(1).unsqueeze(2)
reconstruction = output * std + mean
return {"reconstruction": reconstruction}
def init_from_checkpoint(self, checkpoint: Dict[str, Any]) -> Optional[Any]:
self.load_state_dict(checkpoint["state_dict"])
return None
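# Hedged sketch (editor's addition): the wrapper takes a normalized image plus
# the per-image mean/std used to normalize it, and returns the de-normalized
# reconstruction under the "reconstruction" key. Shapes below are illustrative.
def _demo_unet_wrapper():
    model = Unet(in_chans=1, out_chans=1, chans=8, num_pool_layers=2)
    image = torch.randn(2, 1, 64, 64)  # (batch, channels, height, width)
    mean, std = torch.zeros(2), torch.ones(2)
    out = model(image, mean, std)
    return out["reconstruction"].shape  # -> torch.Size([2, 64, 64])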
| active-mri-acquisition-main | activemri/models/fastmri_unet_wrapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Any, Dict, Optional
import torch.nn
class Reconstructor(torch.nn.Module):
def __init__(self, **kwargs):
super().__init__()
def forward(self, *args, **kwargs) -> Dict[str, Any]:
pass
@abc.abstractmethod
def init_from_checkpoint(self, checkpoint: Dict[str, Any]) -> Optional[Any]:
pass
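# Hedged sketch (editor's addition): the minimal contract a concrete
# reconstructor must satisfy to be usable by the environment.
class _IdentityReconstructor(Reconstructor):
    """ Toy reconstructor that returns its input unchanged. """

    def forward(self, image: torch.Tensor) -> Dict[str, Any]:  # type: ignore
        return {"reconstruction": image}

    def init_from_checkpoint(self, checkpoint: Dict[str, Any]) -> Optional[Any]:
        return None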
| active-mri-acquisition-main | activemri/models/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch
import activemri.experimental.cvpr19_models.models.reconstruction as cvpr19_reconstruction
import activemri.models
# This is just a wrapper for the model in cvpr19_models folder
class CVPR19Reconstructor(activemri.models.Reconstructor):
def __init__(
self,
number_of_encoder_input_channels: int = 2,
number_of_decoder_output_channels: int = 3,
number_of_filters: int = 128,
dropout_probability: float = 0.0,
number_of_layers_residual_bottleneck: int = 6,
number_of_cascade_blocks: int = 3,
mask_embed_dim: int = 6,
padding_type: str = "reflect",
n_downsampling: int = 3,
img_width: int = 128,
use_deconv: bool = True,
):
super().__init__()
self.reconstructor = cvpr19_reconstruction.ReconstructorNetwork(
number_of_encoder_input_channels=number_of_encoder_input_channels,
number_of_decoder_output_channels=number_of_decoder_output_channels,
number_of_filters=number_of_filters,
dropout_probability=dropout_probability,
number_of_layers_residual_bottleneck=number_of_layers_residual_bottleneck,
number_of_cascade_blocks=number_of_cascade_blocks,
mask_embed_dim=mask_embed_dim,
padding_type=padding_type,
n_downsampling=n_downsampling,
img_width=img_width,
use_deconv=use_deconv,
)
def forward( # type: ignore
self, zero_filled_input: torch.Tensor, mask: torch.Tensor
) -> Dict[str, Any]:
reconstructed_image, uncertainty_map, mask_embedding = self.reconstructor(
zero_filled_input, mask
)
reconstructed_image = reconstructed_image.permute(0, 2, 3, 1)
uncertainty_map = uncertainty_map.permute(0, 2, 3, 1)
return {
"reconstruction": reconstructed_image,
"uncertainty_map": uncertainty_map,
"mask_embedding": mask_embedding,
}
def init_from_checkpoint(self, checkpoint: Dict[str, Any]):
return self.reconstructor.init_from_checkpoint(checkpoint)
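# Hedged usage sketch (editor's addition): illustrates the dict contract of
# forward(); the input shapes are schematic and depend on how the underlying
# cvpr19 ReconstructorNetwork was configured.
def _demo_cvpr19_reconstructor(
    model: CVPR19Reconstructor, zero_filled: torch.Tensor, mask: torch.Tensor
) -> Dict[str, Any]:
    out = model(zero_filled, mask)
    # reconstruction and uncertainty_map come back channel-last: (B, H, W, C)
    return out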
| active-mri-acquisition-main | activemri/models/cvpr19_reconstructor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
activemri.baselines.ddqn.py
=======================================
Baseline implementation of Double DQN, as described in
*Van Hasselt, Hado, Arthur Guez, and David Silver. "Deep reinforcement learning with
double q-learning." arXiv preprint arXiv:1509.06461 (2015)*.
"""
import argparse
import logging
import math
import os
import pickle
import random
import sys
import time
from typing import Any, Dict, List, Optional, Tuple
import filelock
import numpy as np
import tensorboardX
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import activemri.envs.envs as mri_envs
import activemri.experimental.cvpr19_models.models.evaluator as cvpr19_evaluator
from . import Policy, RandomPolicy, evaluation, replay_buffer
def _encode_obs_dict(obs: Dict[str, Any]) -> torch.Tensor:
reconstruction = obs["reconstruction"].permute(0, 3, 1, 2)
mask_embedding = obs["extra_outputs"]["mask_embedding"]
mask = obs["mask"]
batch_size, num_channels, img_height, img_width = reconstruction.shape
transformed_obs = torch.zeros(
batch_size, num_channels, img_height + 2, img_width
).float()
transformed_obs[..., :img_height, :] = reconstruction
# The second to last row is the mask
transformed_obs[..., img_height, :] = mask.unsqueeze(1)
# The last row is the mask embedding (padded with 0s if necessary)
    if mask_embedding is not None:
mask_embed_dim = len(mask_embedding[0])
transformed_obs[..., img_height + 1, :mask_embed_dim] = mask_embedding[
:, :, 0, 0
]
else:
transformed_obs[:, :, img_height + 1, 0] = np.nan
return transformed_obs
def _decode_obs_tensor(
obs_tensor: torch.Tensor, mask_embed_dim: int
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
reconstruction = obs_tensor[..., :-2, :]
bs = obs_tensor.shape[0]
if torch.isnan(obs_tensor[0, 0, -1, 0]).item() == 1:
assert mask_embed_dim == 0
mask_embedding = None
else:
mask_embedding = obs_tensor[:, 0, -1, :mask_embed_dim].view(bs, -1, 1, 1)
mask_embedding = mask_embedding.repeat(
1, 1, reconstruction.shape[2], reconstruction.shape[3]
)
mask = obs_tensor[:, 0, -2, :]
mask = mask.contiguous().view(bs, 1, 1, -1)
return reconstruction, mask, mask_embedding
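# Hedged self-check (editor's addition): _encode_obs_dict and _decode_obs_tensor
# are inverses over the fields they share. The dict below mimics the env's
# observation format with a fake channel-last reconstruction and no embedding.
def _demo_obs_round_trip():
    obs = {
        "reconstruction": torch.randn(3, 32, 32, 2),  # (batch, H, W, channels)
        "extra_outputs": {"mask_embedding": None},
        "mask": torch.zeros(3, 32),
    }
    encoded = _encode_obs_dict(obs)  # -> (3, 2, 34, 32): image rows + mask + embedding rows
    reconstruction, mask, mask_embedding = _decode_obs_tensor(encoded, mask_embed_dim=0)
    assert reconstruction.shape == (3, 2, 32, 32)
    assert mask.shape == (3, 1, 1, 32) and mask_embedding is None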
def _get_epsilon(steps_done, opts):
return opts.epsilon_end + (opts.epsilon_start - opts.epsilon_end) * math.exp(
-1.0 * steps_done / opts.epsilon_decay
)
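# Hedged sketch (editor's addition): the schedule above decays epsilon
# exponentially from epsilon_start toward epsilon_end with time constant
# epsilon_decay, i.e. eps(t) = end + (start - end) * exp(-t / decay).
def _demo_epsilon_schedule():
    opts = argparse.Namespace(epsilon_start=1.0, epsilon_end=0.01, epsilon_decay=1000)
    return [_get_epsilon(t, opts) for t in (0, 1000, 10000)]  # ~[1.0, 0.37, 0.01]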
# noinspection PyAbstractClass
class Flatten(nn.Module):
# noinspection PyMethodMayBeStatic
def forward(self, x):
return x.view(x.size(0), -1)
class SimpleMLP(nn.Module):
""" Value network used for dataset specific DDQN model. """
def __init__(
self,
budget: int,
image_width: int,
num_hidden_layers: int = 2,
hidden_size: int = 32,
ignore_mask: bool = True,
):
super().__init__()
self.ignore_mask = ignore_mask
self.num_inputs = budget if self.ignore_mask else image_width
num_actions = image_width
self.linear1 = nn.Sequential(nn.Linear(self.num_inputs, hidden_size), nn.ReLU())
hidden_layers = []
for i in range(num_hidden_layers):
hidden_layers.append(
nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.ReLU())
)
self.hidden = nn.Sequential(*hidden_layers)
self.output = nn.Linear(hidden_size, num_actions)
self.model = nn.Sequential(self.linear1, self.hidden, self.output)
def forward(self, obs: torch.Tensor) -> torch.Tensor:
"""Predicts action values.
Args:
            obs(torch.Tensor): The observation tensor. Once decoded, only the mask
                information is used. If constructed with ``ignore_mask=True``, the mask
                is further reduced to a one-hot encoding of the current time step.
        Returns:
            torch.Tensor: Q-values for all actions at the given observation.
        Note:
            Values corresponding to active k-space columns are penalized by
            subtracting ``1e10``, which effectively removes them from consideration.
"""
_, mask, _ = _decode_obs_tensor(obs, 0)
previous_actions = mask.squeeze()
if self.ignore_mask:
input_tensor = torch.zeros(obs.shape[0], self.num_inputs).to(obs.device)
time_steps = previous_actions.sum(1).unsqueeze(1)
# We allow the model to receive observations that are over budget during test
# Code below randomizes the input to the model for these observations
index_over_budget = (time_steps >= self.num_inputs).squeeze()
time_steps = time_steps.clamp(0, self.num_inputs - 1)
input_tensor.scatter_(1, time_steps.long(), 1)
input_tensor[index_over_budget] = torch.randn_like(
input_tensor[index_over_budget]
)
        else:
            # mask comes back from _decode_obs_tensor as (batch, 1, 1, width);
            # flatten it so the MLP receives a (batch, width) input
            input_tensor = mask.view(mask.shape[0], -1)
value = self.model(input_tensor)
return value - 1e10 * previous_actions
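# Hedged sketch (editor's addition): SimpleMLP only looks at the mask row of the
# encoded observation, so a minimal observation dict (built in the layout that
# _encode_obs_dict produces) is enough to exercise it.
def _demo_simple_mlp():
    budget, width = 10, 32
    model = SimpleMLP(budget, width)
    obs = {
        "reconstruction": torch.zeros(4, 32, width, 2),
        "extra_outputs": {"mask_embedding": None},
        "mask": torch.zeros(4, width),
    }
    q_values = model(_encode_obs_dict(obs))
    return q_values.shape  # -> torch.Size([4, 32]), one Q-value per k-space column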
class EvaluatorBasedValueNetwork(nn.Module):
""" Value network based on Zhang et al., CVPR'19 evaluator architecture. """
def __init__(
self, image_width: int, mask_embed_dim: int, legacy_offset: Optional[int] = None
):
super().__init__()
num_actions = image_width
if legacy_offset:
num_actions -= 2 * legacy_offset
self.legacy_offset = legacy_offset
self.evaluator = cvpr19_evaluator.EvaluatorNetwork(
number_of_filters=128,
number_of_conv_layers=4,
use_sigmoid=False,
width=image_width,
mask_embed_dim=mask_embed_dim,
num_output_channels=num_actions,
)
self.mask_embed_dim = mask_embed_dim
def forward(self, obs: torch.Tensor) -> torch.Tensor:
"""Predicts action values.
Args:
obs(torch.Tensor): The observation tensor.
Returns:
torch.Tensor: Q-values for all actions at the given observation.
Note:
            Values corresponding to active k-space columns are penalized by
            subtracting ``1e10``, which effectively removes them from consideration.
"""
reconstruction, mask, mask_embedding = _decode_obs_tensor(
obs, self.evaluator.mask_embed_dim
)
qvalue = self.evaluator(reconstruction, mask_embedding)
if self.legacy_offset:
mask = mask[..., self.legacy_offset : -self.legacy_offset]
return qvalue - 1e10 * mask.squeeze()
def _get_model(options):
if options.dqn_model_type == "simple_mlp":
return SimpleMLP(options.budget, options.image_width)
if options.dqn_model_type == "evaluator":
return EvaluatorBasedValueNetwork(
options.image_width,
options.mask_embedding_dim,
legacy_offset=getattr(options, "legacy_offset", None),
)
raise ValueError("Unknown model specified for DQN.")
class DDQN(nn.Module, Policy):
"""Implementation of Double DQN value network.
The configuration is given by the ``opts`` argument, which must contain the following
fields:
- mask_embedding_dim(int): See
:class:`cvpr19_models.models.evaluator.EvaluatorNetwork`.
- gamma(float): Discount factor for target updates.
- dqn_model_type(str): Describes the architecture of the neural net. Options
are "simple_mlp" and "evaluator", to use :class:`SimpleMLP` and
:class:`EvaluatorBasedValueNetwork`, respectively.
- budget(int): The environment's budget.
- image_width(int): The width of the input images.
Args:
device(``torch.device``): Device to use.
memory(optional(``replay_buffer.ReplayMemory``)): Replay buffer to sample transitions
from. Can be ``None``, for example, if this is a target network.
opts(``argparse.Namespace``): Options for the algorithm as explained above.
"""
def __init__(
self,
device: torch.device,
memory: Optional[replay_buffer.ReplayMemory],
opts: argparse.Namespace,
):
super().__init__()
self.model = _get_model(opts)
self.memory = memory
self.optimizer = optim.Adam(self.parameters(), lr=opts.dqn_learning_rate)
self.opts = opts
self.device = device
self.random_sampler = RandomPolicy()
self.to(device)
def add_experience(
self,
observation: np.array,
action: int,
next_observation: np.array,
reward: float,
done: bool,
):
self.memory.push(observation, action, next_observation, reward, done)
def update_parameters(self, target_net: nn.Module) -> Optional[Dict[str, Any]]:
self.model.train()
batch = self.memory.sample()
if batch is None:
return None
observations = batch["observations"].to(self.device)
next_observations = batch["next_observations"].to(self.device)
actions = batch["actions"].to(self.device)
rewards = batch["rewards"].to(self.device).squeeze()
dones = batch["dones"].to(self.device)
not_done_mask = dones.logical_not().squeeze()
# Compute Q-values and get best action according to online network
output_cur_step = self.forward(observations)
all_q_values_cur = output_cur_step
q_values = all_q_values_cur.gather(1, actions.unsqueeze(1))
# Compute target values using the best action found
if self.opts.gamma == 0.0:
target_values = rewards
else:
with torch.no_grad():
all_q_values_next = self.forward(next_observations)
target_values = torch.zeros(observations.shape[0], device=self.device)
del observations
if not_done_mask.any().item() != 0:
best_actions = all_q_values_next.detach().max(1)[1]
target_values[not_done_mask] = (
target_net.forward(next_observations)
.gather(1, best_actions.unsqueeze(1))[not_done_mask]
.squeeze()
.detach()
)
target_values = self.opts.gamma * target_values + rewards
# loss = F.mse_loss(q_values, target_values.unsqueeze(1))
loss = F.smooth_l1_loss(q_values, target_values.unsqueeze(1))
self.optimizer.zero_grad()
loss.backward()
# Compute total gradient norm (for logging purposes) and then clip gradients
        grad_norm = 0.0  # accumulated as a float via the .item() calls below
for p in list(filter(lambda p: p.grad is not None, self.parameters())):
grad_norm += p.grad.data.norm(2).item() ** 2
grad_norm = grad_norm ** 0.5
torch.nn.utils.clip_grad_value_(self.parameters(), 1)
self.optimizer.step()
torch.cuda.empty_cache()
return {
"loss": loss,
"grad_norm": grad_norm,
"q_values_mean": q_values.detach().mean().cpu().numpy(),
"q_values_std": q_values.detach().std().cpu().numpy(),
}
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Predicts action values.
Args:
x(torch.Tensor): The observation tensor.
Returns:
            torch.Tensor: The predicted Q-values.
        Note:
            Values corresponding to active k-space columns are penalized by
            subtracting ``1e10``, which effectively removes them from consideration.
"""
return self.model(x)
def get_action( # type: ignore
self, obs: Dict[str, Any], eps_threshold: float = 0.0
) -> List[int]:
"""Returns an action sampled from an epsilon-greedy policy.
With probability epsilon sample a random k-space column (ignoring active columns),
otherwise return the column with the highest estimated Q-value for the observation.
Args:
            obs(dict(str, any)): The observation for which an action is required, as
                returned by :class:`activemri.envs.ActiveMRIEnv`.
eps_threshold(float): The probability of sampling a random action instead of using
a greedy action.
"""
sample = random.random()
if sample < eps_threshold:
return self.random_sampler.get_action(obs)
with torch.no_grad():
self.model.eval()
obs_tensor = _encode_obs_dict(obs)
q_values = self(obs_tensor.to(self.device))
actions = torch.argmax(q_values, dim=1) + getattr(self.opts, "legacy_offset", 0)
return actions.tolist()
def _get_folder_lock(path):
return filelock.FileLock(path, timeout=-1)
class DDQNTester:
def __init__(
self, env: mri_envs.ActiveMRIEnv, training_dir: str, device: torch.device
):
self.env = env
self.device = device
self.training_dir = training_dir
self.evaluation_dir = os.path.join(training_dir, "evaluation")
os.makedirs(self.evaluation_dir, exist_ok=True)
self.folder_lock_path = DDQNTrainer.get_lock_filename(training_dir)
self.latest_policy_path = DDQNTrainer.get_name_latest_checkpoint(
self.training_dir
)
self.best_test_score = -np.inf
self.last_time_stamp = -np.inf
self.options = None
# Initialize writer and logger
self.writer = tensorboardX.SummaryWriter(os.path.join(self.evaluation_dir))
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(threadName)s - %(levelname)s: %(message)s"
)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
ch.setLevel(logging.DEBUG)
fh = logging.FileHandler(os.path.join(self.evaluation_dir, "evaluation.log"))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
# Read the options used for training
options_file_found = False
while not options_file_found:
options_filename = DDQNTrainer.get_options_filename(self.training_dir)
with _get_folder_lock(self.folder_lock_path):
if os.path.isfile(options_filename):
self.logger.info(f"Options file found at {options_filename}.")
with open(options_filename, "rb") as f:
self.options = pickle.load(f)
options_file_found = True
if not options_file_found:
self.logger.info(f"No options file found at {options_filename}.")
self.logger.info("I will wait for five minutes before trying again.")
time.sleep(300)
        # This change is needed so that util.test_policy writes results to the correct directory
self.options.checkpoints_dir = self.evaluation_dir
os.makedirs(self.evaluation_dir, exist_ok=True)
# Initialize environment
self.options.image_width = self.env.action_space.n
self.logger.info(f"Created environment with {self.env.action_space.n} actions")
self.logger.info(f"Checkpoint dir for this job is {self.evaluation_dir}")
self.logger.info(
f"Evaluation will be done for model saved at {self.training_dir}"
)
# Initialize policy
self.policy = DDQN(device, None, self.options)
# Load info about best checkpoint tested and timestamp
self.load_tester_checkpoint_if_present()
def __call__(self):
training_done = False
while not training_done:
training_done = self.check_if_train_done()
self.logger.info(f"Is training done? {training_done}.")
checkpoint_episode, timestamp = self.load_latest_policy()
if timestamp is None or timestamp <= self.last_time_stamp:
# No new policy checkpoint to evaluate
self.logger.info(
"No new policy to evaluate. "
"I will wait for 10 minutes before trying again."
)
time.sleep(600)
continue
self.logger.info(
f"Found a new checkpoint with timestamp {timestamp}, "
f"I will start evaluation now."
)
test_scores, _ = evaluation.evaluate(
self.env,
self.policy,
self.options.num_test_episodes,
self.options.seed,
"val",
verbose=True,
)
auc_score = test_scores[self.options.reward_metric].sum(axis=1).mean()
if "mse" in self.options.reward_metric:
auc_score *= -1
self.logger.info(f"The test score for the model was {auc_score}.")
self.last_time_stamp = timestamp
if auc_score > self.best_test_score:
self.save_tester_checkpoint()
policy_path = os.path.join(self.evaluation_dir, "policy_best.pt")
self.save_policy(policy_path, checkpoint_episode)
self.best_test_score = auc_score
self.logger.info(
f"Saved DQN model with score {self.best_test_score} to {policy_path}, "
f"corresponding to episode {checkpoint_episode}."
)
def check_if_train_done(self):
with _get_folder_lock(self.folder_lock_path):
return os.path.isfile(DDQNTrainer.get_done_filename(self.training_dir))
def checkpoint(self):
self.save_tester_checkpoint()
def save_tester_checkpoint(self):
path = os.path.join(self.evaluation_dir, "tester_checkpoint.pickle")
with open(path, "wb") as f:
pickle.dump(
{
"best_test_score": self.best_test_score,
"last_time_stamp": self.last_time_stamp,
},
f,
)
def load_tester_checkpoint_if_present(self):
path = os.path.join(self.evaluation_dir, "tester_checkpoint.pickle")
if os.path.isfile(path):
with open(path, "rb") as f:
checkpoint = pickle.load(f)
self.best_test_score = checkpoint["best_test_score"]
self.last_time_stamp = checkpoint["last_time_stamp"]
self.logger.info(
f"Found checkpoint from previous evaluation run. "
f"Best Score set to {self.best_test_score}. "
f"Last Time Stamp set to {self.last_time_stamp}"
)
# noinspection PyProtectedMember
def load_latest_policy(self):
with _get_folder_lock(self.folder_lock_path):
if not os.path.isfile(self.latest_policy_path):
return None, None
timestamp = os.path.getmtime(self.latest_policy_path)
checkpoint = torch.load(self.latest_policy_path, map_location=self.device)
self.policy.load_state_dict(checkpoint["dqn_weights"])
return checkpoint["episode"], timestamp
def save_policy(self, path, episode):
torch.save(
{
"dqn_weights": self.policy.state_dict(),
"episode": episode,
"options": self.options,
},
path,
)
class DDQNTrainer:
"""DDQN Trainer for active MRI acquisition.
Configuration for the trainer is provided by argument ``options``. Must contain the
following fields:
- "checkpoints_dir"(str): The directory where the model will be saved to (or
loaded from).
- dqn_batch_size(int): The batch size to use for updates.
- dqn_burn_in(int): How many steps to do before starting updating parameters.
- dqn_normalize(bool): ``True`` if running mean/st. deviation should be maintained
for observations.
        - dqn_only_test(bool): ``True`` if the model will not be trained; the trainer
          will only attempt to read the network weights from a checkpoint (ignoring
          training-related information).
        - dqn_test_episode_freq(optional(int)): How frequently (in number of episodes)
          to run evaluation episodes.
- freq_dqn_checkpoint_save(int): How often (in episodes) to save the model.
- num_train_steps(int): How many environment steps to train for.
- replay_buffer_size(int): The capacity of the replay buffer.
- resume(bool): If true, will try to load weights from the checkpoints dir.
- num_test_episodes(int): How many test episodes to periodically evaluate for.
- seed(int): Sets the seed for the environment when running evaluation episodes.
- reward_metric(str): Which of the ``env.scores_keys()`` is used as reward. Mainly
used for logging purposes.
- target_net_update_freq(int): How often (in env's steps) to update the target
network.
Args:
options(``argparse.Namespace``): Options for the trainer.
env(``activemri.envs.ActiveMRIEnv``): Env for which the policy is trained.
device(``torch.device``): Device to use.
"""
def __init__(
self,
options: argparse.Namespace,
env: mri_envs.ActiveMRIEnv,
device: torch.device,
):
self.options = options
self.env = env
self.options.image_width = self.env.kspace_width
self.steps = 0
self.episode = 0
self.best_test_score = -np.inf
self.device = device
self.replay_memory = None
self.window_size = 1000
self.reward_images_in_window = np.zeros(self.window_size)
self.current_score_auc_window = np.zeros(self.window_size)
# ------- Init loggers ------
self.writer = tensorboardX.SummaryWriter(
os.path.join(self.options.checkpoints_dir)
)
self.logger = logging.getLogger()
logging_level = logging.DEBUG if self.options.debug else logging.INFO
self.logger.setLevel(logging_level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging_level)
formatter = logging.Formatter(
"%(asctime)s - %(threadName)s - %(levelname)s: %(message)s"
)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
fh = logging.FileHandler(
os.path.join(self.options.checkpoints_dir, "train.log")
)
fh.setLevel(logging_level)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
self.logger.info("Creating DDQN model.")
self.logger.info(
f"Creating replay buffer with capacity {options.mem_capacity}."
)
# ------- Create replay buffer and networks ------
# See _encode_obs_dict() for tensor format
self.obs_shape = (2, self.env.kspace_height + 2, self.env.kspace_width)
self.replay_memory = replay_buffer.ReplayMemory(
options.mem_capacity,
self.obs_shape,
self.options.dqn_batch_size,
self.options.dqn_burn_in,
use_normalization=self.options.dqn_normalize,
)
self.logger.info("Created replay buffer.")
self.policy = DDQN(device, self.replay_memory, self.options)
self.target_net = DDQN(device, None, self.options)
self.target_net.eval()
self.logger.info(
f"Created neural networks with {self.env.action_space.n} outputs."
)
# ------- Files used to communicate with DDQNTester ------
self.folder_lock_path = DDQNTrainer.get_lock_filename(
self.options.checkpoints_dir
)
with _get_folder_lock(self.folder_lock_path):
# Write options so that tester can read them
with open(
DDQNTrainer.get_options_filename(self.options.checkpoints_dir), "wb"
) as f:
pickle.dump(self.options, f)
# Remove previous done file since training will start over
done_file = DDQNTrainer.get_done_filename(self.options.checkpoints_dir)
if os.path.isfile(done_file):
os.remove(done_file)
@staticmethod
def get_done_filename(path):
return os.path.join(path, "DONE")
@staticmethod
def get_name_latest_checkpoint(path):
return os.path.join(path, "policy_checkpoint.pth")
@staticmethod
def get_options_filename(path):
return os.path.join(path, "options.pickle")
@staticmethod
def get_lock_filename(path):
return os.path.join(path, ".LOCK")
def _max_replay_buffer_size(self):
return min(self.options.num_train_steps, self.options.replay_buffer_size)
def load_checkpoint_if_needed(self):
if self.options.dqn_only_test or self.options.resume:
policy_path = os.path.join(self.options.dqn_weights_path)
if os.path.isfile(policy_path):
self.load(policy_path)
self.logger.info(f"Loaded DQN policy found at {policy_path}.")
else:
self.logger.warning(f"No DQN policy found at {policy_path}.")
                if self.options.dqn_only_test:
                    raise FileNotFoundError(
                        f"No DQN policy found at {policy_path}, but dqn_only_test is set."
                    )
def _train_dqn_policy(self):
""" Trains the DQN policy. """
self.logger.info(
f"Starting training at step {self.steps}/{self.options.num_train_steps}. "
f"Best score so far is {self.best_test_score}."
)
steps_epsilon = self.steps
while self.steps < self.options.num_train_steps:
self.logger.info("Episode {}".format(self.episode + 1))
# Evaluate the current policy
if self.options.dqn_test_episode_freq and (
self.episode % self.options.dqn_test_episode_freq == 0
):
test_scores, _ = evaluation.evaluate(
self.env,
self.policy,
self.options.num_test_episodes,
self.options.seed,
"val",
)
self.env.set_training()
auc_score = test_scores[self.options.reward_metric].sum(axis=1).mean()
if "mse" in self.options.reward_metric:
auc_score *= -1
if auc_score > self.best_test_score:
policy_path = os.path.join(
self.options.checkpoints_dir, "policy_best.pt"
)
self.save(policy_path)
self.best_test_score = auc_score
self.logger.info(
f"Saved DQN model with score {self.best_test_score} to {policy_path}."
)
# Save model periodically
if self.episode % self.options.freq_dqn_checkpoint_save == 0:
self.checkpoint(save_memory=False)
# Run an episode and update model
obs, meta = self.env.reset()
msg = ", ".join(
[
f"({meta['fname'][i]}, {meta['slice_id'][i]})"
for i in range(len(meta["slice_id"]))
]
)
self.logger.info(f"Episode started with images {msg}.")
all_done = False
total_reward = 0
auc_score = 0
while not all_done:
epsilon = _get_epsilon(steps_epsilon, self.options)
action = self.policy.get_action(obs, eps_threshold=epsilon)
next_obs, reward, done, meta = self.env.step(action)
auc_score += meta["current_score"][self.options.reward_metric]
all_done = all(done)
self.steps += 1
obs_tensor = _encode_obs_dict(obs)
next_obs_tensor = _encode_obs_dict(next_obs)
batch_size = len(obs_tensor)
for i in range(batch_size):
self.policy.add_experience(
obs_tensor[i], action[i], next_obs_tensor[i], reward[i], done[i]
)
update_results = self.policy.update_parameters(self.target_net)
torch.cuda.empty_cache()
if self.steps % self.options.target_net_update_freq == 0:
self.logger.info("Updating target network.")
self.target_net.load_state_dict(self.policy.state_dict())
steps_epsilon += 1
# Adding per-step tensorboard logs
if self.steps % 250 == 0:
self.logger.debug("Writing to tensorboard.")
self.writer.add_scalar("epsilon", epsilon, self.steps)
if update_results is not None:
self.writer.add_scalar(
"loss", update_results["loss"], self.steps
)
self.writer.add_scalar(
"grad_norm", update_results["grad_norm"], self.steps
)
self.writer.add_scalar(
"mean_q_value", update_results["q_values_mean"], self.steps
)
self.writer.add_scalar(
"std_q_value", update_results["q_values_std"], self.steps
)
total_reward += reward
obs = next_obs
# Adding per-episode tensorboard logs
total_reward = total_reward.mean().item()
auc_score = auc_score.mean().item()
self.reward_images_in_window[self.episode % self.window_size] = total_reward
self.current_score_auc_window[self.episode % self.window_size] = auc_score
self.writer.add_scalar("episode_reward", total_reward, self.episode)
self.writer.add_scalar(
"average_reward_images_in_window",
np.sum(self.reward_images_in_window)
/ min(self.episode + 1, self.window_size),
self.episode,
)
self.writer.add_scalar(
"average_auc_score_in_window",
np.sum(self.current_score_auc_window)
/ min(self.episode + 1, self.window_size),
self.episode,
)
self.episode += 1
self.checkpoint()
# Writing DONE file with best test score
with _get_folder_lock(self.folder_lock_path):
with open(
DDQNTrainer.get_done_filename(self.options.checkpoints_dir), "w"
) as f:
f.write(str(self.best_test_score))
return self.best_test_score
def __call__(self):
self.load_checkpoint_if_needed()
return self._train_dqn_policy()
def checkpoint(self, save_memory=True):
policy_path = DDQNTrainer.get_name_latest_checkpoint(
self.options.checkpoints_dir
)
self.save(policy_path)
self.logger.info(f"Saved DQN checkpoint to {policy_path}")
if save_memory:
self.logger.info("Now saving replay memory.")
memory_path = self.replay_memory.save(
self.options.checkpoints_dir, "replay_buffer.pt"
)
self.logger.info(f"Saved replay buffer to {memory_path}.")
def save(self, path):
with _get_folder_lock(self.folder_lock_path):
torch.save(
{
"dqn_weights": self.policy.state_dict(),
"target_weights": self.target_net.state_dict(),
"options": self.options,
"episode": self.episode,
"steps": self.steps,
"best_test_score": self.best_test_score,
"reward_images_in_window": self.reward_images_in_window,
"current_score_auc_window": self.current_score_auc_window,
},
path,
)
def load(self, path):
checkpoint = torch.load(path)
self.policy.load_state_dict(checkpoint["dqn_weights"])
self.episode = checkpoint["episode"] + 1
if not self.options.dqn_only_test:
self.target_net.load_state_dict(checkpoint["target_weights"])
self.steps = checkpoint["steps"]
self.best_test_score = checkpoint["best_test_score"]
self.reward_images_in_window = checkpoint["reward_images_in_window"]
self.current_score_auc_window = checkpoint["current_score_auc_window"]
| active-mri-acquisition-main | activemri/baselines/ddqn.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import activemri.baselines as baselines
import activemri.envs as envs
def evaluate(
env: envs.envs.ActiveMRIEnv,
policy: baselines.Policy,
num_episodes: int,
seed: int,
split: str,
verbose: Optional[bool] = False,
) -> Tuple[Dict[str, np.ndarray], List[Tuple[Any, Any]]]:
env.seed(seed)
if split == "test":
env.set_test()
elif split == "val":
env.set_val()
else:
raise ValueError(f"Invalid evaluation split: {split}.")
score_keys = env.score_keys()
all_scores = dict(
(k, np.zeros((num_episodes * env.num_parallel_episodes, env.budget + 1)))
for k in score_keys
)
all_img_ids = []
trajectories_written = 0
for episode in range(num_episodes):
step = 0
obs, meta = env.reset()
if not obs:
break # no more images
# in case the last batch is smaller
actual_batch_size = len(obs["reconstruction"])
if verbose:
msg = ", ".join(
[
f"({meta['fname'][i]}, {meta['slice_id'][i]})"
for i in range(actual_batch_size)
]
)
print(f"Read images: {msg}")
for i in range(actual_batch_size):
all_img_ids.append((meta["fname"][i], meta["slice_id"][i]))
batch_idx = slice(
trajectories_written, trajectories_written + actual_batch_size
)
for k in score_keys:
all_scores[k][batch_idx, step] = meta["current_score"][k]
trajectories_written += actual_batch_size
all_done = False
while not all_done:
step += 1
action = policy.get_action(obs)
obs, reward, done, meta = env.step(action)
for k in score_keys:
all_scores[k][batch_idx, step] = meta["current_score"][k]
all_done = all(done)
for k in score_keys:
all_scores[k] = all_scores[k][: len(all_img_ids), :]
return all_scores, all_img_ids
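# Hedged usage sketch (editor's addition): typical call pattern. The env and
# policy are assumed to be constructed elsewhere (e.g., via activemri.envs and
# activemri.baselines).
def _demo_evaluate(env, policy):
    scores, img_ids = evaluate(env, policy, num_episodes=2, seed=0, split="val")
    # scores maps each metric in env.score_keys() to an array of shape
    # (num_trajectories, budget + 1); img_ids pairs each row with (fname, slice_id)
    return {k: v.mean() for k, v in scores.items()}, img_ids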
| active-mri-acquisition-main | activemri/baselines/evaluation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Any, Dict, List
class Policy:
""" A basic policy interface. """
def __init__(self, *args, **kwargs):
pass
@abc.abstractmethod
def get_action(self, obs: Dict[str, Any], **kwargs: Any) -> List[int]:
""" Returns a list of actions for a batch of observations. """
pass
def __call__(self, obs: Dict[str, Any], **kwargs: Any) -> List[int]:
return self.get_action(obs, **kwargs)
from .simple_baselines import (
RandomPolicy,
RandomLowBiasPolicy,
LowestIndexPolicy,
OneStepGreedyOracle,
)
from .cvpr19_evaluator import CVPR19Evaluator
from .ddqn import DDQN, DDQNTrainer
from .evaluation import evaluate
__all__ = [
"RandomPolicy",
"RandomLowBiasPolicy",
"LowestIndexPolicy",
"OneStepGreedyOracle",
"CVPR19Evaluator",
"DDQN",
"DDQNTrainer",
"evaluate",
]
| active-mri-acquisition-main | activemri/baselines/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
activemri.baselines.simple_baselines.py
=======================================
Simple baselines for active MRI acquisition.
"""
from typing import Any, Dict, List, Optional
import numpy as np
import torch
import activemri.envs
from . import Policy
class RandomPolicy(Policy):
"""A policy representing random k-space selection.
Returns one of the valid actions uniformly at random.
Args:
seed(optional(int)): The seed to use for the random number generator, which is
based on ``torch.Generator()``.
"""
def __init__(self, seed: Optional[int] = None):
super().__init__()
self.rng = torch.Generator()
if seed:
self.rng.manual_seed(seed)
def get_action(self, obs: Dict[str, Any], **_kwargs) -> List[int]:
"""Returns a random action without replacement.
Args:
obs(dict(str, any)): As returned by :class:`activemri.envs.ActiveMRIEnv`.
Returns:
list(int): A list of random k-space column indices, one per batch element in
the observation. The indices are sampled from the set of inactive (0) columns
on each batch element.
"""
return (
(obs["mask"].logical_not().float() + 1e-6)
.multinomial(1, generator=self.rng)
.squeeze()
.tolist()
)
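# Hedged sketch (editor's addition): with some columns already active, the
# sampled indices land (almost surely) on inactive columns, since active ones
# keep only the tiny 1e-6 weight added above.
def _demo_random_policy():
    policy = RandomPolicy(seed=0)
    mask = torch.zeros(5, 16)
    mask[:, :4] = 1  # first four columns already acquired
    return policy({"mask": mask})  # five indices, essentially always in 4..15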
class RandomLowBiasPolicy(Policy):
def __init__(
self, acceleration: float, centered: bool = True, seed: Optional[int] = None
):
super().__init__()
self.acceleration = acceleration
self.centered = centered
self.rng = np.random.RandomState(seed)
def get_action(self, obs: Dict[str, Any], **_kwargs) -> List[int]:
mask = obs["mask"].squeeze().cpu().numpy()
new_mask = self._cartesian_mask(mask)
action = (new_mask - mask).argmax(axis=1)
return action.tolist()
@staticmethod
def _normal_pdf(length: int, sensitivity: float):
return np.exp(-sensitivity * (np.arange(length) - length / 2) ** 2)
def _cartesian_mask(self, current_mask: np.ndarray) -> np.ndarray:
batch_size, image_width = current_mask.shape
pdf_x = RandomLowBiasPolicy._normal_pdf(
image_width, 0.5 / (image_width / 10.0) ** 2
)
pdf_x = np.expand_dims(pdf_x, axis=0)
lmda = image_width / (2.0 * self.acceleration)
# add uniform distribution
pdf_x += lmda * 1.0 / image_width
# remove previously chosen columns
        # note that pdf_x is designed for centered masks
new_mask = (
np.fft.ifftshift(current_mask, axes=1)
if not self.centered
else current_mask.copy()
)
pdf_x = pdf_x * np.logical_not(new_mask)
# normalize probabilities and choose accordingly
pdf_x /= pdf_x.sum(axis=1, keepdims=True)
indices = [
self.rng.choice(image_width, 1, False, pdf_x[i]).item()
for i in range(batch_size)
]
new_mask[range(batch_size), indices] = 1
if not self.centered:
new_mask = np.fft.ifftshift(new_mask, axes=1)
return new_mask
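# Hedged sketch (editor's addition): each call activates exactly one new column
# per mask, sampled from the Gaussian-plus-uniform pdf built above, so repeated
# calls concentrate acquisitions around the low-frequency center.
def _demo_random_low_bias_policy():
    policy = RandomLowBiasPolicy(acceleration=4.0, centered=True, seed=0)
    mask = torch.zeros(3, 32)
    return policy({"mask": mask})  # one new column index per batch element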
class LowestIndexPolicy(Policy):
"""A policy that represents low-to-high frequency k-space selection.
Args:
alternate_sides(bool): If ``True`` the indices of selected actions will alternate
between the sides of the mask. For example, for an image with 100
columns, and non-centered k-space, the order will be 0, 99, 1, 98, 2, 97, ..., etc.
For the same size and centered, the order will be 49, 50, 48, 51, 47, 52, ..., etc.
centered(bool): If ``True`` (default), low frequencies are in the center of the mask.
Otherwise, they are in the edges of the mask.
"""
def __init__(
self,
alternate_sides: bool,
centered: bool = True,
):
super().__init__()
self.alternate_sides = alternate_sides
self.centered = centered
self.bottom_side = True
def get_action(self, obs: Dict[str, Any], **_kwargs) -> List[int]:
"""Returns a random action without replacement.
Args:
obs(dict(str, any)): As returned by :class:`activemri.envs.ActiveMRIEnv`.
Returns:
list(int): A list of k-space column indices, one per batch element in
the observation, equal to the lowest non-active k-space column in their
corresponding observation masks.
"""
mask = obs["mask"].squeeze().cpu().numpy()
new_mask = self._get_new_mask(mask)
action = (new_mask - mask).argmax(axis=1)
return action.tolist()
def _get_new_mask(self, current_mask: np.ndarray) -> np.ndarray:
        # The code below assumes a non-centered mask, so shift first if needed
new_mask = (
np.fft.ifftshift(current_mask, axes=1)
if self.centered
else current_mask.copy()
)
if self.bottom_side:
idx = np.arange(new_mask.shape[1], 0, -1)
else:
idx = np.arange(new_mask.shape[1])
if self.alternate_sides:
self.bottom_side = not self.bottom_side
        # The next line finds the outermost inactive (zero) column on the chosen side
indices = (np.logical_not(new_mask) * idx).argmax(axis=1)
indices = np.expand_dims(indices, axis=1)
new_mask[range(new_mask.shape[0]), indices] = 1
if self.centered:
new_mask = np.fft.ifftshift(new_mask, axes=1)
return new_mask
class OneStepGreedyOracle(Policy):
"""A policy that returns the k-space column leading to best reconstruction score.
Args:
        env(``activemri.envs.ActiveMRIEnv``): The environment for which the policy is
            computed.
metric(str): The name of the score metric to use (must be in ``env.score_keys()``).
num_samples(optional(int)): If given, only ``num_samples`` random actions will be
tested. Defaults to ``None``, which means that method will consider all actions.
rng(``numpy.random.RandomState``): A random number generator to use for sampling.
"""
def __init__(
self,
env: activemri.envs.ActiveMRIEnv,
metric: str,
num_samples: Optional[int] = None,
rng: Optional[np.random.RandomState] = None,
):
assert metric in env.score_keys()
super().__init__()
self.env = env
self.metric = metric
self.num_samples = num_samples
self.rng = rng if rng is not None else np.random.RandomState()
def get_action(self, obs: Dict[str, Any], **_kwargs) -> List[int]:
"""Returns a one-step greedy action maximizing reconstruction score.
Args:
obs(dict(str, any)): As returned by :class:`activemri.envs.ActiveMRIEnv`.
Returns:
list(int): A list of k-space column indices, one per batch element in
the observation, equal to the action that maximizes reconstruction score
                (e.g., SSIM or negative MSE).
"""
mask = obs["mask"]
batch_size = mask.shape[0]
all_action_lists = []
for i in range(batch_size):
available_actions = mask[i].logical_not().nonzero().squeeze().tolist()
self.rng.shuffle(available_actions)
if len(available_actions) < self.num_samples:
# Add dummy actions to try if num of samples is higher than the
# number of inactive columns in this mask
available_actions.extend(
[0] * (self.num_samples - len(available_actions))
)
all_action_lists.append(available_actions)
all_scores = np.zeros((batch_size, self.num_samples))
for i in range(self.num_samples):
batch_action_to_try = [action_list[i] for action_list in all_action_lists]
obs, new_score = self.env.try_action(batch_action_to_try)
all_scores[:, i] = new_score[self.metric]
if self.metric in ["mse", "nmse"]:
all_scores *= -1
else:
assert self.metric in ["ssim", "psnr"]
best_indices = all_scores.argmax(axis=1)
action = []
for i in range(batch_size):
action.append(all_action_lists[i][best_indices[i]])
return action
| active-mri-acquisition-main | activemri/baselines/simple_baselines.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import tempfile
from typing import Dict, Optional
import numpy as np
import torch
class ReplayMemory:
"""Replay memory of transitions (ot, at, o_t+1, r_t+1).
Args:
        capacity(int): How many transitions can be stored. After capacity is reached,
            early transitions are overwritten in FIFO fashion.
obs_shape(np.array): The shape of the numpy arrays representing observations.
batch_size(int): The size of batches returned by the replay buffer.
        burn_in(int): While the replay buffer has fewer entries than this number,
            :meth:`sample()` will return ``None``. Indicates a burn-in period before
            training.
use_normalization(bool): If ``True``, the replay buffer will keep running mean
and standard deviation for the observations. Defaults to ``False``.
"""
def __init__(
self,
capacity: int,
obs_shape: np.array,
batch_size: int,
burn_in: int,
use_normalization: bool = False,
):
assert burn_in >= batch_size
self.batch_size = batch_size
self.burn_in = burn_in
self.observations = torch.zeros(capacity, *obs_shape, dtype=torch.float32)
self.actions = torch.zeros(capacity, dtype=torch.long)
self.next_observations = torch.zeros(capacity, *obs_shape, dtype=torch.float32)
self.rewards = torch.zeros(capacity, dtype=torch.float32)
self.dones = torch.zeros(capacity, dtype=torch.bool)
self.position = 0
self.mean_obs = torch.zeros(obs_shape, dtype=torch.float32)
self.std_obs = torch.ones(obs_shape, dtype=torch.float32)
self._m2_obs = torch.ones(obs_shape, dtype=torch.float32)
self.count_seen = 1
if not use_normalization:
self._normalize = lambda x: x # type: ignore
self._denormalize = lambda x: x # type: ignore
def _normalize(self, observation: torch.Tensor) -> Optional[torch.Tensor]:
if observation is None:
return None
return (observation - self.mean_obs) / self.std_obs
def _denormalize(self, observation: torch.Tensor) -> Optional[torch.Tensor]:
if observation is None:
return None
return self.std_obs * observation + self.mean_obs
def _update_stats(self, observation: torch.Tensor):
self.count_seen += 1
delta = observation - self.mean_obs
self.mean_obs = self.mean_obs + delta / self.count_seen
delta2 = observation - self.mean_obs
self._m2_obs = self._m2_obs + (delta * delta2)
self.std_obs = np.sqrt(self._m2_obs / (self.count_seen - 1))
def push(
self,
observation: np.array,
action: int,
next_observation: np.array,
reward: float,
done: bool,
):
""" Pushes a transition into the replay buffer. """
self.observations[self.position] = observation.clone()
self.actions[self.position] = torch.tensor([action], dtype=torch.long)
self.next_observations[self.position] = next_observation.clone()
self.rewards[self.position] = torch.tensor([reward], dtype=torch.float32)
self.dones[self.position] = torch.tensor([done], dtype=torch.bool)
self._update_stats(self.observations[self.position])
self.position = (self.position + 1) % len(self)
def sample(self) -> Optional[Dict[str, Optional[torch.Tensor]]]:
"""Samples a batch of transitions from the replay buffer.
Returns:
Dictionary(str, torch.Tensor): Contains keys for "observations",
"next_observations", "actions", "rewards", "dones". If the number of entries
in the buffer is less than ``self.burn_in``, then returns ``None`` instead.
"""
if self.count_seen - 1 < self.burn_in:
return None
indices = np.random.choice(min(self.count_seen - 1, len(self)), self.batch_size)
return {
"observations": self._normalize(self.observations[indices]),
"next_observations": self._normalize(self.next_observations[indices]),
"actions": self.actions[indices],
"rewards": self.rewards[indices],
"dones": self.dones[indices],
}
def save(self, directory: str, name: str):
""" Saves all tensors and normalization info to file `directory/name` """
data = {
"observations": self.observations,
"actions": self.actions,
"next_observations": self.next_observations,
"rewards": self.rewards,
"dones": self.dones,
"position": self.position,
"mean_obs": self.mean_obs,
"std_obs": self.std_obs,
"m2_obs": self._m2_obs,
"count_seen": self.count_seen,
}
tmp_filename = tempfile.NamedTemporaryFile(delete=False, dir=directory)
try:
torch.save(data, tmp_filename)
except BaseException:
tmp_filename.close()
os.remove(tmp_filename.name)
raise
else:
tmp_filename.close()
full_path = os.path.join(directory, name)
os.rename(tmp_filename.name, full_path)
return full_path
def load(self, path: str, capacity: Optional[int] = None):
"""Loads the replay buffer from the specified path.
Args:
path(str): The path from where the memory will be loaded from.
capacity(int): If provided, the buffer is created with this much capacity. This
value must be larger than the length of the stored tensors.
"""
data = torch.load(path)
self.position = data["position"]
self.mean_obs = data["mean_obs"]
self.std_obs = data["std_obs"]
self._m2_obs = data["m2_obs"]
self.count_seen = data["count_seen"]
old_len = data["observations"].shape[0]
if capacity is None:
self.observations = data["observations"]
self.actions = data["actions"]
self.next_observations = data["next_observations"]
self.rewards = data["rewards"]
self.dones = data["dones"]
else:
assert capacity >= len(data["observations"])
obs_shape = data["observations"].shape[1:]
self.observations = torch.zeros(capacity, *obs_shape, dtype=torch.float32)
self.actions = torch.zeros(capacity, dtype=torch.long)
self.next_observations = torch.zeros(
capacity, *obs_shape, dtype=torch.float32
)
self.rewards = torch.zeros(capacity, dtype=torch.float32)
self.dones = torch.zeros(capacity, dtype=torch.bool)
self.observations[:old_len] = data["observations"]
self.actions[:old_len] = data["actions"]
self.next_observations[:old_len] = data["next_observations"]
self.rewards[:old_len] = data["rewards"]
self.dones[:old_len] = data["dones"]
return old_len
def __len__(self):
return len(self.observations)
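# Hedged usage sketch (editor's addition): push transitions until the burn-in
# is reached, then sample a training batch. Note that _update_stats() keeps
# Welford-style running statistics, with a small bias from its count_seen=1,
# m2=1 initialization that washes out as more transitions are seen.
def _demo_replay_memory():
    memory = ReplayMemory(capacity=8, obs_shape=(3,), batch_size=2, burn_in=2)
    for i in range(4):
        memory.push(torch.randn(3), i, torch.randn(3), reward=1.0, done=False)
    batch = memory.sample()  # would be None before burn_in transitions are stored
    return batch["observations"].shape  # -> torch.Size([2, 3])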
| active-mri-acquisition-main | activemri/baselines/replay_buffer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List
import torch
import activemri.experimental.cvpr19_models.models.evaluator as cvpr19_evaluator
from . import Policy
# This is just a wrapper for the model in cvpr19_models folder
class CVPR19Evaluator(Policy):
def __init__(
self,
evaluator_path: str,
device: torch.device,
add_mask: bool = False,
):
super().__init__()
evaluator_checkpoint = torch.load(evaluator_path)
assert (
evaluator_checkpoint is not None
and evaluator_checkpoint["evaluator"] is not None
)
self.evaluator = cvpr19_evaluator.EvaluatorNetwork(
number_of_filters=evaluator_checkpoint[
"options"
].number_of_evaluator_filters,
number_of_conv_layers=evaluator_checkpoint[
"options"
].number_of_evaluator_convolution_layers,
use_sigmoid=False,
width=evaluator_checkpoint["options"].image_width,
height=640,
mask_embed_dim=evaluator_checkpoint["options"].mask_embed_dim,
)
self.evaluator.load_state_dict(
{
key.replace("module.", ""): val
for key, val in evaluator_checkpoint["evaluator"].items()
}
)
self.evaluator.eval()
self.evaluator.to(device)
self.add_mask = add_mask
self.device = device
def get_action(self, obs: Dict[str, Any], **_kwargs) -> List[int]:
with torch.no_grad():
mask_embedding = (
None
if obs["extra_outputs"]["mask_embedding"] is None
else obs["extra_outputs"]["mask_embedding"].to(self.device)
)
mask = obs["mask"].bool().to(self.device)
mask = mask.view(mask.shape[0], 1, 1, -1)
k_space_scores = self.evaluator(
obs["reconstruction"].permute(0, 3, 1, 2).to(self.device),
mask_embedding,
mask if self.add_mask else None,
)
            # Fill already-chosen actions with a very large score so they cannot be selected again.
k_space_scores.masked_fill_(mask.squeeze(), 100000)
return torch.argmin(k_space_scores, dim=1).tolist()
| active-mri-acquisition-main | activemri/baselines/cvpr19_evaluator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
activemri.envs.envs.py
====================================
Gym-like environment for active MRI acquisition.
"""
import functools
import json
import os
import pathlib
import warnings
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Sized,
Tuple,
Union,
)
import fastmri.data
import gym
import numpy as np
import torch
import torch.utils.data
import activemri.data.singlecoil_knee_data as scknee_data
import activemri.data.transforms
import activemri.envs.masks
import activemri.envs.util
import activemri.models
DataInitFnReturnType = Tuple[
torch.utils.data.Dataset, torch.utils.data.Dataset, torch.utils.data.Dataset
]
# -----------------------------------------------------------------------------
# DATA HANDLING
# -----------------------------------------------------------------------------
class CyclicSampler(torch.utils.data.Sampler):
def __init__(
self,
data_source: Sized,
order: Optional[Sized] = None,
loops: int = 1,
):
torch.utils.data.Sampler.__init__(self, data_source)
assert loops > 0
assert order is None or len(order) == len(data_source)
self.data_source = data_source
self.order = order if order is not None else range(len(self.data_source))
self.loops = loops
def _iterator(self):
for _ in range(self.loops):
for j in self.order:
yield j
def __iter__(self):
return iter(self._iterator())
def __len__(self):
return len(self.data_source) * self.loops
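# Hedged sketch (editor's addition): the sampler repeats a fixed permutation of
# dataset indices `loops` times, which lets the training data handler iterate
# (pseudo-)indefinitely in a reproducible order.
def _demo_cyclic_sampler():
    data = list(range(4))
    sampler = CyclicSampler(data, order=[2, 0, 3, 1], loops=2)
    return list(sampler)  # -> [2, 0, 3, 1, 2, 0, 3, 1]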
def _env_collate_fn(
batch: Tuple[Union[np.array, list], ...]
) -> Tuple[Union[np.array, list], ...]:
ret = []
for i in range(6): # kspace, mask, target, attrs, fname, slice_id
ret.append([item[i] for item in batch])
return tuple(ret)
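# Hedged sketch (editor's addition): unlike the default collate, this keeps each
# of the six per-item fields as a plain list with one entry per batch element,
# since fname/slice_id/attrs are not stackable tensors.
def _demo_env_collate():
    item = (np.zeros(2), np.ones(2), None, {"attr": 1}, "file0", 7)
    kspace, mask, target, attrs, fnames, slice_ids = _env_collate_fn([item, item])
    return fnames, slice_ids  # -> (["file0", "file0"], [7, 7])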
class DataHandler:
def __init__(
self,
data_source: torch.utils.data.Dataset,
seed: Optional[int],
batch_size: int = 1,
loops: int = 1,
collate_fn: Optional[Callable] = None,
):
self._iter = None # type: Iterator[Any]
self._collate_fn = collate_fn
self._batch_size = batch_size
self._loops = loops
self._init_impl(data_source, seed, batch_size, loops, collate_fn)
def _init_impl(
self,
data_source: torch.utils.data.Dataset,
seed: Optional[int],
batch_size: int = 1,
loops: int = 1,
collate_fn: Optional[Callable] = None,
):
rng = np.random.RandomState(seed)
order = rng.permutation(len(data_source))
sampler = CyclicSampler(data_source, order, loops=loops)
if collate_fn:
self._data_loader = torch.utils.data.DataLoader(
data_source,
batch_size=batch_size,
sampler=sampler,
collate_fn=collate_fn,
)
else:
self._data_loader = torch.utils.data.DataLoader(
data_source, batch_size=batch_size, sampler=sampler
)
self._iter = iter(self._data_loader)
def reset(self):
self._iter = iter(self._data_loader)
def __iter__(self):
return self._iter
def __next__(self):
return next(self._iter)
def seed(self, seed: int):
self._init_impl(
self._data_loader.dataset,
seed,
self._batch_size,
self._loops,
self._collate_fn,
)
# -----------------------------------------------------------------------------
# BASE ACTIVE MRI ENV
# -----------------------------------------------------------------------------
class ActiveMRIEnv(gym.Env):
"""Base class for all active MRI acquisition environments.
This class provides the core logic implementation of the k-space acquisition process.
The class is not to be used directly, but rather one of its subclasses should be
instantiated. Subclasses of `ActiveMRIEnv` are responsible for data initialization
and specifying configuration options for the environment.
Args:
kspace_shape(tuple(int,int)): Shape of the k-space slices for the dataset.
        num_parallel_episodes(int): Determines the number of images that will be processed
                              simultaneously by :meth:`reset()` and :meth:`step()`.
                              Defaults to 1.
        budget(optional(int)): The length of an acquisition episode. Defaults to ``None``,
                               which indicates that the episode will continue until all
                               k-space columns have been acquired.
        seed(optional(int)): The seed for the environment's random number generator, which
                             is an instance of ``numpy.random.RandomState``. Defaults to
                             ``None``.
"""
_num_loops_train_data = 100000
metadata = {"render.modes": ["human"], "video.frames_per_second": None}
def __init__(
self,
kspace_shape: Tuple[int, int],
num_parallel_episodes: int = 1,
budget: Optional[int] = None,
seed: Optional[int] = None,
):
# Default initialization
self._cfg: Mapping[str, Any] = None
self._data_location: str = None
self._reconstructor: activemri.models.Reconstructor = None
self._transform: Callable = None
self._train_data_handler: DataHandler = None
self._val_data_handler: DataHandler = None
self._test_data_handler: DataHandler = None
self._device = torch.device("cpu")
self._has_setup = False
self.num_parallel_episodes = num_parallel_episodes
self.budget = budget
self._seed = seed
self._rng = np.random.RandomState(seed)
self.reward_metric = "mse"
# Init from provided configuration
self.kspace_height, self.kspace_width = kspace_shape
# Gym init
# Observation is a dictionary
self.observation_space = None
self.action_space = gym.spaces.Discrete(self.kspace_width)
# This is changed by `set_training()`, `set_val()`, `set_test()`
self._current_data_handler: DataHandler = None
# These are changed every call to `reset()`
self._current_ground_truth: torch.Tensor = None
self._transform_wrapper: Callable = None
self._current_k_space: torch.Tensor = None
self._did_reset = False
self._steps_since_reset = 0
# These three are changed every call to `reset()` and every call to `step()`
self._current_reconstruction_numpy: np.ndarray = None
self._current_score: Dict[str, np.ndarray] = None
self._current_mask: torch.Tensor = None
# -------------------------------------------------------------------------
# Protected methods
# -------------------------------------------------------------------------
def _setup(
self,
cfg_filename: str,
data_init_func: Callable[[], DataInitFnReturnType],
):
self._has_setup = True
self._init_from_config_file(cfg_filename)
self._setup_data_handlers(data_init_func)
def _setup_data_handlers(
self,
data_init_func: Callable[[], DataInitFnReturnType],
):
train_data, val_data, test_data = data_init_func()
self._train_data_handler = DataHandler(
train_data,
self._seed,
batch_size=self.num_parallel_episodes,
loops=self._num_loops_train_data,
collate_fn=_env_collate_fn,
)
self._val_data_handler = DataHandler(
val_data,
            self._seed + 1 if self._seed is not None else None,
batch_size=self.num_parallel_episodes,
loops=1,
collate_fn=_env_collate_fn,
)
self._test_data_handler = DataHandler(
test_data,
            self._seed + 2 if self._seed is not None else None,
batch_size=self.num_parallel_episodes,
loops=1,
collate_fn=_env_collate_fn,
)
self._current_data_handler = self._train_data_handler
def _init_from_config_dict(self, cfg: Mapping[str, Any]):
self._cfg = cfg
self._data_location = cfg["data_location"]
if not os.path.isdir(self._data_location):
default_cfg, defaults_fname = activemri.envs.util.get_defaults_json()
self._data_location = default_cfg["data_location"]
if not os.path.isdir(self._data_location) and self._has_setup:
raise RuntimeError(
f"No 'data_location' key found in the given config. Please "
f"write dataset location in your JSON config, or in file {defaults_fname} "
f"(to use as a default)."
)
self._device = torch.device(cfg["device"])
self.reward_metric = cfg["reward_metric"]
if self.reward_metric not in ["mse", "ssim", "psnr", "nmse"]:
raise ValueError("Reward metric must be one of mse, nmse, ssim, or psnr.")
mask_func = activemri.envs.util.import_object_from_str(cfg["mask"]["function"])
self._mask_func = functools.partial(mask_func, cfg["mask"]["args"])
# Instantiating reconstructor
reconstructor_cfg = cfg["reconstructor"]
reconstructor_cls = activemri.envs.util.import_object_from_str(
reconstructor_cfg["cls"]
)
checkpoint_fname = pathlib.Path(reconstructor_cfg["checkpoint_fname"])
default_cfg, defaults_fname = activemri.envs.util.get_defaults_json()
saved_models_dir = default_cfg["saved_models_dir"]
checkpoint_path = pathlib.Path(saved_models_dir) / checkpoint_fname
if self._has_setup and not checkpoint_path.is_file():
raise RuntimeError(
f"No checkpoint was found at {str(checkpoint_path)}. "
f"Please make sure that both 'checkpoint_fname' (in your JSON config) "
f"and 'saved_models_dir' (in {defaults_fname}) are configured correctly."
)
checkpoint = (
torch.load(str(checkpoint_path)) if checkpoint_path.is_file() else None
)
options = reconstructor_cfg["options"]
if checkpoint and "options" in checkpoint:
msg = (
f"Checkpoint at {checkpoint_path.name} has an 'options' key. "
f"This will override the options defined in configuration file."
)
warnings.warn(msg)
options = checkpoint["options"]
assert isinstance(options, dict)
self._reconstructor = reconstructor_cls(**options)
self._reconstructor.init_from_checkpoint(checkpoint)
self._reconstructor.eval()
self._reconstructor.to(self._device)
self._transform = activemri.envs.util.import_object_from_str(
reconstructor_cfg["transform"]
)
def _init_from_config_file(self, config_filename: str):
with open(config_filename, "rb") as f:
self._init_from_config_dict(json.load(f))
@staticmethod
def _void_transform(
kspace: torch.Tensor,
mask: torch.Tensor,
target: torch.Tensor,
attrs: List[Dict[str, Any]],
fname: List[str],
slice_id: List[int],
) -> Tuple:
return kspace, mask, target, attrs, fname, slice_id
def _send_tuple_to_device(self, the_tuple: Tuple[Union[Any, torch.Tensor]]):
the_tuple_device = []
for i in range(len(the_tuple)):
if isinstance(the_tuple[i], torch.Tensor):
the_tuple_device.append(the_tuple[i].to(self._device))
else:
the_tuple_device.append(the_tuple[i])
return tuple(the_tuple_device)
@staticmethod
def _send_dict_to_cpu_and_detach(the_dict: Dict[str, Union[Any, torch.Tensor]]):
the_dict_cpu = {}
for key in the_dict:
if isinstance(the_dict[key], torch.Tensor):
the_dict_cpu[key] = the_dict[key].detach().cpu()
else:
the_dict_cpu[key] = the_dict[key]
return the_dict_cpu
def _compute_obs_and_score(
self, override_current_mask: Optional[torch.Tensor] = None
) -> Tuple[Dict[str, Any], Dict[str, np.ndarray]]:
mask_to_use = (
override_current_mask
if override_current_mask is not None
else self._current_mask
)
reconstructor_input = self._transform_wrapper(
kspace=self._current_k_space,
mask=mask_to_use,
ground_truth=self._current_ground_truth,
)
reconstructor_input = self._send_tuple_to_device(reconstructor_input)
with torch.no_grad():
extra_outputs = self._reconstructor(*reconstructor_input)
extra_outputs = self._send_dict_to_cpu_and_detach(extra_outputs)
reconstruction = extra_outputs["reconstruction"]
# this dict is only for storing the other outputs
del extra_outputs["reconstruction"]
# noinspection PyUnusedLocal
reconstructor_input = None # de-referencing GPU tensors
score = self._compute_score_given_tensors(
*self._process_tensors_for_score_fns(
reconstruction, self._current_ground_truth
)
)
obs = {
"reconstruction": reconstruction,
"extra_outputs": extra_outputs,
"mask": self._current_mask.clone().view(self._current_mask.shape[0], -1),
}
return obs, score
def _clear_cache_and_unset_did_reset(self):
self._current_mask = None
self._current_ground_truth = None
self._current_reconstruction_numpy = None
self._transform_wrapper = None
self._current_k_space = None
self._current_score = None
self._steps_since_reset = 0
self._did_reset = False
# noinspection PyMethodMayBeStatic
def _process_tensors_for_score_fns(
self, reconstruction: torch.Tensor, ground_truth: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
return reconstruction, ground_truth
@staticmethod
def _compute_score_given_tensors(
reconstruction: torch.Tensor, ground_truth: torch.Tensor
) -> Dict[str, np.ndarray]:
mse = activemri.envs.util.compute_mse(reconstruction, ground_truth)
nmse = activemri.envs.util.compute_nmse(reconstruction, ground_truth)
ssim = activemri.envs.util.compute_ssim(reconstruction, ground_truth)
psnr = activemri.envs.util.compute_psnr(reconstruction, ground_truth)
return {"mse": mse, "nmse": nmse, "ssim": ssim, "psnr": psnr}
@staticmethod
def _convert_to_gray(array: np.ndarray) -> np.ndarray:
M = np.max(array)
m = np.min(array)
return (255 * (array - m) / (M - m)).astype(np.uint8)
@staticmethod
def _render_arrays(
ground_truth: np.ndarray, reconstruction: np.ndarray, mask: np.ndarray
) -> List[np.ndarray]:
batch_size, img_height, img_width = ground_truth.shape
frames = []
for i in range(batch_size):
mask_i = np.tile(mask[i], (1, img_height, 1))
pad = 32
mask_begin = pad
mask_end = mask_begin + mask.shape[-1]
gt_begin = mask_end + pad
gt_end = gt_begin + img_width
rec_begin = gt_end + pad
rec_end = rec_begin + img_width
error_begin = rec_end + pad
error_end = error_begin + img_width
frame = 128 * np.ones((img_height, error_end + pad), dtype=np.uint8)
frame[:, mask_begin:mask_end] = 255 * mask_i
frame[:, gt_begin:gt_end] = ActiveMRIEnv._convert_to_gray(ground_truth[i])
frame[:, rec_begin:rec_end] = ActiveMRIEnv._convert_to_gray(
reconstruction[i]
)
rel_error = np.abs((ground_truth[i] - reconstruction[i]) / ground_truth[i])
            # scale before casting so fractional errors are not truncated to 0/1
            frame[:, error_begin:error_end] = (255 * np.clip(rel_error, 0, 1)).astype(
                np.uint8
            )
frames.append(frame)
return frames
# -------------------------------------------------------------------------
# Public methods
# -------------------------------------------------------------------------
def reset(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Starts a new acquisition episode with a batch of images.
        This method performs the following steps:
1. Reads a batch of images from the environment's dataset.
2. Creates an initial acquisition mask for each image.
3. Passes the loaded data and the initial masks to the transform function,
producing a batch of inputs for the environment's reconstructor model.
4. Calls the reconstructor model on this input and returns its output
as an observation.
The observation returned is a dictionary with the following keys:
- *"reconstruction"(torch.Tensor):* The reconstruction produced by the
environment's reconstructor model, using the current
acquisition mask.
- *"extra_outputs"(dict(str,Any)):* A dictionary with any additional
outputs produced by the reconstructor (e.g., uncertainty maps).
- *"mask"(torch.Tensor):* The current acquisition mask.
Returns:
tuple: tuple containing:
                - obs(dict(str,any)): Observation dictionary.
                - metadata(dict(str,any)): Metadata information containing the following keys:
                    - *"fname"(list(str)):* The filenames of the images read from the dataset.
                    - *"slice_id"(list(int)):* Slice indices for each image within its volume.
                    - *"current_score"(dict(str,float)):* A dictionary with the error measures
                      for the reconstruction (e.g., "mse", "nmse", "ssim", "psnr"). The measures
                      considered can be obtained with :meth:`score_keys()`.
"""
self._did_reset = True
try:
kspace, _, ground_truth, attrs, fname, slice_id = next(
self._current_data_handler
)
except StopIteration:
return {}, {}
self._current_ground_truth = torch.from_numpy(np.stack(ground_truth))
# Converting k-space to torch is better handled by transform,
# since we have both complex and non-complex versions
self._current_k_space = kspace
self._transform_wrapper = functools.partial(
self._transform, attrs=attrs, fname=fname, slice_id=slice_id
)
kspace_shapes = [tuple(k.shape) for k in kspace]
self._current_mask = self._mask_func(kspace_shapes, self._rng, attrs=attrs)
obs, self._current_score = self._compute_obs_and_score()
self._current_reconstruction_numpy = obs["reconstruction"].cpu().numpy()
self._steps_since_reset = 0
meta = {
"fname": fname,
"slice_id": slice_id,
"current_score": self._current_score,
}
return obs, meta
def step(
self, action: Union[int, Sequence[int]]
) -> Tuple[Dict[str, Any], np.ndarray, List[bool], Dict]:
"""Performs a step of active MRI acquisition.
Given a set of indices for k-space columns to acquire, updates the current batch
of masks with their corresponding indices, creates a new batch of reconstructions,
and returns the corresponding observations and rewards (for the observation format
see :meth:`reset()`). The reward is the improvement in score with
respect to the reconstruction before adding the indices. The specific score metric
used is determined by ``env.reward_metric``.
        The method also returns a list of booleans, indicating whether each episode in the
        batch has already concluded.
The last return value is a metadata dictionary. It contains a single key
"current_score", which contains a dictionary with the error measures for the
reconstruction (e.g., ``"mse", "nmse", "ssim", "psnr"``). The measures
considered can be obtained with :meth:`score_keys()`.
Args:
action(union(int, sequence(int))): Indices for k-space columns to acquire. The
length of the sequence must be equal to the
current number of parallel episodes
(i.e., ``obs["reconstruction"].shape[0]``).
If only an ``int`` is passed, the index will
be replicated for the whole batch of episodes.
Returns:
tuple: The transition information in the order
``(next_observation, reward, done, meta)``. The types and shapes are:
- ``next_observation(dict):`` Dictionary format (see :meth:`reset()`).
- ``reward(np.ndarray)``: length equal to current number of parallel
episodes.
- ``done(list(bool))``: same length as ``reward``.
- ``meta(dict)``: see description above.
"""
if not self._did_reset:
raise RuntimeError(
"Attempting to call env.step() before calling env.reset()."
)
if isinstance(action, int):
action = [action for _ in range(self.num_parallel_episodes)]
self._current_mask = activemri.envs.masks.update_masks_from_indices(
self._current_mask, action
)
obs, new_score = self._compute_obs_and_score()
self._current_reconstruction_numpy = obs["reconstruction"].cpu().numpy()
reward = new_score[self.reward_metric] - self._current_score[self.reward_metric]
if self.reward_metric in ["mse", "nmse"]:
reward *= -1
else:
assert self.reward_metric in ["ssim", "psnr"]
self._current_score = new_score
self._steps_since_reset += 1
done = activemri.envs.masks.check_masks_complete(self._current_mask)
if self.budget and self._steps_since_reset >= self.budget:
done = [True] * len(done)
return obs, reward, done, {"current_score": self._current_score}
def try_action(
self, action: Union[int, Sequence[int]]
) -> Tuple[Dict[str, Any], Dict[str, np.ndarray]]:
"""Simulates the effects of actions without changing the environment's state.
This method operates almost exactly as :meth:`step()`, with the exception that
the environment's state is not altered. The method returns the next observation
and the resulting reconstruction score after applying the give k-space columns to
each image in the current batch of episodes.
Args:
action(union(int, sequence(int))): Indices for k-space columns to acquire. The
length of the sequence must be equal to the
current number of parallel episodes
(i.e., ``obs["reconstruction"].shape[0]``).
If only an ``int`` is passed, the index will
be replicated for the whole batch of episodes.
Returns:
tuple: The reconstruction information in the order
``(next_observation, current_score)``. The types and shapes are:
- ``next_observation(dict):`` Dictionary format (see :meth:`reset()`).
- ``current_score(dict(str, float))``: A dictionary with the error measures
for the reconstruction (e.g., "mse", "nmse", "ssim", "psnr"). The measures
considered can be obtained with `ActiveMRIEnv.score_keys()`.
"""
if not self._did_reset:
raise RuntimeError(
"Attempting to call env.try_action() before calling env.reset()."
)
if isinstance(action, int):
action = [action for _ in range(self.num_parallel_episodes)]
new_mask = activemri.envs.masks.update_masks_from_indices(
self._current_mask, action
)
obs, new_score = self._compute_obs_and_score(override_current_mask=new_mask)
return obs, new_score
def render(self, mode="human"):
"""Renders information about the environment's current state.
Returns:
``np.ndarray``: An image frame containing, from left to right: current
            acquisition mask, current ground truth image, current reconstruction,
and current relative reconstruction error.
"""
pass
def seed(self, seed: Optional[int] = None):
"""Sets the seed for the internal number generator.
This seeds affects the order of the data loader for all loop modalities (i.e.,
training, validation, test).
Args:
seed(optional(int)): The seed for the environment's random number generator.
"""
self._seed = seed
self._rng = np.random.RandomState(seed)
self._train_data_handler.seed(seed)
self._val_data_handler.seed(seed)
self._test_data_handler.seed(seed)
def set_training(self, reset: bool = False):
"""Sets the environment to use the training data loader.
Args:
reset(bool): If ``True``, also resets the data loader so that it starts again
from the first image in the loop order.
Warning:
            After this method is called, ``env.reset()`` needs to be called again, otherwise
an exception will be thrown.
"""
if reset:
self._train_data_handler.reset()
self._current_data_handler = self._train_data_handler
self._clear_cache_and_unset_did_reset()
def set_val(self, reset: bool = True):
"""Sets the environment to use the validation data loader.
Args:
reset(bool): If ``True``, also resets the data loader so that it starts again
from the first image in the loop order.
Warning:
            After this method is called, ``env.reset()`` needs to be called again, otherwise
an exception will be thrown.
"""
if reset:
self._val_data_handler.reset()
self._current_data_handler = self._val_data_handler
self._clear_cache_and_unset_did_reset()
def set_test(self, reset: bool = True):
"""Sets the environment to use the test data loader.
Args:
reset(bool): If ``True``, also resets the data loader so that it starts again
from the first image in the loop order.
Warning:
            After this method is called, ``env.reset()`` needs to be called again, otherwise
an exception will be thrown.
"""
if reset:
self._test_data_handler.reset()
self._current_data_handler = self._test_data_handler
self._clear_cache_and_unset_did_reset()
@staticmethod
def score_keys() -> List[str]:
""" Returns the list of score metric names used by this environment. """
return ["mse", "nmse", "ssim", "psnr"]
# -----------------------------------------------------------------------------
# CUSTOM ENVIRONMENTS
# -----------------------------------------------------------------------------
class MICCAI2020Env(ActiveMRIEnv):
"""Implementation of environment used for *Pineda et al., MICCAI 2020*.
This environment is provided to facilitate replication of the experiments performed
in *Luis Pineda, Sumana Basu, Adriana Romero, Roberto Calandra, Michal Drozdzal,
"Active MR k-space Sampling with Reinforcement Learning". MICCAI 2020.*
The dataset is the same as that of :class:`SingleCoilKneeEnv`, except that we provide
a custom validation/test split of the original validation data. The environment's
configuration file is set to use the reconstruction model used in the paper
(see :class:`activemri.models.cvpr19_reconstructor.CVPR19Reconstructor`), as well
as the proper transform to generate inputs for this model.
The k-space shape of this environment is set to ``(640, 368)``.
Args:
        num_parallel_episodes(int): Determines the number of images that will be processed
simultaneously by :meth:`reset()` and :meth:`step()`.
Defaults to 1.
budget(optional(int)): The length of an acquisition episode. Defaults to ``None``,
                               which indicates that the episode will continue until all k-space
columns have been acquired.
seed(optional(int)): The seed for the environment's random number generator, which is
an instance of ``numpy.random.RandomState``. Defaults to ``None``.
extreme(bool): ``True`` or ``False`` for running extreme acceleration or normal
acceleration scenarios described in the paper, respectively.
"""
KSPACE_WIDTH = scknee_data.MICCAI2020Data.KSPACE_WIDTH
START_PADDING = scknee_data.MICCAI2020Data.START_PADDING
END_PADDING = scknee_data.MICCAI2020Data.END_PADDING
CENTER_CROP_SIZE = scknee_data.MICCAI2020Data.CENTER_CROP_SIZE
def __init__(
self,
num_parallel_episodes: int = 1,
budget: Optional[int] = None,
seed: Optional[int] = None,
extreme: bool = False,
obs_includes_padding: bool = True,
):
super().__init__(
(640, self.KSPACE_WIDTH),
num_parallel_episodes=num_parallel_episodes,
budget=budget,
seed=seed,
)
if extreme:
self._setup("configs/miccai-2020-extreme-acc.json", self._create_dataset)
else:
self._setup("configs/miccai-2020-normal-acc.json", self._create_dataset)
self.obs_includes_padding = obs_includes_padding
# -------------------------------------------------------------------------
# Protected methods
# -------------------------------------------------------------------------
def _create_dataset(self) -> DataInitFnReturnType:
root_path = pathlib.Path(self._data_location)
train_path = root_path / "knee_singlecoil_train"
val_and_test_path = root_path / "knee_singlecoil_val"
train_data = scknee_data.MICCAI2020Data(
train_path,
ActiveMRIEnv._void_transform,
num_cols=self.KSPACE_WIDTH,
)
val_data = scknee_data.MICCAI2020Data(
val_and_test_path,
ActiveMRIEnv._void_transform,
custom_split="val",
num_cols=self.KSPACE_WIDTH,
)
test_data = scknee_data.MICCAI2020Data(
val_and_test_path,
ActiveMRIEnv._void_transform,
custom_split="test",
num_cols=self.KSPACE_WIDTH,
)
return train_data, val_data, test_data
def _process_tensors_for_score_fns(
self, reconstruction: torch.Tensor, ground_truth: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
# Compute magnitude (for metrics)
reconstruction = activemri.data.transforms.to_magnitude(reconstruction, dim=3)
ground_truth = activemri.data.transforms.to_magnitude(ground_truth, dim=3)
reconstruction = activemri.data.transforms.center_crop(
reconstruction, (self.CENTER_CROP_SIZE, self.CENTER_CROP_SIZE)
)
ground_truth = activemri.data.transforms.center_crop(
ground_truth, (self.CENTER_CROP_SIZE, self.CENTER_CROP_SIZE)
)
return reconstruction, ground_truth
# -------------------------------------------------------------------------
# Public methods
# -------------------------------------------------------------------------
def reset(
self,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
obs, meta = super().reset()
if not obs:
return obs, meta
if self.obs_includes_padding:
obs["mask"][:, self.START_PADDING : self.END_PADDING] = 1
return obs, meta
def step(
self, action: Union[int, Sequence[int]]
) -> Tuple[Dict[str, Any], np.ndarray, List[bool], Dict]:
obs, reward, done, meta = super().step(action)
if self.obs_includes_padding:
obs["mask"][:, self.START_PADDING : self.END_PADDING] = 1
return obs, reward, done, meta
def render(self, mode="human"):
gt = self._current_ground_truth.cpu().numpy()
rec = self._current_reconstruction_numpy
gt = activemri.data.transforms.center_crop(
(gt ** 2).sum(axis=3) ** 0.5, (self.CENTER_CROP_SIZE, self.CENTER_CROP_SIZE)
)
rec = activemri.data.transforms.center_crop(
(rec ** 2).sum(axis=3) ** 0.5,
(self.CENTER_CROP_SIZE, self.CENTER_CROP_SIZE),
)
return ActiveMRIEnv._render_arrays(gt, rec, self._current_mask.cpu().numpy())
class FastMRIEnv(ActiveMRIEnv):
"""Base class for all fastMRI environments.
This class can be used to instantiate active acquisition environments using fastMRI
data. However, for convenience we provide subclasses of ``FastMRIEnv`` with
default configuration options for each dataset:
    - :class:`SingleCoilKneeEnv`
    - :class:`MultiCoilKneeEnv`
    - :class:`SingleCoilBrainEnv`
Args:
config_path(str): The path to the JSON configuration file.
dataset_name(str): One of "knee_singlecoil", "multicoil" (for knee),
"brain_multicoil". Primarily used to locate the fastMRI
dataset in the user's fastMRI data root folder.
        num_parallel_episodes(int): Determines the number of images that will be processed
simultaneously by :meth:`reset()` and :meth:`step()`.
Defaults to 1.
budget(optional(int)): The length of an acquisition episode. Defaults to ``None``,
                               which indicates that the episode will continue until all k-space
columns have been acquired.
seed(optional(int)): The seed for the environment's random number generator, which is
an instance of ``numpy.random.RandomState``. Defaults to ``None``.
num_cols(sequence(int)): Used to filter k-space data to only use images whose k-space
width is in this tuple. Defaults to ``(368, 372)``.
"""
def __init__(
self,
config_path: str,
dataset_name: str,
num_parallel_episodes: int = 1,
budget: Optional[int] = None,
seed: Optional[int] = None,
num_cols: Sequence[int] = (368, 372),
):
assert dataset_name in ["knee_singlecoil", "multicoil", "brain_multicoil"]
challenge = "singlecoil" if dataset_name == "knee_singlecoil" else "multicoil"
super().__init__(
(640, np.max(num_cols)),
num_parallel_episodes=num_parallel_episodes,
budget=budget,
seed=seed,
)
self.num_cols = num_cols
self.dataset_name = dataset_name
self.challenge = challenge
self._setup(config_path, self._create_dataset)
def _create_dataset(self) -> DataInitFnReturnType:
root_path = pathlib.Path(self._data_location)
datacache_dir = activemri.envs.util.maybe_create_datacache_dir()
train_path = root_path / f"{self.dataset_name}_train"
val_path = root_path / f"{self.dataset_name}_val"
val_cache_file = datacache_dir / f"val_{self.dataset_name}_cache.pkl"
test_path = root_path / f"{self.dataset_name}_test"
test_cache_file = datacache_dir / f"test_{self.dataset_name}_cache.pkl"
if not test_path.is_dir():
warnings.warn(
f"No test directory found for {self.dataset_name}. "
f"I will use val directory for test model (env.set_test())."
)
test_path = val_path
test_cache_file = val_cache_file
train_data = fastmri.data.SliceDataset(
train_path,
ActiveMRIEnv._void_transform,
challenge=self.challenge,
num_cols=self.num_cols,
dataset_cache_file=datacache_dir / f"train_{self.dataset_name}_cache.pkl",
)
val_data = fastmri.data.SliceDataset(
val_path,
ActiveMRIEnv._void_transform,
challenge=self.challenge,
num_cols=self.num_cols,
dataset_cache_file=val_cache_file,
)
test_data = fastmri.data.SliceDataset(
test_path,
ActiveMRIEnv._void_transform,
challenge=self.challenge,
num_cols=self.num_cols,
dataset_cache_file=test_cache_file,
)
return train_data, val_data, test_data
def render(self, mode="human"):
return ActiveMRIEnv._render_arrays(
self._current_ground_truth.cpu().numpy(),
self._current_reconstruction_numpy,
self._current_mask.cpu().numpy(),
)
class SingleCoilKneeEnv(FastMRIEnv):
"""Convenience class to access single-coil knee data.
Loads the configuration from ``configs/single-coil-knee.json``.
Looks for datasets named "knee_singlecoil_{train/val/test}" under the ``data_location`` dir.
If "test" is not found, it uses "val" folder for test mode.
Args:
        num_parallel_episodes(int): Determines the number of images that will be processed
simultaneously by :meth:`reset()` and :meth:`step()`.
Defaults to 1.
budget(optional(int)): The length of an acquisition episode. Defaults to ``None``,
                               which indicates that the episode will continue until all k-space
columns have been acquired.
seed(optional(int)): The seed for the environment's random number generator, which is
an instance of ``numpy.random.RandomState``. Defaults to ``None``.
num_cols(sequence(int)): Used to filter k-space data to only use images whose k-space
width is in this tuple. Defaults to ``(368, 372)``.
"""
def __init__(
self,
num_parallel_episodes: int = 1,
budget: Optional[int] = None,
seed: Optional[int] = None,
num_cols: Sequence[int] = (368, 372),
):
super().__init__(
"configs/single-coil-knee.json",
"knee_singlecoil",
num_parallel_episodes=num_parallel_episodes,
budget=budget,
seed=seed,
num_cols=num_cols,
)
class MultiCoilKneeEnv(FastMRIEnv):
"""Convenience class to access multi-coil knee data.
Loads the configuration from ``configs/multi-coil-knee.json``.
Looks for datasets named "multicoil_{train/val/test}" under default ``data_location`` dir.
If "test" is not found, it uses "val" folder for test mode.
Args:
        num_parallel_episodes(int): Determines the number of images that will be processed
simultaneously by :meth:`reset()` and :meth:`step()`.
Defaults to 1.
budget(optional(int)): The length of an acquisition episode. Defaults to ``None``,
                               which indicates that the episode will continue until all k-space
columns have been acquired.
seed(optional(int)): The seed for the environment's random number generator, which is
an instance of ``numpy.random.RandomState``. Defaults to ``None``.
num_cols(sequence(int)): Used to filter k-space data to only use images whose k-space
width is in this tuple. Defaults to ``(368, 372)``.
"""
def __init__(
self,
num_parallel_episodes: int = 1,
budget: Optional[int] = None,
seed: Optional[int] = None,
num_cols: Sequence[int] = (368, 372),
):
super().__init__(
"configs/multi-coil-knee.json",
"multicoil",
num_parallel_episodes=num_parallel_episodes,
budget=budget,
seed=seed,
num_cols=num_cols,
)
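if __name__ == "__main__":
    # Editor's sketch: a minimal random rollout over one batch of episodes. This
    # is illustrative usage, not part of the original module, and it assumes the
    # fastMRI single-coil knee data and reconstructor checkpoint are configured
    # (see configs/single-coil-knee.json and ~/.activemri/defaults.json).
    env = SingleCoilKneeEnv(num_parallel_episodes=1, budget=10, seed=0)
    env.set_test()
    obs, meta = env.reset()
    done = [False]
    while obs and not all(done):
        # cast to int: env.step() expects a plain int or a sequence of ints
        action = int(env.action_space.sample())
        obs, reward, done, meta = env.step(action)
        print("reward:", reward, "score:", meta["current_score"])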
| active-mri-acquisition-main | activemri/envs/envs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import json
import pathlib
from typing import Dict, Tuple
import numpy as np
import skimage.metrics
import torch
def get_user_dir() -> pathlib.Path:
return pathlib.Path.home() / ".activemri"
def maybe_create_datacache_dir() -> pathlib.Path:
datacache_dir = get_user_dir() / "__datacache__"
if not datacache_dir.is_dir():
datacache_dir.mkdir()
return datacache_dir
def get_defaults_json() -> Tuple[Dict[str, str], str]:
defaults_path = get_user_dir() / "defaults.json"
if not pathlib.Path.exists(defaults_path):
parent = defaults_path.parents[0]
parent.mkdir(exist_ok=True)
content = {"data_location": "", "saved_models_dir": ""}
with defaults_path.open("w", encoding="utf-8") as f:
json.dump(content, f)
else:
with defaults_path.open("r", encoding="utf-8") as f:
content = json.load(f)
return content, str(defaults_path)
def import_object_from_str(classname: str):
    the_module, the_object = classname.rsplit(".", 1)
module = importlib.import_module(the_module)
return getattr(module, the_object)
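# Editor's note: e.g., import_object_from_str("math.ceil") returns math.ceil,
# and import_object_from_str("numpy.linalg.det") returns numpy.linalg.det.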
def compute_ssim(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
ssims = []
for i in range(xs.shape[0]):
ssim = skimage.metrics.structural_similarity(
xs[i].cpu().numpy(),
ys[i].cpu().numpy(),
data_range=ys[i].cpu().numpy().max(),
)
ssims.append(ssim)
return np.array(ssims, dtype=np.float32)
def compute_psnr(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
psnrs = []
for i in range(xs.shape[0]):
psnr = skimage.metrics.peak_signal_noise_ratio(
xs[i].cpu().numpy(),
ys[i].cpu().numpy(),
data_range=ys[i].cpu().numpy().max(),
)
psnrs.append(psnr)
return np.array(psnrs, dtype=np.float32)
def compute_mse(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
dims = tuple(range(1, len(xs.shape)))
return np.mean((ys.cpu().numpy() - xs.cpu().numpy()) ** 2, axis=dims)
def compute_nmse(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
ys_numpy = ys.cpu().numpy()
nmses = []
for i in range(xs.shape[0]):
x = xs[i].cpu().numpy()
y = ys_numpy[i]
nmse = np.linalg.norm(y - x) ** 2 / np.linalg.norm(y) ** 2
nmses.append(nmse)
return np.array(nmses, dtype=np.float32)
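if __name__ == "__main__":
    # Editor's sketch (not part of the original module): all four metric helpers
    # are batch-wise, taking tensors of shape (batch, H, W) and returning one
    # float32 value per batch element.
    xs, ys = torch.rand(2, 32, 32), torch.rand(2, 32, 32)
    print("mse: ", compute_mse(xs, ys))
    print("nmse:", compute_nmse(xs, ys))
    print("ssim:", compute_ssim(xs, ys))
    print("psnr:", compute_psnr(xs, ys))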
| active-mri-acquisition-main | activemri/envs/util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = [
"ActiveMRIEnv",
"MICCAI2020Env",
"FastMRIEnv",
"SingleCoilKneeEnv",
"MultiCoilKneeEnv",
]
from .envs import (
ActiveMRIEnv,
FastMRIEnv,
MICCAI2020Env,
MultiCoilKneeEnv,
SingleCoilKneeEnv,
)
| active-mri-acquisition-main | activemri/envs/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
activemri.envs.masks.py
====================================
Utilities to generate and manipulate active acquisition masks.
"""
from typing import Any, Dict, List, Optional, Sequence, Tuple
import fastmri
import numpy as np
import torch
def update_masks_from_indices(
masks: torch.Tensor, indices: Sequence[int]
) -> torch.Tensor:
assert masks.shape[0] == len(indices)
new_masks = masks.clone()
for i in range(len(indices)):
new_masks[i, ..., indices[i]] = 1
return new_masks
def check_masks_complete(masks: torch.Tensor) -> List[bool]:
done = []
for mask in masks:
done.append(mask.bool().all().item())
return done
def sample_low_frequency_mask(
mask_args: Dict[str, Any],
kspace_shapes: List[Tuple[int, ...]],
rng: np.random.RandomState,
attrs: Optional[List[Dict[str, Any]]] = None,
) -> torch.Tensor:
"""Samples low frequency masks.
Returns masks that contain some number of the lowest k-space frequencies active.
The number of frequencies doesn't have to be the same for all masks in the batch, and
it can also be a random number, depending on the given ``mask_args``. Active columns
will be represented as 1s in the mask, and inactive columns as 0s.
The distribution and shape of the masks can be controlled by ``mask_args``. This is a
dictionary with the following keys:
- *"max_width"(int)*: The maximum width of the masks.
- *"min_cols"(int)*: The minimum number of low frequencies columns to activate per side.
- *"max_cols"(int)*: The maximum number of low frequencies columns to activate
per side (inclusive).
- *"width_dim"(int)*: Indicates which of the dimensions in ``kspace_shapes``
corresponds to the k-space width.
- *"centered"(bool)*: Specifies if the low frequencies are in the center of the
k-space (``True``) or on the edges (``False``).
- *"apply_attrs_padding"(optional(bool))*: If ``True``, the function will read
keys ``"padding_left"`` and ``"padding_right"`` from ``attrs`` and set all
corresponding high-frequency columns to 1.
    The number of low-frequency columns activated per side is sampled between
    ``mask_args["min_cols"]`` and ``mask_args["max_cols"]`` (inclusive), so the effective
    region of the mask (see next paragraph) contains twice that many 1s.
The number of dimensions for the mask tensor will be ``mask_args["width_dim"] + 2``.
The size will be ``[batch_size, 1, ..., 1, mask_args["max_width"]]``. For example, with
``mask_args["width_dim"] = 1`` and ``mask_args["max_width"] = 368``, output tensor
has shape ``[batch_size, 1, 368]``.
    This function supports simultaneously sampling masks for k-spaces with different
    numbers of columns. This is controlled by argument ``kspace_shapes``. From this list,
    the function will obtain 1) ``batch_size = len(kspace_shapes)``, and 2) the width of
    the k-space for each element in the batch. The i-th mask will have
    ``kspace_shapes[i][mask_args["width_dim"]]`` *effective* columns.
Note:
The mask tensor returned will always have
``mask_args["max_width"]`` columns. However, for any element ``i``
s.t. ``kspace_shapes[i][mask_args["width_dim"]] < mask_args["max_width"]``, the
function will then pad the extra k-space columns with 1s. The rest of the columns
will be filled out as if the mask has the same width as that indicated by
        ``kspace_shapes[i]``.
Args:
mask_args(dict(str,any)): Specifies configuration options for the masks, as explained
above.
kspace_shapes(list(tuple(int,...))): Specifies the shapes of the k-space data on
which this mask will be applied, as explained above.
rng(``np.random.RandomState``): A random number generator to sample the masks.
        attrs(optional(list(dict(str,any)))): Used to determine any high-frequency padding.
            When ``mask_args["apply_attrs_padding"]`` is set, each element must contain
            keys ``"padding_left"`` and ``"padding_right"``.
Returns:
``torch.Tensor``: The generated low frequency masks.
"""
batch_size = len(kspace_shapes)
num_cols = [shape[mask_args["width_dim"]] for shape in kspace_shapes]
mask = torch.zeros(batch_size, mask_args["max_width"])
num_low_freqs = rng.randint(
mask_args["min_cols"], mask_args["max_cols"] + 1, size=batch_size
)
for i in range(batch_size):
# If padding needs to be accounted for, only add low frequency lines
# beyond the padding
if attrs and mask_args.get("apply_attrs_padding", False):
padding_left = attrs[i]["padding_left"]
padding_right = attrs[i]["padding_right"]
else:
padding_left, padding_right = 0, num_cols[i]
pad = (num_cols[i] - 2 * num_low_freqs[i] + 1) // 2
mask[i, pad : pad + 2 * num_low_freqs[i]] = 1
mask[i, :padding_left] = 1
mask[i, padding_right : num_cols[i]] = 1
if not mask_args["centered"]:
mask[i, : num_cols[i]] = fastmri.ifftshift(mask[i, : num_cols[i]])
mask[i, num_cols[i] : mask_args["max_width"]] = 1
mask_shape = [batch_size] + [1] * (mask_args["width_dim"] + 1)
mask_shape[mask_args["width_dim"] + 1] = mask_args["max_width"]
return mask.view(*mask_shape)
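if __name__ == "__main__":
    # Editor's sketch (not part of the original module): sample masks for two
    # k-spaces of widths 368 and 372. All mask_args values below are
    # illustrative, not package defaults.
    example_args = {
        "max_width": 372,
        "width_dim": 1,
        "min_cols": 1,
        "max_cols": 4,
        "centered": True,
    }
    example_rng = np.random.RandomState(0)
    example_mask = sample_low_frequency_mask(
        example_args, [(640, 368), (640, 372)], example_rng
    )
    print(example_mask.shape)                  # torch.Size([2, 1, 372])
    print(check_masks_complete(example_mask))  # [False, False]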
| active-mri-acquisition-main | activemri/envs/masks.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
activemri.data.transforms.py
====================================
Transform functions to process fastMRI data for reconstruction models.
"""
from typing import Tuple, Union
import fastmri
import fastmri.data.transforms as fastmri_transforms
import numpy as np
import torch
import activemri.data.singlecoil_knee_data as scknee_data
TensorType = Union[np.ndarray, torch.Tensor]
def to_magnitude(tensor: torch.Tensor, dim: int) -> torch.Tensor:
return (tensor ** 2).sum(dim=dim) ** 0.5
def center_crop(x: TensorType, shape: Tuple[int, int]) -> TensorType:
"""Center crops a tensor to the desired 2D shape.
Args:
x(union(``torch.Tensor``, ``np.ndarray``)): The tensor to crop.
Shape should be ``(batch_size, height, width)``.
shape(tuple(int,int)): The desired shape to crop to.
Returns:
(union(``torch.Tensor``, ``np.ndarray``)): The cropped tensor.
"""
assert len(x.shape) == 3
assert 0 < shape[0] <= x.shape[1]
assert 0 < shape[1] <= x.shape[2]
    h_from = (x.shape[1] - shape[0]) // 2
    w_from = (x.shape[2] - shape[1]) // 2
    # shape[0] is the crop height and shape[1] the crop width
    h_to = h_from + shape[0]
    w_to = w_from + shape[1]
x = x[:, h_from:h_to, w_from:w_to]
return x
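# Editor's note: e.g., for a batch of (640, 368) images,
#     center_crop(torch.zeros(4, 640, 368), (320, 320)).shape
# gives torch.Size([4, 320, 320]).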
def ifft_permute_maybe_shift(
x: torch.Tensor, normalized: bool = False, ifft_shift: bool = False
) -> torch.Tensor:
x = x.permute(0, 2, 3, 1)
y = torch.ifft(x, 2, normalized=normalized)
if ifft_shift:
y = fastmri.ifftshift(y, dim=(1, 2))
return y.permute(0, 3, 1, 2)
def raw_transform_miccai2020(kspace=None, mask=None, **_kwargs):
"""Transform to produce input for reconstructor used in `Pineda et al. MICCAI'20 <https://arxiv.org/pdf/2007.10469.pdf>`_.
Produces a zero-filled reconstruction and a mask that serve as a input to models of type
    :class:`activemri.models.cvpr19_reconstructor.CVPR19Reconstructor`. The mask is almost
equal to the mask passed as argument, except that high-frequency padding columns are set
to 1, and the mask is reshaped to be compatible with the reconstructor.
Args:
kspace(``np.ndarray``): The array containing the k-space data returned by the dataset.
mask(``torch.Tensor``): The masks to apply to the k-space.
Returns:
tuple: A tuple containing:
            - ``torch.Tensor``: The zero-filled reconstruction that will be passed to the
reconstructor.
- ``torch.Tensor``: The mask to use as input to the reconstructor.
"""
# alter mask to always include the highest frequencies that include padding
mask[
:,
:,
scknee_data.MICCAI2020Data.START_PADDING : scknee_data.MICCAI2020Data.END_PADDING,
] = 1
mask = mask.unsqueeze(1)
all_kspace = []
for ksp in kspace:
all_kspace.append(torch.from_numpy(ksp).permute(2, 0, 1))
k_space = torch.stack(all_kspace)
masked_true_k_space = torch.where(
mask.byte(),
k_space,
torch.tensor(0.0).to(mask.device),
)
reconstructor_input = ifft_permute_maybe_shift(masked_true_k_space, ifft_shift=True)
return reconstructor_input, mask
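# Editor's note (shape sketch for a hypothetical batch): for a list of two
# single-coil k-space arrays of shape (640, 368, 2) and a mask of shape
# (2, 1, 368), the function above returns a (2, 2, 640, 368) zero-filled
# reconstruction and a (2, 1, 1, 368) mask.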
# Based on
# https://github.com/facebookresearch/fastMRI/blob/master/experimental/unet/unet_module.py
def _base_fastmri_unet_transform(
kspace,
mask,
ground_truth,
attrs,
which_challenge="singlecoil",
):
kspace = fastmri_transforms.to_tensor(kspace)
mask = mask[..., : kspace.shape[-2]] # accounting for variable size masks
masked_kspace = kspace * mask.unsqueeze(-1) + 0.0
# inverse Fourier transform to get zero filled solution
image = fastmri.ifft2c(masked_kspace)
# crop input to correct size
if ground_truth is not None:
crop_size = (ground_truth.shape[-2], ground_truth.shape[-1])
else:
crop_size = (attrs["recon_size"][0], attrs["recon_size"][1])
# check for FLAIR 203
if image.shape[-2] < crop_size[1]:
crop_size = (image.shape[-2], image.shape[-2])
# noinspection PyTypeChecker
image = fastmri_transforms.complex_center_crop(image, crop_size)
# absolute value
image = fastmri.complex_abs(image)
# apply Root-Sum-of-Squares if multicoil data
if which_challenge == "multicoil":
image = fastmri.rss(image)
# normalize input
image, mean, std = fastmri_transforms.normalize_instance(image, eps=1e-11)
image = image.clamp(-6, 6)
return image.unsqueeze(0), mean, std
def _batched_fastmri_unet_transform(
kspace, mask, ground_truth, attrs, which_challenge="singlecoil"
):
batch_size = len(kspace)
images, means, stds = [], [], []
for i in range(batch_size):
image, mean, std = _base_fastmri_unet_transform(
kspace[i],
mask[i],
ground_truth[i],
attrs[i],
which_challenge=which_challenge,
)
images.append(image)
means.append(mean)
stds.append(std)
return torch.stack(images), torch.stack(means), torch.stack(stds)
# noinspection PyUnusedLocal
def fastmri_unet_transform_singlecoil(
kspace=None, mask=None, ground_truth=None, attrs=None, fname=None, slice_id=None
):
"""
Transform to use as input to fastMRI's Unet model for singlecoil data.
This is an adapted version of the code found in
`fastMRI <https://github.com/facebookresearch/fastMRI/blob/master/experimental/unet/unet_module.py#L190>`_.
"""
return _batched_fastmri_unet_transform(
kspace, mask, ground_truth, attrs, "singlecoil"
)
# noinspection PyUnusedLocal
def fastmri_unet_transform_multicoil(
kspace=None, mask=None, ground_truth=None, attrs=None, fname=None, slice_id=None
):
"""Transform to use as input to fastMRI's Unet model for multicoil data.
This is an adapted version of the code found in
`fastMRI <https://github.com/facebookresearch/fastMRI/blob/master/experimental/unet/unet_module.py#L190>`_.
"""
return _batched_fastmri_unet_transform(
kspace, mask, ground_truth, attrs, "multicoil"
)
| active-mri-acquisition-main | activemri/data/transforms.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pathlib
from typing import Callable, List, Optional, Tuple
import fastmri
import h5py
import numpy as np
import torch.utils.data
# -----------------------------------------------------------------------------
# Single coil knee dataset (as used in MICCAI'20)
# -----------------------------------------------------------------------------
class MICCAI2020Data(torch.utils.data.Dataset):
# This is the same as fastMRI singlecoil_knee, except we provide a custom test split
# and also normalize images by the mean norm of the k-space over training data
KSPACE_WIDTH = 368
KSPACE_HEIGHT = 640
START_PADDING = 166
END_PADDING = 202
CENTER_CROP_SIZE = 320
def __init__(
self,
root: pathlib.Path,
transform: Callable,
num_cols: Optional[int] = None,
num_volumes: Optional[int] = None,
num_rand_slices: Optional[int] = None,
custom_split: Optional[str] = None,
):
self.transform = transform
self.examples: List[Tuple[pathlib.PurePath, int]] = []
self.num_rand_slices = num_rand_slices
self.rng = np.random.RandomState(1234)
files = []
        for fname in list(pathlib.Path(root).iterdir()):
            # close the file handle after checking the k-space shape
            with h5py.File(fname, "r") as data:
                if num_cols is not None and data["kspace"].shape[2] != num_cols:
                    continue
            files.append(fname)
if custom_split is not None:
split_info = []
with open(f"activemri/data/splits/knee_singlecoil/{custom_split}.txt") as f:
for line in f:
split_info.append(line.rsplit("\n")[0])
files = [f for f in files if f.name in split_info]
if num_volumes is not None:
self.rng.shuffle(files)
files = files[:num_volumes]
        for volume_i, fname in enumerate(sorted(files)):
            # close the file handle after reading the number of slices
            with h5py.File(fname, "r") as data:
                num_slices = data["kspace"].shape[0]
            if num_rand_slices is None:
                self.examples += [(fname, slice_id) for slice_id in range(num_slices)]
            else:
                slice_ids = list(range(num_slices))
                self.rng.seed(seed=volume_i)
                self.rng.shuffle(slice_ids)
                self.examples += [
                    (fname, slice_id) for slice_id in slice_ids[:num_rand_slices]
                ]
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
fname, slice_id = self.examples[i]
with h5py.File(fname, "r") as data:
kspace = data["kspace"][slice_id]
kspace = torch.from_numpy(np.stack([kspace.real, kspace.imag], axis=-1))
kspace = fastmri.ifftshift(kspace, dim=(0, 1))
target = torch.ifft(kspace, 2, normalized=False)
target = fastmri.ifftshift(target, dim=(0, 1))
# Normalize using mean of k-space in training data
target /= 7.072103529760345e-07
kspace /= 7.072103529760345e-07
# Environment expects numpy arrays. The code above was used with an older
# version of the environment to generate the results of the MICCAI'20 paper.
# So, to keep this consistent with the version in the paper, we convert
# the tensors back to numpy rather than changing the original code.
kspace = kspace.numpy()
target = target.numpy()
return self.transform(
kspace,
torch.zeros(kspace.shape[1]),
target,
dict(data.attrs),
fname.name,
slice_id,
)
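# Editor's note: typical construction (the path below is hypothetical and must
# point at a fastMRI single-coil knee download):
#
#     dataset = MICCAI2020Data(
#         pathlib.Path("/path/to/knee_singlecoil_val"),
#         transform=some_transform,
#         custom_split="val",
#         num_cols=368,
#     )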
| active-mri-acquisition-main | activemri/data/singlecoil_knee_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List
import numpy as np
import torch
from . import singlecoil_knee_data
from . import transforms
__all__ = ["singlecoil_knee_data", "transforms"]
def transform_template(
kspace: List[np.ndarray] = None,
mask: torch.Tensor = None,
ground_truth: torch.Tensor = None,
attrs: List[Dict[str, Any]] = None,
fname: List[str] = None,
slice_id: List[int] = None,
):
"""Template for transform functions.
Args:
- kspace(list(np.ndarray)): A list of complex numpy arrays, one per k-space in the batch.
The length is the ``batch_size``, and array shapes are ``H x W x 2`` for single coil data,
and ``C x H x W x 2`` for multicoil data, where ``H`` denotes k-space height, ``W``
denotes k-space width, and ``C`` is the number of coils. Note that the width can differ
between batch elements, if ``num_cols`` is set to a tuple when creating the environment.
- mask(torch.Tensor): A tensor of binary column masks, where 1s indicate that the
corresponding k-space column should be selected. The shape is ``batch_size x 1 x maxW``,
for single coil data, and ``batch_size x 1 x 1 x maxW`` for multicoil data. Here ``maxW``
is the maximum k-space width returned by the environment.
- ground_truth(torch.Tensor): A tensor of ground truth 2D images. The shape is
``batch_size x 320 x 320``.
- attrs(list(dict)): A list of dictionaries with the attributes read from the fastMRI for
each image.
- fname(list(str)): A list of the filenames where the images where read from.
- slice_id(list(int)): A list with the slice ids in the files where each image was read
from.
Returns:
tuple(Any...): A tuple with any number of inputs required by the reconstructor model.
"""
pass
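# Editor's sketch: a minimal transform conforming to the template above. It
# stacks the per-example k-space arrays into a single batch tensor (assuming
# all k-spaces in the batch share a shape) and passes the mask through
# unchanged. This is an illustration only, not one of the transforms shipped
# with the package.
def _example_identity_transform(
    kspace: List[np.ndarray] = None,
    mask: torch.Tensor = None,
    ground_truth: torch.Tensor = None,
    attrs: List[Dict[str, Any]] = None,
    fname: List[str] = None,
    slice_id: List[int] = None,
):
    batched_kspace = torch.stack([torch.from_numpy(k) for k in kspace])
    return batched_kspace, mask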
| active-mri-acquisition-main | activemri/data/__init__.py |
active-mri-acquisition-main | tests/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import numpy as np
import pytest # noqa: F401
import torch
import activemri.envs.envs as envs
import activemri.envs.util as util
from . import mocks
def test_import_object_from_str():
ceil = util.import_object_from_str("math.ceil")
assert 3 == ceil(2.5)
det = util.import_object_from_str("numpy.linalg.det")
assert det(np.array([[1, 0], [0, 1]])) == 1
def test_random_cyclic_sampler_default_order():
alist = [0, 1, 2]
sampler = envs.CyclicSampler(alist, None, loops=10)
cnt = 0
for i, x in enumerate(sampler):
assert alist[x] == i % 3
cnt += 1
assert cnt == 30
def test_random_cyclic_sampler_default_given_order():
alist = [1, 2, 0]
sampler = envs.CyclicSampler(alist, order=[2, 0, 1], loops=10)
cnt = 0
for i, x in enumerate(sampler):
assert alist[x] == i % 3
cnt += 1
assert cnt == 30
def test_data_handler():
data = list(range(10))
batch_size = 2
loops = 3
handler = envs.DataHandler(data, None, batch_size=batch_size, loops=loops)
cnt = dict([(x, 0) for x in data])
for x in handler:
assert len(x) == batch_size
for t in x:
v = t.item()
cnt[v] = cnt[v] + 1
for x in cnt:
assert cnt[x] == loops
# noinspection PyProtectedMember,PyClassHasNoInit
class TestActiveMRIEnv:
def test_init_from_config_dict(self):
env = envs.ActiveMRIEnv((32, 64))
env._init_from_config_dict(mocks.config_dict)
assert env.reward_metric == "ssim"
assert type(env._reconstructor) == mocks.Reconstructor
assert env._reconstructor.option1 == 1
assert env._reconstructor.option2 == 0.5
assert env._reconstructor.option3 == "dummy"
assert env._reconstructor.option4
assert env._reconstructor.weights == "init"
assert env._reconstructor._eval
assert env._reconstructor.device == torch.device("cpu")
assert env._transform("x", "m") == ("x", "m")
batch_size = 3
shapes = [(1, 2) for _ in range(batch_size)]
mask = env._mask_func(shapes, "rng")
assert mask.shape == (batch_size, env._cfg["mask"]["args"]["size"])
def test_init_sets_action_space(self):
env = envs.ActiveMRIEnv((32, 64))
for i in range(64):
assert env.action_space.contains(i)
assert env.action_space.n == 64
def test_reset_and_step(self):
# the mock environment is set up to use mocks.Reconstructor
# and mocks.mask_function.
# The mask and data will be tensors of size D (env._tensor_size)
# Initial mask will be:
# [1 1 1 0 0 .... 0] (needs 7 actions)
# [1 1 0 0 0 .... 0] (needs 8 actions)
# Ground truth is X * ones(D, D)
        # K-space is (X - 1) * ones(D, D)
# Reconstruction is K-space + Mask. So, with the initial mask we have
# sum |reconstruction - gt| = D^2 - 3D for first element of batch,
# and = D^2 - 2D for second element.
env = mocks.MRIEnv(num_parallel_episodes=2, loops_train=1, num_train=2)
obs, _ = env.reset()
# env works with shape (batch, height, width, {real/img})
assert tuple(obs["reconstruction"].shape) == (
env.num_parallel_episodes,
env._tensor_size,
env._tensor_size,
2,
)
assert "ssim" in env._current_score
mask_idx0_initial_active = env._cfg["mask"]["args"]["how_many"]
mask_idx1_initial_active = mask_idx0_initial_active - 1
def expected_score(step):
# See explanation above, plus every steps adds one more 1 to mask.
s = env._tensor_size
total = s ** 2
return 2 * (
(total - (mask_idx0_initial_active + step) * s)
+ (total - (mask_idx1_initial_active + step) * s)
)
assert env._current_score["ssim"] == expected_score(0)
prev_score = env._current_score["ssim"]
for action in range(mask_idx0_initial_active, env._tensor_size):
obs, reward, done, _ = env.step(action)
assert env._current_score["ssim"] == expected_score(
action - mask_idx1_initial_active
)
assert reward == env._current_score["ssim"] - prev_score
prev_score = env._current_score["ssim"]
if action < 9:
assert done == [False, False]
else:
assert done == [True, False]
obs, reward, done, _ = env.step(mask_idx1_initial_active)
assert env._current_score["ssim"] == 0.0
assert reward == -prev_score
assert done == [True, True]
def test_training_loop_ends(self):
env = envs.ActiveMRIEnv((32, 64), num_parallel_episodes=3)
env._num_loops_train_data = 3
env._init_from_config_dict(mocks.config_dict)
env._compute_score_given_tensors = lambda x, y: {"mock": 0}
num_train = 10
tensor_size = env._cfg["mask"]["args"]["size"]
data_init_fn = mocks.make_data_init_fn(tensor_size, num_train, 0, 0)
env._setup_data_handlers(data_init_fn)
seen = dict([(x, 0) for x in range(num_train)])
for _ in range(1000):
obs, meta = env.reset()
if not obs:
cnt_seen = functools.reduce(lambda x, y: x + y, seen.values())
assert cnt_seen == num_train * env._num_loops_train_data
break
slice_ids = meta["slice_id"]
for slice_id in slice_ids:
assert slice_id < num_train
seen[slice_id] = seen[slice_id] + 1
for i in range(num_train):
assert seen[i] == env._num_loops_train_data
def test_alternate_loop_modes(self):
# This tests if the environment can change correctly between train, val, and test
# datasets.
num_train, num_val, num_test = 10, 7, 5
env = mocks.MRIEnv(
num_parallel_episodes=1,
loops_train=2,
num_train=num_train,
num_val=num_val,
num_test=num_test,
)
# For each iteration of train data we will do a full loop over validation
# and a partial loop over test.
seen_train = dict([(x, 0) for x in range(num_train)])
seen_val = dict([(x, 0) for x in range(num_val)])
seen_test = dict([(x, 0) for x in range(num_test)])
for i in range(1000):
env.set_training()
obs, meta = env.reset()
if not obs:
break
for slice_id in meta["slice_id"]:
seen_train[slice_id] = seen_train[slice_id] + 1
env.set_val()
for j in range(num_val + 1):
obs, meta = env.reset()
if not obs:
cnt_seen = functools.reduce(lambda x, y: x + y, seen_val.values())
assert cnt_seen == (i + 1) * num_val
break
assert j < num_val
for slice_id in meta["slice_id"]:
seen_val[slice_id] = seen_val[slice_id] + 1
# With num_test - 1 we check that next call starts from 0 index again
# even if not all images visited. One of the elements in test set should have
# never been seen (data_handler will permute the indices so we don't know
# which index it will be)
env.set_test()
for _ in range(num_test - 1):
obs, meta = env.reset()
assert obs
for slice_id in meta["slice_id"]:
seen_test[slice_id] = seen_test[slice_id] + 1
for i in range(num_train):
assert seen_train[i] == env._num_loops_train_data
for i in range(num_val):
assert seen_val[i] == env._num_loops_train_data * num_train
cnt_not_seen = 0
for i in range(num_test):
if seen_test[i] != 0:
assert seen_test[i] == env._num_loops_train_data * num_train
else:
cnt_not_seen += 1
assert cnt_not_seen == 1
def test_seed(self):
num_train = 10
env = mocks.MRIEnv(
num_parallel_episodes=1, loops_train=1, num_train=num_train, seed=0
)
def get_current_order():
order = []
for _ in range(num_train):
obs, _ = env.reset()
order.append(obs["reconstruction"].sum().int().item())
return order
order_1 = get_current_order()
env.seed(123)
order_2 = get_current_order()
env.seed(0)
order_3 = get_current_order()
assert set(order_1) == set(order_2)
assert any([a != b for a, b in zip(order_1, order_2)])
assert all([a == b for a, b in zip(order_1, order_3)])
| active-mri-acquisition-main | tests/core/test_envs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest # noqa: F401
import torch
import activemri.envs.masks as masks
def test_update_masks_from_indices():
mask_1 = torch.tensor([[1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], dtype=torch.uint8)
mask_2 = torch.tensor([[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0]], dtype=torch.uint8)
mask = torch.stack([mask_1, mask_2])
mask = masks.update_masks_from_indices(mask, np.array([2, 0]))
assert mask.shape == torch.Size([2, 3, 4])
expected = torch.tensor(
[[1, 0, 1, 0], [1, 0, 1, 0], [1, 0, 1, 0]], dtype=torch.uint8
).repeat(2, 1, 1)
assert (mask - expected).sum().item() == 0
def test_sample_low_freq_masks():
for centered in [True, False]:
max_width = 20
mask_args = {
"max_width": max_width,
"width_dim": 1,
"min_cols": 1,
"max_cols": 4,
"centered": centered,
}
rng = np.random.RandomState()
widths = [10, 12, 18, 20]
seen_cols = set()
for i in range(1000):
dummy_shapes = [(0, w) for w in widths] # w is in args.width_dim
the_masks = masks.sample_low_frequency_mask(mask_args, dummy_shapes, rng)
assert the_masks.shape == (len(widths), 1, 20)
the_masks = the_masks.squeeze()
for j, w in enumerate(widths):
# Mask is symmetrical
assert torch.all(
the_masks[j, : w // 2]
== torch.flip(the_masks[j, w // 2 : w], dims=[0])
)
# Extra columns set to one so that they are not valid actions
assert the_masks[j, w:].sum().item() == max_width - w
# Check that the number of columns is in the correct range
active = the_masks[j, :w].sum().int().item()
assert active >= 2 * mask_args["min_cols"]
assert active <= 2 * mask_args["max_cols"]
seen_cols.add(active // 2)
# These masks should be either something like
# 1100000011|111111111 (not centered)
# 0000110000|111111111 (centered)
# The lines below check for this
prev = the_masks[j, 0]
changed = False
for k in range(1, w // 2):
cur = the_masks[j, k]
if cur != prev:
assert not changed
changed = True
prev = cur
assert changed
if centered:
assert not the_masks[j, 0]
else:
assert the_masks[j, 0]
# Check that masks were sampled with all possible number of active cols
assert len(seen_cols) == (mask_args["max_cols"] - mask_args["min_cols"] + 1)
| active-mri-acquisition-main | tests/core/test_masks.py |
active-mri-acquisition-main | tests/core/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest # noqa: F401
import torch
import activemri.baselines as baselines
def test_random():
policy = baselines.RandomPolicy()
bs = 4
mask = torch.zeros(bs, 10)
mask[:, :3] = 1
mask[0, :7] = 1
obs = {"mask": mask}
steps = 5
for i in range(steps):
action = policy(obs)
assert len(action) == bs
for j in range(bs):
if j > 0 or (j == 0 and i < 3):
assert obs["mask"][j, action[j]] == 0
obs["mask"][j, action[j]] = 1
assert obs["mask"].sum().item() == 34
def test_low_to_high_no_alternate():
policy = baselines.LowestIndexPolicy(alternate_sides=False, centered=False)
mask = torch.zeros(2, 10)
mask[0, 0::2] = 1
mask[1, 1::2] = 1
obs = {"mask": mask}
for i in range(5):
action = policy(obs)
assert len(action) == 2
assert action[0] == 2 * i + 1
assert action[1] == 2 * i
obs["mask"][:, action] = 1
assert obs["mask"].sum().item() == 20
def test_low_to_high_alternate():
policy = baselines.LowestIndexPolicy(alternate_sides=True, centered=False)
mask = torch.zeros(2, 10)
mask[0, 0::2] = 1
mask[1, 1::2] = 1
obs = {"mask": mask}
order = [[1, 9, 3, 7, 5], [0, 8, 2, 6, 4]]
for i in range(5):
action = policy(obs)
assert len(action) == 2
assert action[0] == order[0][i]
assert action[1] == order[1][i]
obs["mask"][:, action] = 1
assert obs["mask"].sum().item() == 20
def test_low_to_high_alternate_centered():
policy = baselines.LowestIndexPolicy(alternate_sides=True, centered=True)
mask = torch.zeros(2, 10)
mask[0, 0::2] = 1
mask[1, 1::2] = 1
obs = {"mask": mask}
order = [[5, 3, 7, 1, 9], [6, 4, 8, 2, 0]]
for i in range(5):
action = policy(obs)
assert len(action) == 2
assert action[0] == order[0][i]
assert action[1] == order[1][i]
obs["mask"][:, action] = 1
assert obs["mask"].sum().item() == 20
| active-mri-acquisition-main | tests/core/test_baselines.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from typing import Dict
import numpy as np
import torch
import activemri.envs.envs as envs
cfg_json_str = """
{
"data_location": "dummy_location",
"reconstructor": {
"cls": "tests.core.mocks.Reconstructor",
"options": {
"option1": 1,
"option2": 0.5,
"option3": "dummy",
"option4": true
},
"checkpoint_fname": "null",
"transform": "tests.core.mocks.transform"
},
"mask": {
"function": "tests.core.mocks.mask_func",
"args": {
"size": 10,
"how_many": 3
}
},
"reward_metric": "ssim",
"device": "cpu"
}
"""
config_dict = json.loads(cfg_json_str)
class Dataset:
def __init__(self, tensor_size, length):
self.tensor_size = tensor_size
self.length = length
def __len__(self):
return self.length
def __getitem__(self, item):
mock_kspace = (item + 1) * np.ones(
(self.tensor_size, self.tensor_size, 2) # 2 is for mocking (real, img.)
)
mock_mask = np.zeros(self.tensor_size)
mock_ground_truth = mock_kspace + 1
return mock_kspace, mock_mask, mock_ground_truth, {}, "fname", item
def make_data_init_fn(tensor_size, num_train, num_val, num_test):
train_data = Dataset(tensor_size, num_train)
val_data = Dataset(tensor_size, num_val)
test_data = Dataset(tensor_size, num_test)
def data_init_fn():
return train_data, val_data, test_data
return data_init_fn
# noinspection PyUnusedLocal
def mask_func(args, kspace_shapes, _rng, attrs=None):
batch_size = len(kspace_shapes)
mask = torch.zeros(batch_size, args["size"])
mask[0, : args["how_many"]] = 1
if batch_size > 1:
mask[1, : args["how_many"] - 1] = 1
return mask
def transform(kspace=None, mask=None, **_kwargs):
if isinstance(mask, torch.Tensor):
mask = mask.view(mask.shape[0], 1, -1, 1)
elif isinstance(mask, np.ndarray):
mask = torch.from_numpy(mask)
if isinstance(kspace, list):
new_kspace = []
for array in kspace:
new_kspace.append(torch.from_numpy(array))
return torch.stack(new_kspace), mask
return kspace, mask
# noinspection PyMethodMayBeStatic
class Reconstructor:
def __init__(self, **kwargs):
self.option1 = kwargs["option1"]
self.option2 = kwargs["option2"]
self.option3 = kwargs["option3"]
self.option4 = kwargs["option4"]
self.weights = None
self._eval = None
self.device = None
self.state_dict = {}
def init_from_checkpoint(self, _checkpoint):
self.weights = "init"
def eval(self):
self._eval = True
def to(self, device):
self.device = device
def forward(self, kspace, mask):
return {"reconstruction": kspace + mask}
__call__ = forward
def load_state_dict(self):
pass
class MRIEnv(envs.ActiveMRIEnv):
def __init__(
self,
num_parallel_episodes,
loops_train,
num_train=1,
num_val=1,
num_test=1,
seed=None,
):
super().__init__(
(32, 64), num_parallel_episodes=num_parallel_episodes, seed=seed
)
self._num_loops_train_data = loops_train
self._init_from_config_dict(config_dict)
self._tensor_size = self._cfg["mask"]["args"]["size"]
data_init_fn = make_data_init_fn(
self._tensor_size, num_train, num_val, num_test
)
self._setup_data_handlers(data_init_fn)
@staticmethod
def _compute_score_given_tensors(
reconstruction: torch.Tensor, ground_truth: torch.Tensor
) -> Dict[str, np.ndarray]:
return {"ssim": (reconstruction - ground_truth).abs().sum().numpy()}
| active-mri-acquisition-main | tests/core/mocks.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import pytest # noqa: F401
import activemri.envs.util
def test_all_configs():
configs_root = "configs/"
for fname in os.listdir(configs_root):
with open(os.path.join(configs_root, fname), "r") as f:
cfg = json.load(f)
assert "data_location" in cfg
assert "device" in cfg
assert "reward_metric" in cfg
assert "mask" in cfg
mask_cfg = cfg["mask"]
try:
_ = activemri.envs.util.import_object_from_str(mask_cfg["function"])
except ModuleNotFoundError:
print(f"Mask function in config file {fname} was not found.")
assert False
assert "args" in mask_cfg and isinstance(mask_cfg["args"], dict)
assert "reconstructor" in cfg
reconstructor_cfg = cfg["reconstructor"]
assert "cls" in reconstructor_cfg
try:
_ = activemri.envs.util.import_object_from_str(reconstructor_cfg["cls"])
except ModuleNotFoundError:
print(f"Reconstructor class in config file {fname} was not found.")
assert False
assert "options" in reconstructor_cfg
assert "checkpoint_fname" in reconstructor_cfg
assert "transform" in reconstructor_cfg
try:
_ = activemri.envs.util.import_object_from_str(
reconstructor_cfg["transform"]
)
except ModuleNotFoundError:
print(f"Transform function in config file {fname} was not found.")
assert False
| active-mri-acquisition-main | tests/fastmri/test_configs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest # noqa: F401
import activemri.envs.envs as envs
class TestMICCAIEnv:
env = envs.MICCAI2020Env()
def test_miccai_env_batch_content(self):
for i, batch in enumerate(self.env._train_data_handler):
# No check below for batch[1], since it's the mask and will be replaced later
for j in [0, 1, 3, 4, 5]:
assert isinstance(batch[j], list)
assert len(batch[j]) == self.env.num_parallel_episodes
for batch_idx in range(self.env.num_parallel_episodes):
assert isinstance(batch[0][batch_idx], np.ndarray)
assert batch[0][batch_idx].shape == (
640,
368,
2,
) # k-space
assert isinstance(batch[2][batch_idx], np.ndarray)
assert batch[2][batch_idx].shape == (640, 368, 2) # ground truth image
# data.attrs
assert len(batch[3][batch_idx]) == 4
for key in ["norm", "max", "patient_id", "acquisition"]:
assert key in batch[3][batch_idx]
# file name
assert isinstance(batch[4][batch_idx], str)
# slice_id
assert isinstance(batch[5][batch_idx], int)
if i == 10:
break
def test_miccai_reset(self):
obs, _ = self.env.reset()
assert len(obs) == 3
assert "reconstruction" in obs
assert "mask" in obs
assert "extra_outputs" in obs
assert obs["reconstruction"].shape == (
self.env.num_parallel_episodes,
640,
368,
2,
)
assert obs["mask"].shape == (self.env.num_parallel_episodes, 368)
class TestSingleCoilKneeEnv:
env = envs.SingleCoilKneeEnv()
def test_singlecoil_knee_env_batch_content(self):
for i, batch in enumerate(self.env._train_data_handler):
# No check below for batch[1], since it's the mask and will be replaced later
kspace, _, ground_truth, attrs, fname, slice_id = batch
for j in [0, 1, 3, 4, 5]:
assert isinstance(batch[j], list)
assert len(batch[j]) == self.env.num_parallel_episodes
for batch_idx in range(self.env.num_parallel_episodes):
assert isinstance(kspace[batch_idx], np.ndarray)
assert np.all(
np.iscomplex(kspace[batch_idx][np.nonzero(kspace[batch_idx])])
)
assert kspace[batch_idx].shape in [(640, 368), (640, 372)] # k-space
assert isinstance(ground_truth[batch_idx], np.ndarray)
assert not np.any(np.iscomplex(ground_truth[batch_idx]))
assert ground_truth[batch_idx].shape == (320, 320) # ground_truth
# data.attrs
assert len(attrs[batch_idx]) == 8
for key in [
"acquisition",
"max",
"norm",
"patient_id",
"padding_left",
"padding_right",
"encoding_size",
"recon_size",
]:
assert key in attrs[batch_idx]
# file name
assert isinstance(fname[batch_idx], str)
# slice_id
assert isinstance(slice_id[batch_idx], int)
if i == 10:
break
def test_singlecoil_knee_reset(self):
obs, _ = self.env.reset()
assert len(obs) == 3
assert "reconstruction" in obs
assert "mask" in obs
assert "extra_outputs" in obs
assert obs["reconstruction"].shape == (self.env.num_parallel_episodes, 320, 320)
assert obs["mask"].shape in [
(self.env.num_parallel_episodes, 368),
(self.env.num_parallel_episodes, 372),
]
| active-mri-acquisition-main | tests/fastmri/test_envs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "active-mri-acquisition"
copyright = "2020, Facebook AI Research"
author = "Facebook AI Research"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"nbsphinx",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| active-mri-acquisition-main | docs/conf.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import pickle
from typing import cast
import numpy as np
import torch
import activemri.baselines as baselines
import activemri.envs as envs
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--budget", type=int, default=100, help="How many k-space columns to acquire."
)
parser.add_argument(
"--num_parallel_episodes",
type=int,
default=1,
help="The number of episodes the environment runs in parallel",
)
parser.add_argument(
"--num_episodes",
type=int,
default=100,
help="How many batches of episodes to run in total.",
)
parser.add_argument(
"--baseline",
type=str,
choices=[
"random",
"random-lb",
"lowtohigh",
"evaluator",
"ds-ddqn",
"ss-ddqn",
"oracle",
],
help="The algorithm to evaluate.",
)
parser.add_argument(
"--evaluator_path",
type=str,
default=None,
help="Path to checkpoint for evalutor network.",
)
parser.add_argument(
"--baseline_device",
type=str,
default="cpu",
help="Which torch device to use for the baseline (if 'evaluator' or '*ddqn').",
)
parser.add_argument(
"--dqn_checkpoint_path",
type=str,
default=None,
help="Checkpoint for the DDQN agent.",
)
parser.add_argument("--legacy_model", action="store_true")
parser.add_argument(
"--oracle_num_samples",
type=int,
default=20,
help="If using the one step greedy oracle, how many actions to sample each step.",
)
parser.add_argument(
"--output_dir",
type=str,
default=None,
help="Directory where results will be stored.",
)
parser.add_argument("--seed", type=int, default=0, help="Seed for the environment.")
parser.add_argument("--env", choices=["miccai", "miccai_extreme"])
args = parser.parse_args()
extreme = "_extreme" in args.env
env = envs.MICCAI2020Env(args.num_parallel_episodes, args.budget, extreme=extreme)
policy: baselines.Policy = None
if args.baseline == "random":
policy = baselines.RandomPolicy()
if args.baseline == "random-lb":
policy = baselines.RandomLowBiasPolicy(acceleration=3.0, centered=False)
if args.baseline == "lowtohigh":
policy = baselines.LowestIndexPolicy(alternate_sides=True, centered=False)
if args.baseline == "evaluator":
policy = baselines.CVPR19Evaluator(
args.evaluator_path,
torch.device(args.baseline_device),
add_mask=True,
)
if args.baseline == "oracle":
policy = baselines.OneStepGreedyOracle(
env, "ssim", num_samples=args.oracle_num_samples
)
if "ddqn" in args.baseline:
checkpoint = torch.load(args.dqn_checkpoint_path)
options = checkpoint["options"]
if "miccai" in args.env:
initial_num_lines = 1 if "extreme" in args.env else 15
if args.legacy_model:
options.legacy_offset = initial_num_lines
policy = baselines.DDQN(args.baseline_device, None, options)
policy = cast(baselines.DDQN, policy)
policy.load_state_dict(checkpoint["dqn_weights"])
all_scores, all_img_idx = baselines.evaluate(
env, policy, args.num_episodes, args.seed, "test", verbose=True
)
os.makedirs(args.output_dir, exist_ok=True)
np.save(os.path.join(args.output_dir, "scores.npy"), all_scores)
with open(os.path.join(args.output_dir, "img_ids.pkl"), "wb") as f:
pickle.dump(all_img_idx, f)
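# Example invocation (a sketch; paths and budget values are placeholders):
#   python examples/run_evaluation.py --env miccai --baseline random \
#       --budget 100 --num_episodes 10 --num_parallel_episodes 4 \
#       --output_dir /tmp/eval_results --seed 0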
| active-mri-acquisition-main | examples/run_evaluation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import activemri.baselines.ddqn as ddqn
import activemri.envs as envs
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--budget", type=int, default=10)
parser.add_argument("--num_parallel_episodes", type=int, default=4)
parser.add_argument("--training_dir", type=str, default=None)
parser.add_argument("--device", type=str, default=None)
parser.add_argument("--extreme_acc", action="store_true")
parser.add_argument("--seed", type=int, default=0)
args = parser.parse_args()
env = envs.MICCAI2020Env(
args.num_parallel_episodes,
args.budget,
extreme=args.extreme_acc,
seed=args.seed,
)
tester = ddqn.DDQNTester(env, args.training_dir, args.device)
tester()
| active-mri-acquisition-main | examples/test_ddqn.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
import activemri.baselines as mri_baselines
import activemri.envs as envs
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--budget", type=int, default=10)
parser.add_argument("--num_parallel_episodes", type=int, default=4)
parser.add_argument("--device", type=str, default=None)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--extreme_acc", action="store_true")
parser.add_argument("--checkpoints_dir", type=str, default=None)
parser.add_argument("--mem_capacity", type=int, default=1000)
parser.add_argument(
"--dqn_model_type",
type=str,
choices=["simple_mlp", "evaluator"],
default="evaluator",
)
parser.add_argument(
"--reward_metric",
type=str,
choices=["mse", "ssim", "nmse", "psnr"],
default="ssim",
)
parser.add_argument("--resume", action="store_true")
parser.add_argument("--mask_embedding_dim", type=int, default=0)
parser.add_argument("--dqn_batch_size", type=int, default=2)
parser.add_argument("--dqn_burn_in", type=int, default=100)
parser.add_argument("--dqn_normalize", action="store_true")
parser.add_argument("--gamma", type=float, default=0.5)
parser.add_argument("--epsilon_start", type=float, default=1.0)
parser.add_argument("--epsilon_decay", type=int, default=10000)
parser.add_argument("--epsilon_end", type=float, default=0.001)
parser.add_argument("--dqn_learning_rate", type=float, default=0.001)
parser.add_argument("--num_train_steps", type=int, default=1000)
parser.add_argument("--num_test_episodes", type=int, default=2)
parser.add_argument("--dqn_only_test", action="store_true")
parser.add_argument("--dqn_weights_path", type=str, default=None)
parser.add_argument("--dqn_test_episode_freq", type=int, default=None)
parser.add_argument("--target_net_update_freq", type=int, default=5000)
parser.add_argument("--freq_dqn_checkpoint_save", type=int, default=1000)
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
env = envs.MICCAI2020Env(
args.num_parallel_episodes,
args.budget,
obs_includes_padding=args.dqn_model_type == "evaluator",
extreme=args.extreme_acc,
)
env.seed(args.seed)
policy = mri_baselines.DDQNTrainer(args, env, torch.device(args.device))
policy()
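# Example invocation (a sketch; device and directory values are placeholders):
#   python examples/train_ddqn.py --device cuda:0 --budget 10 \
#       --num_parallel_episodes 4 --checkpoints_dir /tmp/ddqn_run --seed 0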
| active-mri-acquisition-main | examples/train_ddqn.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This is the main script used for training Classy Vision jobs.
This can be used for training on your local machine, using CPU or GPU, and
for distributed training. This script also supports Tensorboard, Visdom and
checkpointing.
Example:
For training locally, simply specify a configuration file and whether
to use CPU or GPU:
$ ./classy_train.py --device gpu --config configs/my_config.json
For distributed training, this can be invoked via
:func:`torch.distributed.launch`. For instance
$ python -m torch.distributed.launch \
--nnodes=1 \
--nproc_per_node=1 \
--master_addr=localhost \
--master_port=29500 \
--use_env \
classy_train.py \
--config=configs/resnet50_synthetic_image_classy_config.json \
--log_freq=100
For other use cases, try
$ ./classy_train.py --help
"""
import logging
import os
from datetime import datetime
from pathlib import Path
import torch
from classy_vision.generic.distributed_util import get_rank, get_world_size
from classy_vision.generic.opts import check_generic_args, parse_train_arguments
from classy_vision.generic.registry_utils import import_all_packages_from_directory
from classy_vision.generic.util import load_json
from classy_vision.hooks import (
CheckpointHook,
LossLrMeterLoggingHook,
ModelComplexityHook,
ProfilerHook,
ProgressBarHook,
TensorboardPlotHook,
VisdomHook,
)
from classy_vision.tasks import build_task, FineTuningTask
from classy_vision.trainer import DistributedTrainer, LocalTrainer
from torchvision import set_image_backend, set_video_backend
try:
import hydra
import omegaconf
hydra_available = True
except ImportError:
hydra_available = False
def main(args, config):
# Global flags
torch.manual_seed(0)
set_image_backend(args.image_backend)
set_video_backend(args.video_backend)
task = build_task(config)
# Load checkpoint, if available.
if args.checkpoint_load_path:
task.set_checkpoint(args.checkpoint_load_path)
# Load a checkpoint containing a pre-trained model. This is how we
# implement fine-tuning of existing models.
if args.pretrained_checkpoint_path:
assert isinstance(
task, FineTuningTask
), "Can only use a pretrained checkpoint for fine tuning tasks"
task.set_pretrained_checkpoint(args.pretrained_checkpoint_path)
# Configure hooks to do tensorboard logging, checkpoints and so on.
# `configure_hooks` adds default hooks, while extra hooks can be specified
# in config file and stored in `task.hooks`. Here, we merge them when we
# set the final hooks of the task.
task.set_hooks(configure_hooks(args, config) + task.hooks)
# LocalTrainer is used for a single replica. DistributedTrainer will setup
# training to use PyTorch's DistributedDataParallel.
trainer_class = {"none": LocalTrainer, "ddp": DistributedTrainer}[
args.distributed_backend
]
trainer = trainer_class()
logging.info(
f"Starting training on rank {get_rank()} worker. "
f"World size is {get_world_size()}"
)
# That's it! When this call returns, training is done.
trainer.train(task)
output_folder = Path(args.checkpoint_folder).resolve()
logging.info("Training successful!")
logging.info(f'Results of this training run are available at: "{output_folder}"')
def configure_hooks(args, config):
hooks = [LossLrMeterLoggingHook(args.log_freq), ModelComplexityHook()]
# Make a folder to store checkpoints and tensorboard logging outputs
suffix = datetime.now().isoformat()
base_folder = f"{Path(__file__).parent}/output_{suffix}"
if args.checkpoint_folder == "":
args.checkpoint_folder = base_folder + "/checkpoints"
os.makedirs(args.checkpoint_folder, exist_ok=True)
logging.info(f"Logging outputs to {base_folder}")
logging.info(f"Logging checkpoints to {args.checkpoint_folder}")
if not args.skip_tensorboard:
try:
from torch.utils.tensorboard import SummaryWriter
os.makedirs(Path(base_folder) / "tensorboard", exist_ok=True)
tb_writer = SummaryWriter(log_dir=Path(base_folder) / "tensorboard")
hooks.append(TensorboardPlotHook(tb_writer))
except ImportError:
logging.warning("tensorboard not installed, skipping tensorboard hooks")
args_dict = vars(args)
args_dict["config"] = config
hooks.append(
CheckpointHook(
args.checkpoint_folder, args_dict, checkpoint_period=args.checkpoint_period
)
)
if args.profiler:
hooks.append(ProfilerHook())
if args.show_progress:
hooks.append(ProgressBarHook())
if args.visdom_server != "":
hooks.append(VisdomHook(args.visdom_server, args.visdom_port))
return hooks
if hydra_available:
@hydra.main(config_path="hydra_configs", config_name="args")
def hydra_main(cfg):
args = cfg
check_generic_args(cfg)
config = omegaconf.OmegaConf.to_container(cfg.config)
main(args, config)
# run all the things:
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.info("Classy Vision's default training script.")
# This imports all modules in the same directory as classy_train.py
# Because of the way Classy Vision's registration decorators work,
# importing a module has a side effect of registering it with Classy
# Vision. This means you can give classy_train.py a config referencing your
# custom module (e.g. my_dataset) and it'll actually know how to
# instantiate it.
file_root = Path(__file__).parent
import_all_packages_from_directory(file_root)
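# For instance, a hypothetical my_dataset.py placed next to this script
# would self-register like this (sketch; register_dataset is the standard
# Classy Vision registration decorator):
#
#   from classy_vision.dataset import ClassyDataset, register_dataset
#
#   @register_dataset("my_dataset")
#   class MyDataset(ClassyDataset):
#       ...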
if hydra_available:
hydra_main()
else:
args = parse_train_arguments()
config = load_json(args.config_file)
main(args, config)
| ClassyVision-main | classy_train.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import sys
from setuptools import find_namespace_packages, find_packages, setup
if __name__ == "__main__":
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >=3.6 is required for Classy Vision.")
# get version string from module
with open(
os.path.join(os.path.dirname(__file__), "classy_vision/__init__.py"), "r"
) as f:
version = re.search(r"__version__ = ['\"]([^'\"]*)['\"]", f.read(), re.M).group(
1
)
print("-- Building version " + version)
with open("README.md", encoding="utf8") as f:
readme = f.read()
with open("requirements.txt") as f:
reqs = f.read()
setup(
name="classy_vision",
version=version,
description="An end-to-end PyTorch framework for image and video classification.",
long_description_content_type="text/markdown",
long_description=readme,
url="https://classyvision.ai",
project_urls={
"Documentation": "https://classyvision.ai",
"Source": "https://github.com/facebookresearch/ClassyVision",
},
license="MIT License",
python_requires=">=3.6",
packages=find_packages(exclude=("tests",))
+ find_namespace_packages(include=["hydra_plugins.*"]),
install_requires=reqs.strip().split("\n"),
extras_require={
"dev": [
"GitPython",
"black==19.3b0",
"sphinx",
"isort==5.2.2",
"bs4",
"nbconvert==6.0.7",
"pre-commit",
"parameterized",
"fairscale==0.3.7",
]
},
package_data={"classy_vision": ["configs/*.json", "templates"]},
data_files=[("classy_vision", ["classy_train.py"])],
include_package_data=True,
test_suite="test.suites.unittests",
scripts=["bin/classy-project"],
keywords=["deep learning", "pytorch", "AI"],
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Image Recognition",
],
)
| ClassyVision-main | setup.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import torch
from classy_vision.hub import ClassyHubInterface
dependencies = ["torch", "torchvision"]
# export the wsl models (https://github.com/facebookresearch/WSL-Images)
resnext_wsl_models = [
"resnext101_32x8d_wsl",
"resnext101_32x16d_wsl",
"resnext101_32x32d_wsl",
"resnext101_32x48d_wsl",
]
def _create_interface_from_torchhub(github, *args, **kwargs):
model = torch.hub.load(github, *args, **kwargs)
return ClassyHubInterface.from_model(model)
for model in resnext_wsl_models:
globals()[model] = functools.partial(
_create_interface_from_torchhub, "facebookresearch/WSL-Images", model
)
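# Usage sketch (assumption: loaded through torch.hub from outside this repo;
# the entrypoint names come from `resnext_wsl_models` above, and the returned
# object is a ClassyHubInterface wrapping the torchvision model):
#
#   import torch
#   hub_interface = torch.hub.load(
#       "facebookresearch/ClassyVision", "resnext101_32x8d_wsl"
#   )
#   predictions = hub_interface.model(batch)  # assumes a preprocessed batch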
| ClassyVision-main | hubconf.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.generic.util import convert_to_one_hot
class TestUtils(unittest.TestCase):
def test_single(self):
targets = torch.tensor([[4]])
one_hot_target = convert_to_one_hot(targets, 5)
self.assertTrue(torch.allclose(one_hot_target, torch.tensor([[0, 0, 0, 0, 1]])))
def test_two(self):
targets = torch.tensor([[0], [1]])
one_hot_target = convert_to_one_hot(targets, 3)
self.assertTrue(
torch.allclose(one_hot_target, torch.tensor([[1, 0, 0], [0, 1, 0]]))
)
| ClassyVision-main | test/losses_generic_utils_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import torch
from classy_vision.models import ClassyBlock
class TestClassyStatelessBlock(unittest.TestCase):
def setUp(self):
"""
This test checks the output of the stateful (default) and stateless variants
of ClassyBlock by setting the environment variable CLASSY_BLOCK_STATELESS.
"""
# initialize stateful model
self._model_stateful = ClassyBlock(name="stateful", module=torch.nn.Identity())
# initialize stateless model
os.environ["CLASSY_BLOCK_STATELESS"] = "1"
self._model_stateless = ClassyBlock(
name="stateless", module=torch.nn.Identity()
)
# note: use low=1 since ClassyBlock's output attribute defaults to torch.zeros(0)
self._data = torch.randint(low=1, high=5, size=(3, 5, 5))
def tearDown(self):
# environment variables do not propagate outside the scope of this test,
# but we clean it up anyway
del os.environ["CLASSY_BLOCK_STATELESS"]
def test_classy_output_stateless(self):
# confirm model.output is (stateless) i.e. default of torch.zeros(0) and
# that output == data
output = self._model_stateless.forward(self._data)
self.assertTrue(torch.equal(self._model_stateless.output, torch.zeros(0)))
self.assertTrue(torch.equal(output, self._data))
def test_classy_output_stateful(self):
# confirm model.output keeps input data and that output == data
output = self._model_stateful.forward(self._data)
self.assertTrue(torch.equal(self._model_stateful.output, output))
self.assertTrue(torch.equal(output, self._data))
| ClassyVision-main | test/models_classy_block_stateless_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from classy_vision.models import build_model, EfficientNet
class TestEfficientNetModel(unittest.TestCase):
def get_model_config(self, use_model_name=False):
model_config = {
"name": "efficientnet",
"model_params": {
"width_coefficient": 1.1,
"depth_coefficient": 1.2,
"resolution": 260,
"dropout_rate": 0.3,
},
"bn_momentum": 0.01,
"bn_epsilon": 1e-3,
"drop_connect_rate": 0.2,
"num_classes": 1000,
"width_divisor": 8,
"min_width": None,
"use_se": True,
}
if use_model_name:
del model_config["model_params"]
model_config["model_name"] = "B2"
return model_config
def test_build_model(self):
"""
Test that the model builds using a config using either model_params or
model_name.
"""
for use_model_name in [True, False]:
model = build_model(self.get_model_config(use_model_name=use_model_name))
assert isinstance(model, EfficientNet)
def test_build_preset_model(self):
configs = [{"name": f"efficientnet_b{i}" for i in range(8)}]
for config in configs:
model = build_model(config)
self.assertIsInstance(model, EfficientNet)
def test_model_forward(self):
image_shape = (3, 260, 260)
num_images = (10,)
input = torch.randn(num_images + image_shape)
model = build_model(self.get_model_config())
model(input)
| ClassyVision-main | test/models_efficientnet_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch
import torch.nn as nn
from classy_vision.hooks import ClassyHook
from classy_vision.hooks.precise_batch_norm_hook import PreciseBatchNormHook
from classy_vision.tasks import build_task
from classy_vision.trainer import ClassyTrainer
from test.generic.config_utils import get_test_mlp_task_config
from test.generic.hook_test_utils import HookTestBase
class TestPreciseBatchNormHook(HookTestBase):
def _get_bn_stats(self, model):
model = copy.deepcopy(model)
stats = {}
for name, module in model.named_modules():
if isinstance(module, nn.modules.batchnorm._BatchNorm):
stats[name] = {"mean": module.running_mean, "var": module.running_var}
return stats
def _compare_bn_stats(self, stats_1, stats_2):
# make sure the stats are non empty
self.assertGreater(len(stats_1), 0)
for name in stats_1:
if not torch.allclose(
stats_1[name]["mean"], stats_2[name]["mean"]
) or not torch.allclose(stats_1[name]["var"], stats_2[name]["var"]):
return False
return True
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
self.constructor_test_helper(
config={"num_samples": 10},
hook_type=PreciseBatchNormHook,
hook_registry_name="precise_bn",
invalid_configs=[{}, {"num_samples": 0}],
)
def test_train(self):
config = get_test_mlp_task_config()
task = build_task(config)
num_samples = 10
for cache_sample in [True, False]:
precise_batch_norm_hook = PreciseBatchNormHook(num_samples, cache_sample)
task.set_hooks([precise_batch_norm_hook])
task.prepare()
trainer = ClassyTrainer()
trainer.train(task)
def test_bn_stats(self):
base_self = self
class TestHook(ClassyHook):
on_start = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self):
self.train_bn_stats = None
self.test_bn_stats = None
def on_step(self, task):
if task.train:
self.train_bn_stats = base_self._get_bn_stats(task.base_model)
else:
self.test_bn_stats = base_self._get_bn_stats(task.base_model)
config = get_test_mlp_task_config()
task = build_task(config)
num_samples = 10
precise_batch_norm_hook = PreciseBatchNormHook(num_samples)
test_hook = TestHook()
task.set_hooks([precise_batch_norm_hook, test_hook])
trainer = ClassyTrainer()
trainer.train(task)
updated_bn_stats = self._get_bn_stats(task.base_model)
# the stats should be modified after train steps but not after test steps
self.assertFalse(
self._compare_bn_stats(test_hook.train_bn_stats, updated_bn_stats)
)
self.assertTrue(
self._compare_bn_stats(test_hook.test_bn_stats, updated_bn_stats)
)
| ClassyVision-main | test/hooks_precise_batch_norm_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from classy_vision import meters
from classy_vision.meters import RecallAtKMeter
from test.generic.meter_test_utils import ClassificationMeterTest
class TestRecallAtKMeter(ClassificationMeterTest):
def test_recall_meter_registry(self):
meter = meters.build_meter({"name": "recall_at_k", "topk": [1, 3]})
self.assertTrue(isinstance(meter, RecallAtKMeter))
def test_single_meter_update_and_reset(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update.
"""
meter = RecallAtKMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is probability of class
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
[0.33, 0.33, 0.34], # top-1: 2, top-2: 2/0/1
]
)
# One-hot encoding, 1 = positive for class
# sample-1: 1, sample-2: 0, sample-3: 0,1,2
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]])
# Note for ties, we select randomly, so we should not use ambiguous ties
expected_value = {"top_1": 2 / 5.0, "top_2": 4 / 5.0}
self.meter_update_and_reset_test(meter, model_output, target, expected_value)
def test_double_meter_update_and_reset(self):
meter = RecallAtKMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is probability of class
model_outputs = [
torch.tensor([[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]),
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
]
# One-hot encoding, 1 = positive for class
# batch-1: sample-1: 1, sample-2: 0, sample-3: 0,1,2
# batch-2: sample-1: 1, sample-2: 1, sample-3: 1
targets = [
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]),
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]),
]
# First batch has top-1 recall of 2/5.0, top-2 recall of 4/5.0
# Second batch has top-1 recall of 2/3.0, top-2 recall of 2/3.0
expected_value = {"top_1": 4 / 8.0, "top_2": 6 / 8.0}
self.meter_update_and_reset_test(meter, model_outputs, targets, expected_value)
def test_meter_invalid_model_output(self):
meter = RecallAtKMeter(topk=[1, 2])
# This model output has 3 dimensions instead of expected 2
model_output = torch.tensor(
[[[0.33, 0.33, 0.34], [1, 2, 3]], [[-1, -3, -4], [-10, -90, -100]]]
)
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_target(self):
meter = RecallAtKMeter(topk=[1, 2])
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
[0.33, 0.33, 0.34], # top-1: 2, top-2: 2/0/1
]
)
# Target has 3 dimensions instead of the expected 2
target = torch.tensor([[[0, 1, 2]]])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_topk(self):
meter = RecallAtKMeter(topk=[1, 5])
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
[0.33, 0.33, 0.34], # top-1: 2, top-2: 2/0/1
]
)
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_get_set_classy_state_test(self):
# In this test we update meter0 with model_output0 & target0
# and we update meter1 with model_output1 & target1 then
# transfer the state from meter1 to meter0 and validate they
# give same expected value.
#
# Expected value is the expected value of meter1 For this test
# to work, top-1 / top-2 values of meter0 / meter1 should be
# different
meters = [RecallAtKMeter(topk=[1, 2]), RecallAtKMeter(topk=[1, 2])]
model_outputs = [
torch.tensor([[0.05, 0.4, 0.05], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]),
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
]
targets = [
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 0]]),
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]),
]
# Second update's expected value
expected_value = {"top_1": 2 / 3.0, "top_2": 2 / 3.0}
self.meter_get_set_classy_state_test(
meters, model_outputs, targets, expected_value
)
def test_meter_distributed(self):
# Meter0 will execute on one process, Meter1 on the other
meters = [RecallAtKMeter(topk=[1, 2]), RecallAtKMeter(topk=[1, 2])]
# Batchsize = 3, num classes = 3, score is probability of class
model_outputs = [
torch.tensor(
[[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]
), # Meter 0
torch.tensor(
[[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]
), # Meter 1
torch.tensor(
[[0.3, 0.4, 0.3], [0.2, 0.65, 0.15], [0.33, 0.33, 0.34]]
), # Meter 0
torch.tensor(
[[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]
), # Meter 1
]
# Class 0 is the correct class for sample 1, class 2 for sample 2, etc
targets = [
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]), # Meter 0
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]), # Meter 1
torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]), # Meter 0
torch.tensor([[0, 1, 0], [0, 1, 0], [0, 1, 0]]), # Meter 1
]
# In first two updates there are 4 correct top-1 out of 8
# total, 6 correct in top 2 out of 8. The same occurs in the
# second two updates and is added to first
expected_values = [
{"top_1": 4 / 8.0, "top_2": 6 / 8.0}, # After one update to each meter
{"top_1": 8 / 16.0, "top_2": 12 / 16.0}, # After two updates to each meter
]
self.meter_distributed_test(meters, model_outputs, targets, expected_values)
def test_non_onehot_target(self):
"""
This test verifies that the meter works as expected when targets are
class indices of shape (N, 1) instead of one-hot vectors.
"""
meter = RecallAtKMeter(topk=[1, 2])
# Batchsize = 2, num classes = 3, score is probability of class
model_outputs = [
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
torch.tensor([[0.2, 0.4, 0.4], [0.2, 0.65, 0.15], [0.1, 0.8, 0.1]]),
]
# Class-index targets (equivalent one-hot encodings shown inline)
targets = [
torch.tensor([[1], [1], [1]]), # [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
torch.tensor([[0], [1], [2]]), # [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
]
# Note for ties, we select randomly, so we should not use ambiguous ties
# First batch has top-1 recall of 2/3.0, top-2 recall of 2/6.0
# Second batch has top-1 recall of 1/3.0, top-2 recall of 4/6.0
expected_value = {"top_1": 3 / 6.0, "top_2": 6 / 12.0}
self.meter_update_and_reset_test(meter, model_outputs, targets, expected_value)
def test_non_onehot_target_one_dim_target(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update with one dimensional targets.
"""
meter = RecallAtKMeter(topk=[1, 2], target_is_one_hot=False, num_classes=3)
# Batchsize = 2, num classes = 3, score is probability of class
model_outputs = [
torch.tensor([[0.05, 0.4, 0.05], [0.15, 0.65, 0.2], [0.4, 0.2, 0.4]]),
torch.tensor([[0.2, 0.4, 0.4], [0.2, 0.65, 0.15], [0.1, 0.8, 0.1]]),
]
# Class-index targets (equivalent one-hot encodings shown inline)
targets = [
torch.tensor([1, 1, 1]), # [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
torch.tensor([0, 1, 2]), # [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
]
# Note for ties, we select randomly, so we should not use ambiguous ties
# First batch has top-1 recall of 2/3.0, top-2 recall of 2/6.0
# Second batch has top-1 recall of 1/3.0, top-2 recall of 4/6.0
expected_value = {"top_1": 3 / 6.0, "top_2": 6 / 12.0}
self.meter_update_and_reset_test(meter, model_outputs, targets, expected_value)
def test_meter_fp16(self):
"""
This test verifies that the meter works if the input tensor is fp16.
"""
meter = RecallAtKMeter(topk=[1, 2])
# Batchsize = 3, num classes = 3, score is probability of class
model_output = torch.tensor(
[
[0.2, 0.4, 0.4], # top-1: 1/2, top-2: 1/2
[0.2, 0.65, 0.15], # top-1: 1, top-2: 1/0
[0.33, 0.33, 0.34], # top-1: 2, top-2: 2/0/1
]
).half()
# One-hot encoding, 1 = positive for class
# sample-1: 1, sample-2: 0, sample-3: 0,1,2
target = torch.tensor([[0, 1, 0], [1, 0, 0], [1, 1, 1]]).half()
# Note for ties, we select randomly, so we should not use ambiguous ties
expected_value = {"top_1": 2 / 5.0, "top_2": 4 / 5.0}
self.meter_update_and_reset_test(meter, model_output, target, expected_value)
| ClassyVision-main | test/meters_recall_meter_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import unittest.mock as mock
from classy_vision.hooks import ProfilerHook
from test.generic.config_utils import get_test_classy_task, get_test_classy_video_task
from test.generic.hook_test_utils import HookTestBase
class TestProfilerHook(HookTestBase):
def test_constructors(self) -> None:
"""
Test that the hooks are constructed correctly.
"""
config = {}
self.constructor_test_helper(
config=config, hook_type=ProfilerHook, hook_registry_name="profiler"
)
@mock.patch("torch.autograd.profiler.profile", auto_spec=True)
@mock.patch("classy_vision.hooks.profiler_hook.summarize_profiler_info")
def test_profiler(
self,
mock_summarize_profiler_info: mock.MagicMock,
mock_profile_cls: mock.MagicMock,
) -> None:
"""
Tests that a profile instance is returned by the profiler
and that the profiler actually ran.
"""
mock_summarize_profiler_info.return_value = ""
mock_profile = mock.MagicMock()
mock_profile_returned = mock.MagicMock()
mock_profile.__enter__.return_value = mock_profile_returned
mock_profile_cls.return_value = mock_profile
for task in [get_test_classy_task(), get_test_classy_video_task()]:
task.prepare()
# create a model tensorboard hook
profiler_hook = ProfilerHook()
with self.assertLogs():
profiler_hook.on_start(task)
# a new profile should be created with use_cuda=True
mock_profile_cls.assert_called_once_with(use_cuda=True)
mock_profile_cls.reset_mock()
# summarize_profiler_info should have been called once with the profile
mock_summarize_profiler_info.assert_called_once()
profile = mock_summarize_profiler_info.call_args[0][0]
mock_summarize_profiler_info.reset_mock()
self.assertEqual(profile, mock_profile_returned)
| ClassyVision-main | test/hooks_profiler_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import unittest
from itertools import product
from typing import Any, Dict, List
import torch
from classy_vision.generic.distributed_util import _PRIMARY_RANK, broadcast_object
from torch.multiprocessing import Event, Process, Queue
def init_and_run_process(
rank, world_size, filename, fn, input, q, wait_event, backend="gloo"
):
torch.distributed.init_process_group(
backend, init_method=f"file://{filename}", rank=rank, world_size=world_size
)
r = fn(*input)
q.put(r)
wait_event.wait()
return
def run_in_process_group(filename: str, calls: List[Dict[str, Any]]):
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
processes = []
q = Queue()
wait_event = Event()
# spawn one process per call
for rank, call in enumerate(calls):
p = Process(
target=init_and_run_process,
args=(
rank,
call["world_size"],
filename,
call["function"],
call["inputs"],
q,
wait_event,
),
)
p.start()
processes.append(p)
# fetch the results from the queue before joining, the background processes
# need to be alive if the queue contains tensors. See
# https://discuss.pytorch.org/t/using-torch-tensor-over-multiprocessing-queue-process-fails/2847/3 # noqa: B950
results = []
for _ in range(len(processes)):
results.append(q.get())
wait_event.set()
for p in processes:
p.join()
return results
class TestDistributedUtil(unittest.TestCase):
@staticmethod
def _get_test_objects():
return [
{"a": 12, "b": [2, 3, 4], "tensor": torch.randn(10, 10)},
None,
{"tensor": torch.randn(10000, 10000)}, # 400 MB
]
def test_broadcast_object(self):
world_size = 3
for use_disk, obj in product([True, False], self._get_test_objects()):
filename = tempfile.NamedTemporaryFile(delete=True).name
inputs = [None] * world_size
inputs[0] = obj # only the primary worker has the object
calls = [
{
"world_size": world_size,
"function": broadcast_object,
"inputs": [i, _PRIMARY_RANK, use_disk],
}
for i in inputs
]
results = run_in_process_group(filename, calls)
# check that all replicas got identical objects
self.assertEqual(len(results), world_size)
for result in results:
if isinstance(obj, dict):
for key in obj:
if key == "tensor":
self.assertTrue(torch.allclose(result[key], obj[key]))
else:
self.assertEqual(result[key], obj[key])
else:
self.assertEqual(result, obj)
def test_broadcast_object_pick_source(self):
world_size = 3
for use_disk, obj in product([True, False], self._get_test_objects()):
filename = tempfile.NamedTemporaryFile(delete=True).name
inputs = [None] * world_size
source_rank = 1
inputs[source_rank] = obj # only the rank 1 worker has the object
calls = [
{
"world_size": world_size,
"function": broadcast_object,
"inputs": [i, source_rank, use_disk],
}
for i in inputs
]
results = run_in_process_group(filename, calls)
# check that all replicas got identical objects
self.assertEqual(len(results), world_size)
for result in results:
if isinstance(obj, dict):
for key in obj:
if key == "tensor":
self.assertTrue(torch.allclose(result[key], obj[key]))
else:
self.assertEqual(result[key], obj[key])
else:
self.assertEqual(result, obj)
| ClassyVision-main | test/generic_distributed_util_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import torch
from classy_vision.losses import BarronLoss, build_loss
class TestBarronLoss(unittest.TestCase):
def _get_config(self):
return {"name": "barron", "size_average": True, "alpha": 1.0, "c": 1.0}
def _get_outputs(self):
return torch.tensor([[2.0]])
def _get_targets(self):
return torch.tensor([3.0])
def test_build_barron(self):
config = self._get_config()
crit = build_loss(config)
self.assertTrue(isinstance(crit, BarronLoss))
self.assertEqual(crit.size_average, config["size_average"])
self.assertAlmostEqual(crit.alpha, config["alpha"])
self.assertAlmostEqual(crit.c, config["c"])
def test_barron(self):
config = self._get_config()
crit = BarronLoss.from_config(config)
outputs = self._get_outputs()
targets = self._get_targets()
self.assertAlmostEqual(crit(outputs, targets).item(), 0.41421353816986084)
# Alpha = 0
config = self._get_config()
config["alpha"] = 0.0
crit = BarronLoss.from_config(config)
outputs = self._get_outputs()
targets = self._get_targets()
self.assertAlmostEqual(crit(outputs, targets).item(), 0.40546512603759766)
# Alpha = inf
config = self._get_config()
config["alpha"] = float("inf")
crit = BarronLoss.from_config(config)
outputs = self._get_outputs()
targets = self._get_targets()
self.assertAlmostEqual(crit(outputs, targets).item(), 0.39346933364868164)
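# For reference, the expected values above line up with closed forms of the
# general robust loss for x = output - target = -1 and c = 1 (a sketch
# derived from the constants in this test, not from the implementation):
#   alpha = 1   ->  sqrt((x/c)^2 + 1) - 1    = sqrt(2) - 1 ~ 0.414214
#   alpha = 0   ->  log(0.5 * (x/c)^2 + 1)   = log(1.5)    ~ 0.405465
#   alpha = inf ->  1 - exp(-0.5 * (x/c)^2)  = 1 - e^-0.5  ~ 0.393469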
def test_deep_copy(self):
config = self._get_config()
crit1 = build_loss(config)
self.assertTrue(isinstance(crit1, BarronLoss))
outputs = self._get_outputs()
targets = self._get_targets()
crit1(outputs, targets)
crit2 = copy.deepcopy(crit1)
self.assertAlmostEqual(
crit1(outputs, targets).item(), crit2(outputs, targets).item()
)
| ClassyVision-main | test/losses_barron_loss_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from classy_vision.optim.param_scheduler import build_param_scheduler
from classy_vision.optim.param_scheduler.composite_scheduler import (
CompositeParamScheduler,
IntervalScaling,
UpdateInterval,
)
class TestCompositeScheduler(unittest.TestCase):
_num_epochs = 10
def _get_valid_long_config(self):
return {
"name": "composite",
"schedulers": [
{"name": "constant", "value": 0.1},
{"name": "constant", "value": 0.2},
{"name": "constant", "value": 0.3},
{"name": "constant", "value": 0.4},
],
"lengths": [0.2, 0.4, 0.1, 0.3],
}
def _get_lengths_sum_less_one_config(self):
return {
"name": "composite",
"schedulers": [
{"name": "constant", "value": 0.1},
{"name": "constant", "value": 0.2},
],
"lengths": [0.7, 0.2999],
}
def _get_valid_mixed_config(self):
return {
"name": "composite",
"schedulers": [
{"name": "step", "values": [0.1, 0.2, 0.3, 0.4, 0.5], "num_epochs": 10},
{"name": "cosine", "start_value": 0.42, "end_value": 0.0001},
],
"lengths": [0.5, 0.5],
}
def _get_valid_linear_config(self):
return {
"name": "composite",
"schedulers": [
{"name": "linear", "start_value": 0.0, "end_value": 0.5},
{"name": "linear", "start_value": 0.5, "end_value": 1.0},
],
"lengths": [0.5, 0.5],
"interval_scaling": ["rescaled", "rescaled"],
}
def test_invalid_config(self):
config = self._get_valid_mixed_config()
bad_config = copy.deepcopy(config)
# No schedulers
bad_config["schedulers"] = []
bad_config["lengths"] = []
with self.assertRaises(ValueError):
CompositeParamScheduler.from_config(bad_config)
# Size of schedulers and lengths doesn't match
bad_config["schedulers"] = copy.deepcopy(config["schedulers"])
bad_config["lengths"] = copy.deepcopy(config["lengths"])
bad_config["schedulers"].append(bad_config["schedulers"][-1])
with self.assertRaises(ValueError):
CompositeParamScheduler.from_config(bad_config)
# Sum of lengths < 1
bad_config["schedulers"] = copy.deepcopy(config["schedulers"])
bad_config["lengths"][-1] -= 0.1
with self.assertRaises(ValueError):
CompositeParamScheduler.from_config(bad_config)
# Sum of lengths > 1
bad_config["lengths"] = copy.deepcopy(config["lengths"])
bad_config["lengths"][-1] += 0.1
with self.assertRaises(ValueError):
CompositeParamScheduler.from_config(bad_config)
# Bad value for update_interval
bad_config["lengths"] = copy.deepcopy(config["lengths"])
bad_config["update_interval"] = "epochs"
with self.assertRaises(Exception):
CompositeParamScheduler.from_config(bad_config)
# Bad value for interval_scaling
del bad_config["update_interval"]
bad_config["interval_scaling"] = ["rescaled", "rescaleds"]
with self.assertRaises(AssertionError):
CompositeParamScheduler.from_config(bad_config)
# Wrong number of interval_scaling entries
del bad_config["interval_scaling"]
bad_config["interval_scaling"] = ["rescaled"]
with self.assertRaises(AssertionError):
CompositeParamScheduler.from_config(bad_config)
# Missing required parameters
del bad_config["interval_scaling"]
bad_config["lengths"] = config["lengths"]
del bad_config["lengths"]
with self.assertRaises(AssertionError):
CompositeParamScheduler.from_config(bad_config)
bad_config["lengths"] = config["lengths"]
del bad_config["schedulers"]
with self.assertRaises(AssertionError):
CompositeParamScheduler.from_config(bad_config)
def test_long_scheduler(self):
config = self._get_valid_long_config()
scheduler = CompositeParamScheduler.from_config(config)
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [0.1, 0.1, 0.2, 0.2, 0.2, 0.2, 0.3, 0.4, 0.4, 0.4]
self.assertEqual(schedule, expected_schedule)
def test_scheduler_lengths_within_epsilon_of_one(self):
config = self._get_lengths_sum_less_one_config()
scheduler = CompositeParamScheduler.from_config(config)
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2, 0.2, 0.2]
self.assertEqual(schedule, expected_schedule)
def test_scheduler_update_interval(self):
config = self._get_valid_mixed_config()
# Check default
scheduler = CompositeParamScheduler.from_config(config)
self.assertEqual(scheduler.update_interval, UpdateInterval.STEP)
# Check step
step_config = copy.deepcopy(config)
step_config["update_interval"] = "step"
scheduler = build_param_scheduler(step_config)
self.assertEqual(scheduler.update_interval, UpdateInterval.STEP)
# Check epoch
epoch_config = copy.deepcopy(config)
epoch_config["update_interval"] = "epoch"
scheduler = build_param_scheduler(epoch_config)
self.assertEqual(scheduler.update_interval, UpdateInterval.EPOCH)
def test_build_composite_scheduler(self):
config = self._get_valid_mixed_config()
scheduler = build_param_scheduler(config)
self.assertTrue(isinstance(scheduler, CompositeParamScheduler))
schedulers = [
build_param_scheduler(scheduler_config)
for scheduler_config in config["schedulers"]
]
composite = CompositeParamScheduler(
schedulers=schedulers,
lengths=config["lengths"],
update_interval=UpdateInterval.EPOCH,
interval_scaling=[IntervalScaling.RESCALED, IntervalScaling.FIXED],
)
self.assertTrue(isinstance(composite, CompositeParamScheduler))
def test_scheduler_with_mixed_types(self):
config = self._get_valid_mixed_config()
scheduler_0 = build_param_scheduler(config["schedulers"][0])
scheduler_1 = build_param_scheduler(config["schedulers"][1])
# Check scaled
config["interval_scaling"] = ["rescaled", "rescaled"]
scheduler = CompositeParamScheduler.from_config(config)
scaled_schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
round(scheduler_0(epoch_num / self._num_epochs), 4)
for epoch_num in range(0, self._num_epochs, 2)
] + [
round(scheduler_1(epoch_num / self._num_epochs), 4)
for epoch_num in range(0, self._num_epochs, 2)
]
self.assertEqual(scaled_schedule, expected_schedule)
# Check fixed
config["interval_scaling"] = ["fixed", "fixed"]
scheduler = CompositeParamScheduler.from_config(config)
fixed_schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
round(scheduler_0(epoch_num / self._num_epochs), 4)
for epoch_num in range(0, int(self._num_epochs / 2))
] + [
round(scheduler_1(epoch_num / self._num_epochs), 4)
for epoch_num in range(int(self._num_epochs / 2), self._num_epochs)
]
self.assertEqual(fixed_schedule, expected_schedule)
# Check that default is rescaled
del config["interval_scaling"]
scheduler = CompositeParamScheduler.from_config(config)
schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
self.assertEqual(scaled_schedule, schedule)
# Check warmup of rescaled then fixed
config["interval_scaling"] = ["rescaled", "fixed"]
scheduler = CompositeParamScheduler.from_config(config)
fixed_schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
round(scheduler_0(epoch_num / self._num_epochs), 4)
for epoch_num in range(0, int(self._num_epochs), 2)
] + [
round(scheduler_1(epoch_num / self._num_epochs), 4)
for epoch_num in range(int(self._num_epochs / 2), self._num_epochs)
]
self.assertEqual(fixed_schedule, expected_schedule)
def test_linear_scheduler_no_gaps(self):
config = self._get_valid_linear_config()
# Check rescaled
scheduler = CompositeParamScheduler.from_config(config)
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
self.assertEqual(expected_schedule, schedule)
# Check fixed composition gives same result as only 1 scheduler
config["schedulers"][1] = config["schedulers"][0]
config["interval_scaling"] = ["fixed", "fixed"]
scheduler = CompositeParamScheduler.from_config(config)
linear_scheduler = build_param_scheduler(config["schedulers"][0])
schedule = [
scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
linear_scheduler(epoch_num / self._num_epochs)
for epoch_num in range(self._num_epochs)
]
self.assertEqual(expected_schedule, schedule)
| ClassyVision-main | test/optim_param_scheduler_composite_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
from classy_vision.hooks import build_hook, build_hooks, ClassyHook, register_hook
@register_hook("test_hook")
class TestHook(ClassyHook):
on_start = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_step = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self, a, b):
super().__init__()
self.state.a = a
self.state.b = b
@classmethod
def from_config(cls, config):
return cls(**config)
@register_hook("test_hook_new")
class TestHookNew(ClassyHook):
on_start = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_step = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self, b, c):
super().__init__()
self.state.b = b
self.state.c = c
@classmethod
def from_config(cls, config):
return cls(**config)
class TestClassyHook(unittest.TestCase):
def test_hook_registry_and_builder(self):
config = {"name": "test_hook", "a": 1, "b": 2}
hook1 = build_hook(hook_config=config)
self.assertTrue(isinstance(hook1, TestHook))
self.assertTrue(hook1.state.a == 1)
self.assertTrue(hook1.state.b == 2)
hook_configs = [copy.deepcopy(config), copy.deepcopy(config)]
hooks = build_hooks(hook_configs=hook_configs)
for hook in hooks:
self.assertTrue(isinstance(hook, TestHook))
self.assertTrue(hook.state.a == 1)
self.assertTrue(hook.state.b == 2)
def test_state_dict(self):
a = 0
b = {1: 2, 3: [4]}
test_hook = TestHook(a, b)
state_dict = test_hook.get_classy_state()
# create a new test_hook and set its state to the old hook's.
test_hook = TestHook("", 0)
test_hook.set_classy_state(state_dict)
self.assertEqual(test_hook.state.a, a)
self.assertEqual(test_hook.state.b, b)
# make sure we're able to load old checkpoints
b_new = {1: 2}
c_new = "hello"
test_hook_new = TestHookNew(b_new, c_new)
test_hook_new.set_classy_state(state_dict)
self.assertEqual(test_hook_new.state.a, a)
self.assertEqual(test_hook_new.state.b, b)
self.assertEqual(test_hook_new.state.c, c_new)
| ClassyVision-main | test/hooks_classy_hook_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import unittest
import torch
from classy_vision.losses import build_loss, LabelSmoothingCrossEntropyLoss
class TestLabelSmoothingCrossEntropyLoss(unittest.TestCase):
def test_build_label_smoothing_cross_entropy(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.1,
}
crit = build_loss(config)
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
self.assertEqual(crit._ignore_index, -1)
def test_smoothing_one_hot_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.1,
}
crit = build_loss(config)
targets = torch.tensor([[0, 0, 0, 0, 1]])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 5)
self.assertTrue(
torch.allclose(valid_targets, torch.tensor([[0.0, 0.0, 0.0, 0.0, 1.0]]))
)
smoothed_targets = crit.smooth_targets(valid_targets, 5)
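        # A hedged sketch of the arithmetic behind the expected values below
        # (the smoothing rule is inferred from the numbers, not quoted from the
        # implementation): targets appear to be normalized to sum to 1, then
        # each entry becomes (t_i + s / C) / (1 + s). With s = 0.1 and C = 5:
        # (0 + 0.02) / 1.1 = 0.2 / 11 and (1 + 0.02) / 1.1 = 10.2 / 11.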
self.assertTrue(
torch.allclose(
smoothed_targets,
torch.tensor([[0.2 / 11, 0.2 / 11, 0.2 / 11, 0.2 / 11, 10.2 / 11]]),
)
)
def test_smoothing_ignore_index_one_hot_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = build_loss(config)
targets = torch.tensor([[-1, 0, 0, 0, 1]])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 5)
self.assertTrue(
torch.allclose(valid_targets, torch.tensor([[0.0, 0.0, 0.0, 0.0, 1.0]]))
)
smoothed_targets = crit.smooth_targets(valid_targets, 5)
self.assertTrue(
torch.allclose(
smoothed_targets,
torch.tensor([[1 / 15, 1 / 15, 1 / 15, 1 / 15, 11 / 15]]),
)
)
def test_smoothing_multilabel_one_hot_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = build_loss(config)
targets = torch.tensor([[1, 0, 0, 0, 1]])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 5)
self.assertTrue(
torch.allclose(valid_targets, torch.tensor([[1.0, 0.0, 0.0, 0.0, 1.0]]))
)
smoothed_targets = crit.smooth_targets(valid_targets, 5)
self.assertTrue(
torch.allclose(
smoothed_targets,
torch.tensor([[6 / 15, 1 / 15, 1 / 15, 1 / 15, 6 / 15]]),
)
)
def test_smoothing_all_ones_one_hot_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.1,
}
crit = build_loss(config)
targets = torch.tensor([[1, 1, 1, 1]])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 4)
self.assertTrue(
torch.allclose(valid_targets, torch.tensor([[1.0, 1.0, 1.0, 1.0]]))
)
smoothed_targets = crit.smooth_targets(valid_targets, 4)
self.assertTrue(
torch.allclose(smoothed_targets, torch.tensor([[0.25, 0.25, 0.25, 0.25]]))
)
def test_smoothing_mixed_one_hot_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = build_loss(config)
targets = torch.tensor([[1, 1, 1, 1, 1], [1, 0, 0, 0, 1]])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 5)
self.assertTrue(
torch.allclose(
valid_targets,
torch.tensor([[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 0.0, 0.0, 1.0]]),
)
)
smoothed_targets = crit.smooth_targets(valid_targets, 5)
self.assertTrue(
torch.allclose(
smoothed_targets,
torch.tensor(
[
[0.2, 0.2, 0.2, 0.2, 0.2],
[6 / 15, 1 / 15, 1 / 15, 1 / 15, 6 / 15],
]
),
)
)
def test_smoothing_class_targets(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = build_loss(config)
targets = torch.tensor([4, -1])
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
valid_targets = crit.compute_valid_targets(targets, 5)
self.assertTrue(
torch.allclose(
valid_targets,
torch.tensor([[0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0]]),
)
)
smoothed_targets = crit.smooth_targets(valid_targets, 5)
self.assertTrue(
torch.allclose(
smoothed_targets,
torch.tensor(
[
[1 / 15, 1 / 15, 1 / 15, 1 / 15, 11 / 15],
[0.2, 0.2, 0.2, 0.2, 0.2],
]
),
)
)
def test_unnormalized_label_smoothing_cross_entropy(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = LabelSmoothingCrossEntropyLoss.from_config(config)
outputs = torch.tensor([[0.0, 7.0, 0.0, 0.0, 2.0]])
targets = torch.tensor([[0, 0, 0, 0, 1]])
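        # A hedged check of the expected value (assuming the loss is the cross
        # entropy between log_softmax(outputs) and the smoothed targets):
        # smoothed targets = [1/15, 1/15, 1/15, 1/15, 11/15];
        # logsumexp([0, 7, 0, 0, 2]) ~= 7.0095, so the log-probs are roughly
        # [-7.0095, -0.0095, -7.0095, -7.0095, -5.0095] and the loss is
        # (3 * 7.0095 + 0.0095) / 15 + 11 * 5.0095 / 15 ~= 5.0761.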
self.assertAlmostEqual(crit(outputs, targets).item(), 5.07609558, places=5)
def test_ignore_index_label_smoothing_cross_entropy(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.2,
}
crit = LabelSmoothingCrossEntropyLoss.from_config(config)
outputs = torch.tensor([[0.0, 7.0]])
targets = torch.tensor([[-1]])
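        # Hedged arithmetic check: with every target ignored the smoothed
        # targets appear to be uniform, [0.5, 0.5]; logsumexp([0, 7]) ~= 7.0009,
        # so the loss is 0.5 * 7.0009 + 0.5 * 0.0009 ~= 3.5009.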
self.assertAlmostEqual(crit(outputs, targets).item(), 3.50090909)
def test_class_integer_label_smoothing_cross_entropy(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.2,
}
crit = LabelSmoothingCrossEntropyLoss.from_config(config)
outputs = torch.tensor([[1.0, 2.0], [0.0, 2.0]])
targets = torch.tensor([[0], [1]])
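        # Hedged arithmetic check: with s = 0.2 and 2 classes the smoothed
        # targets are [11/12, 1/12] and [1/12, 11/12]. Row 1:
        # log_softmax([1, 2]) ~= [-1.3133, -0.3133], giving ~1.2299; row 2:
        # log_softmax([0, 2]) ~= [-2.1269, -0.1269], giving ~0.2936; their
        # mean is ~0.7618.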
self.assertAlmostEqual(crit(outputs, targets).item(), 0.76176142)
def test_deep_copy(self):
config = {
"name": "label_smoothing_cross_entropy",
"ignore_index": -1,
"smoothing_param": 0.5,
}
crit = build_loss(config)
self.assertTrue(isinstance(crit, LabelSmoothingCrossEntropyLoss))
outputs = torch.tensor([[0.0, 7.0, 0.0, 0.0, 2.0]])
targets = torch.tensor([[0, 0, 0, 0, 1]])
crit(outputs, targets)
crit2 = copy.deepcopy(crit)
self.assertAlmostEqual(crit2(outputs, targets).item(), 5.07609558, places=5)
| ClassyVision-main | test/losses_label_smoothing_cross_entropy_loss_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from classy_vision.optim.rmsprop_tf import RMSPropTF
from test.generic.optim_test_util import TestOptimizer
class TestRMSPropTFOptimizer(TestOptimizer, unittest.TestCase):
def _get_config(self):
return {
"name": "rmsprop_tf",
"num_epochs": 90,
"lr": 0.1,
"momentum": 0.9,
"weight_decay": 0.0001,
"alpha": 0.9,
"eps": 1e-8,
"centered": False,
}
def _instance_to_test(self):
return RMSPropTF
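    # A hedged note: the shared TestOptimizer mixin presumably builds the
    # optimizer from this config and runs the common optimizer checks
    # (registry lookup, state dict round-trips, parameter updates) against
    # RMSPropTF.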
| ClassyVision-main | test/optim_rmsprop_tf_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from classy_vision import meters
from classy_vision.meters import VideoAccuracyMeter
from test.generic.meter_test_utils import ClassificationMeterTest
class TestVideoAccuracyMeter(ClassificationMeterTest):
def test_accuracy_meter_registry(self):
accuracy_meter = meters.build_meter(
{
"name": "video_accuracy",
"topk": [1, 2],
"clips_per_video_train": 1,
"clips_per_video_test": 2,
}
)
self.assertTrue(isinstance(accuracy_meter, VideoAccuracyMeter))
def test_single_meter_update_and_reset(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update.
"""
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
        # 3 videos x 2 clips per video = 6 rows, num classes = 3,
        # each score is a value in {1, 2, 3}
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
        # Class 0 is the correct class for video 1, class 1 for video 2, and
        # class 2 for video 3 (each pair of consecutive rows is one video)
target = torch.tensor([0, 0, 1, 1, 2, 2])
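        # Hedged sketch of the presumed behavior: the meter averages the clip
        # scores per video, so video 1 -> [3.0, 1.5, 1.5] (top-1 = class 0,
        # correct), video 2 -> [1.0, 2.0, 2.5] (top-1 = class 2, wrong, but
        # class 1 is in the top 2), video 3 -> [1.5, 2.5, 2.0] (top-1 =
        # class 1, wrong, but class 2 is in the top 2).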
        # Only the first video has its top class correct; all three videos
        # have the correct class within the top 2
expected_value = {"top_1": 1 / 3.0, "top_2": 3 / 3.0}
self.meter_update_and_reset_test(
meter, model_output, target, expected_value, is_train=False
)
def test_double_meter_update_and_reset(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
        # 3 videos x 2 clips per video = 6 rows, num classes = 3,
        # each score is a value in {1, 2, 3}.
        # Data for two batches is provided
model_outputs = [
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
]
        # Class 0 is the correct class for video 1, class 1 for video 2, and
        # class 2 for video 3, in both batches
targets = [torch.tensor([0, 0, 1, 1, 2, 2]), torch.tensor([0, 0, 1, 1, 2, 2])]
        # The two batches are identical, so each has top-1 accuracy of 1/3.0
        # and top-2 accuracy of 3/3.0, giving 2/6.0 and 6/6.0 overall
expected_value = {"top_1": 2 / 6.0, "top_2": 6 / 6.0}
self.meter_update_and_reset_test(
meter, model_outputs, targets, expected_value, is_train=False
)
def test_meter_invalid_model_output(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
# This model output has 3 dimensions instead of expected 2
model_output = torch.tensor(
[[[3, 2, 1], [1, 2, 3]], [[-1, -3, -4], [-10, -90, -100]]],
dtype=torch.float,
)
target = torch.tensor([0, 1, 2])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_target(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
# Target has 3 dimensions instead of expected 1 or 2
target = torch.tensor([[[0, 1, 2], [0, 1, 2]]])
self.meter_invalid_meter_input_test(meter, model_output, target)
# Target of clips from the same video is not consistent
target = torch.tensor([0, 2, 1, 1, 2, 2])
self.meter_invalid_update_test(meter, model_output, target, is_train=False)
def test_meter_invalid_topk(self):
meter = VideoAccuracyMeter(
topk=[1, 5], clips_per_video_train=1, clips_per_video_test=2
)
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
target = torch.tensor([0, 1, 2])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_get_set_classy_state_test(self):
# In this test we update meter0 with model_output0 & target0
# and we update meter1 with model_output1 & target1 then
# transfer the state from meter1 to meter0 and validate they
# give same expected value.
# Expected value is the expected value of meter1
meters = [
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
]
        # 3 videos x 2 clips per video = 6 rows, num classes = 3; each score
        # is a value in {1, 2, 3}, with 3 the highest
model_outputs = [
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
),
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
]
        # Class 0 is the correct class for video 1, class 1 for video 2, and
        # class 2 for video 3, in both updates
targets = [torch.tensor([0, 0, 1, 1, 2, 2]), torch.tensor([0, 0, 1, 1, 2, 2])]
# Value for second update
expected_value = {"top_1": 1 / 3.0, "top_2": 3 / 3.0}
self.meter_get_set_classy_state_test(
meters, model_outputs, targets, expected_value, is_train=False
)
def test_meter_distributed(self):
# Meter0 will execute on one process, Meter1 on the other
meters = [
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
]
        # 3 videos x 2 clips per video = 6 rows, num classes = 3; each score
        # is a value in {1, 2, 3}, with 3 the highest
model_outputs = [
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
), # Meter 0
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
), # Meter 1
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
), # Meter 0
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
), # Meter 1
]
        # For both meters, class 0 is the correct class for video 1, class 1
        # for video 2, and class 2 for video 3
targets = [
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 0
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 1
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 0
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 1
]
        # After one update to each meter there is 1 correct top-1 and 4 correct
        # top-2 predictions across the 6 videos; the second pair of updates
        # repeats the same data, doubling both counts
expected_values = [
{"top_1": 1 / 6.0, "top_2": 4 / 6.0}, # After one update to each meter
{"top_1": 2 / 12.0, "top_2": 8 / 12.0}, # After two updates to each meter
]
self.meter_distributed_test(
meters, model_outputs, targets, expected_values, is_train=False
)
| ClassyVision-main | test/meters_video_accuracy_meter_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import Mock
from classy_vision.dataset import build_dataset
from classy_vision.hooks import ClassyHook
from classy_vision.losses import build_loss
from classy_vision.models import build_model
from classy_vision.optim import build_optimizer, build_optimizer_schedulers
from classy_vision.optim.param_scheduler import (
ClassyParamScheduler,
register_param_scheduler,
UpdateInterval,
)
from classy_vision.tasks import ClassificationTask, ClassyTask
from classy_vision.trainer import LocalTrainer
@register_param_scheduler("test_scheduler_where")
class TestParamSchedulerWhere(ClassyParamScheduler):
def __init__(self):
self.update_interval = UpdateInterval.STEP
def __call__(self, where):
return where
@classmethod
def from_config(cls, cfg):
return cls()
@register_param_scheduler("test_scheduler_where_double")
class TestParamSchedulerWhereDouble(ClassyParamScheduler):
def __init__(self):
self.update_interval = UpdateInterval.EPOCH
def __call__(self, where):
return where * 2
@classmethod
def from_config(cls, cfg):
return cls()
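# The two toy schedulers above make parameter values easy to predict in the
# tests below: "test_scheduler_where" returns the training progress `where`
# in [0, 1) and updates every step, while "test_scheduler_where_double"
# returns 2 * where and updates once per epoch.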
class TestParamSchedulerIntegration(unittest.TestCase):
def _get_optimizer_config(self, skip_param_schedulers=False):
optimizer_config = {"name": "sgd", "num_epochs": 10, "momentum": 0.9}
if not skip_param_schedulers:
optimizer_config["param_schedulers"] = {
"lr": {"name": "test_scheduler_where"},
"weight_decay": {"name": "test_scheduler_where_double"},
}
return optimizer_config
def _get_config(self, skip_param_schedulers=False):
return {
"loss": {"name": "CrossEntropyLoss"},
"dataset": {
"train": {
"name": "synthetic_image",
"split": "train",
"num_classes": 2,
"crop_size": 20,
"class_ratio": 0.5,
"num_samples": 10,
"seed": 0,
"batchsize_per_replica": 5,
"use_shuffle": True,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
],
"key": "input",
}
],
},
"test": {
"name": "synthetic_image",
"split": "test",
"num_classes": 2,
"crop_size": 20,
"class_ratio": 0.5,
"num_samples": 10,
"seed": 0,
"batchsize_per_replica": 5,
"use_shuffle": False,
"transforms": [
{
"name": "apply_transform_to_key",
"transforms": [
{"name": "ToTensor"},
{
"name": "Normalize",
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
},
],
"key": "input",
}
],
},
},
"model": {
"name": "mlp",
# 3x20x20 = 1200
"input_dim": 1200,
"output_dim": 1000,
"hidden_dims": [10],
},
"meters": {"accuracy": {"topk": [1]}},
"optimizer": self._get_optimizer_config(skip_param_schedulers),
}
def _build_task(self, num_epochs, skip_param_schedulers=False):
config = self._get_config(skip_param_schedulers)
config["optimizer"]["num_epochs"] = num_epochs
task = (
ClassificationTask()
.set_num_epochs(num_epochs)
.set_loss(build_loss(config["loss"]))
.set_model(build_model(config["model"]))
.set_optimizer(build_optimizer(config["optimizer"]))
.set_optimizer_schedulers(build_optimizer_schedulers(config["optimizer"]))
)
for phase_type in ["train", "test"]:
dataset = build_dataset(config["dataset"][phase_type])
task.set_dataset(dataset, phase_type)
self.assertTrue(task is not None)
return task
def test_param_scheduler_epoch(self):
task = self._build_task(num_epochs=3)
where_list = []
class SchedulerMock(ClassyParamScheduler):
def __call__(self, where):
where_list.append(where)
return 0.1
mock = SchedulerMock(UpdateInterval.EPOCH)
task.set_optimizer_schedulers({"lr": mock})
trainer = LocalTrainer()
trainer.train(task)
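        # With an epoch update interval and 3 epochs, the scheduler is queried
        # once per epoch, at where = 0, 1/3 and 2/3.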
self.assertEqual(where_list, [0, 1 / 3, 2 / 3])
def test_param_scheduler_step(self):
task = self._build_task(num_epochs=3)
where_list = []
class SchedulerMock(ClassyParamScheduler):
def __call__(self, where):
where_list.append(where)
return 0.1
mock = SchedulerMock(UpdateInterval.STEP)
task.set_optimizer_schedulers({"lr": mock})
trainer = LocalTrainer()
trainer.train(task)
        # We have 10 samples and a batch size of 5, so each epoch takes two
        # steps (6 steps in total). The first call comes from initialization;
        # the remaining calls happen once per step, at where = 0/6 .. 5/6.
self.assertEqual(where_list, [0, 0, 1 / 6, 2 / 6, 3 / 6, 4 / 6, 5 / 6])
def test_no_param_schedulers(self):
task = self._build_task(num_epochs=3, skip_param_schedulers=True)
# there should be no param schedulers
self.assertEqual(task.optimizer_schedulers, {})
# we should still be able to train the task
trainer = LocalTrainer()
trainer.train(task)
def test_hook(self):
task = self._build_task(num_epochs=3)
lr_list = []
weight_decay_list = []
momentum_list = []
test_instance = self
class TestHook(ClassyHook):
on_start = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def on_step(self, task: ClassyTask) -> None:
if not task.train:
return
# make sure we have non-zero param groups
test_instance.assertGreater(len(task.optimizer.param_groups), 0)
lr_list.append(task.optimizer.options_view.lr)
weight_decay_list.append(task.optimizer.options_view.weight_decay)
momentum_list.append(task.optimizer.options_view.momentum)
task.set_hooks([TestHook()])
trainer = LocalTrainer()
trainer.train(task)
# We have 10 samples, batch size is 5. Each epoch takes two steps. So,
# there will be a total of 6 steps.
# the lr scheduler uses a step update interval
self.assertEqual(lr_list, [0 / 6, 1 / 6, 2 / 6, 3 / 6, 4 / 6, 5 / 6])
# the weight decay scheduler uses an epoch update interval
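        # a hedged check: it returns 2 * where, and `where` advances once per
        # epoch (0, 1/3, 2/3), so both steps of each epoch see 0, 4/6 and 8/6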
self.assertEqual(weight_decay_list, [0 / 6, 0 / 6, 4 / 6, 4 / 6, 8 / 6, 8 / 6])
self.assertEqual(momentum_list, [0.9, 0.9, 0.9, 0.9, 0.9, 0.9])
def test_update_interval_from_config(self):
# test a config which specifies an update interval
config = {"update_interval": "epoch"}
self.assertEqual(
UpdateInterval.from_config(config, UpdateInterval.STEP),
UpdateInterval.EPOCH,
)
# test a config which doesn't specify an update interval
config = {}
self.assertEqual(
UpdateInterval.from_config(config, UpdateInterval.STEP), UpdateInterval.STEP
)
# test a config with an invalid update interval
config = {"update_interval": "invalid"}
with self.assertRaises(Exception):
UpdateInterval.from_config(config, UpdateInterval.EPOCH)
| ClassyVision-main | test/optim_param_scheduler_test.py |