# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exploiting the Intrinsic Neighborhood Structure for SFDA."""
from absl import logging
from chirp.projects.sfda import adapt
from chirp.projects.sfda import losses
from chirp.projects.sfda import method_utils
from chirp.projects.sfda import model_utils
from clu import metrics as clu_metrics
import flax
import flax.jax_utils as flax_utils
import flax.linen as flax_linen
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
@flax.struct.dataclass
class NRCLoss(clu_metrics.Metric):
"""Computes NRC's loss for the standard single-label case."""
probabilities_sum: jnp.ndarray
nn_loss_sum: jnp.ndarray
extended_nn_loss_sum: jnp.ndarray
label_mask: jnp.ndarray | None
n_samples: int
@classmethod
def from_model_output(
cls,
probabilities: jnp.ndarray,
nn_probability: jnp.ndarray,
extended_nn_probability: jnp.ndarray,
nn_weight: jnp.ndarray,
extended_nn_weight: jnp.ndarray,
label_mask: jnp.ndarray | None,
**_,
) -> "NRCLoss":
"""Computes the standard extended nearest-neighbors loss.
Args:
probabilities: Model's probability for the batch.
nn_probability: Batch's nearest-neighbors' probability vectors.
extended_nn_probability: Batch's extended nearest-neighbors' probability
vectors.
nn_weight: The weight used for each nearest-neighbor. Expected shape
[batch_size, nn].
extended_nn_weight: The weight used for each extended nearest-neighbor.
Expected shape [1] (as the same weight is used for all extended
neighbors).
label_mask: An optional mask restricting the set of classes used; same
shape as probabilities, or None.
Returns:
NRCLoss: An instance of NRCLoss.
"""
if label_mask is None:
label_mask = jnp.ones_like(probabilities)
nn_loss = -(
nn_weight
* (
label_mask[:, None, :] * probabilities[:, None, :] * nn_probability
).sum(axis=-1)
).sum(
axis=-1
) # [batch_size]
extended_nn_loss = -(
extended_nn_weight
* (
label_mask[:, None, None, :]
* probabilities[:, None, None, :]
* extended_nn_probability
).sum(axis=-1)
).sum(
axis=1
) # [batch_size]
probabilities_sum = probabilities.sum(axis=0) # [num classes]
return cls(
probabilities_sum=probabilities_sum,
nn_loss_sum=nn_loss.sum(),
extended_nn_loss_sum=extended_nn_loss.sum(),
label_mask=label_mask,
n_samples=probabilities.shape[0],
)
def merge(self, other: "NRCLoss") -> "NRCLoss":
return type(self)(
probabilities_sum=self.probabilities_sum + other.probabilities_sum,
nn_loss_sum=self.nn_loss_sum + other.nn_loss_sum,
extended_nn_loss_sum=self.extended_nn_loss_sum
+ other.extended_nn_loss_sum,
n_samples=self.n_samples + other.n_samples,
label_mask=other.label_mask,
)
def compute(self):
probabilities_marginal = self.probabilities_sum / self.n_samples
marginal_entropy = losses.label_ent(
probabilities=probabilities_marginal, label_mask=self.label_mask[0]
)
return (
1 / self.n_samples * (self.nn_loss_sum + self.extended_nn_loss_sum)
- marginal_entropy
)
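# A minimal, self-contained sketch (not part of the original module) showing
# how NRCLoss is accumulated with clu's from_model_output / merge / compute
# pattern. All shapes and values below are hypothetical toy data.
def _nrc_loss_toy_example():
  batch_size, nn, extended_nn, num_classes = 2, 3, 3, 4
  probabilities = flax_linen.softmax(
      jax.random.normal(jax.random.PRNGKey(0), (batch_size, num_classes))
  )
  # Uniform neighbor probabilities keep the example easy to check by hand.
  nn_probability = jnp.full((batch_size, nn, num_classes), 1.0 / num_classes)
  extended_nn_probability = jnp.full(
      (batch_size, nn, extended_nn, num_classes), 1.0 / num_classes
  )
  nn_weight = jnp.ones((batch_size, nn))
  extended_nn_weight = jnp.array([0.1])
  loss_metric = NRCLoss.from_model_output(
      probabilities=probabilities,
      nn_probability=nn_probability,
      extended_nn_probability=extended_nn_probability,
      nn_weight=nn_weight,
      extended_nn_weight=extended_nn_weight,
      label_mask=None,
  )
  # Successive batches are folded in with merge; compute yields the scalar
  # loss: averaged (extended) neighbor terms minus the marginal entropy.
  loss_metric = loss_metric.merge(loss_metric)
  return loss_metric.compute()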
@flax.struct.dataclass
class NRCMultiLoss(clu_metrics.Metric):
"""Computes NRC's loss for the multi-label case."""
probabilities_sum: jnp.ndarray
nn_loss_sum: jnp.ndarray
extended_nn_loss_sum: jnp.ndarray
label_mask: jnp.ndarray
n_samples: int
@classmethod
def from_model_output(
cls,
probabilities: jnp.ndarray,
nn_probability: jnp.ndarray,
extended_nn_probability: jnp.ndarray,
nn_weight: jnp.ndarray,
extended_nn_weight: jnp.ndarray,
label_mask: jnp.ndarray,
**_,
) -> "NRCMultiLoss":
if label_mask is not None:
# probabilities have not been masked but nn_probability has been, so we
# pad the latter to bring it to the same dimensionality as the former.
reference_mask = label_mask[0]
_, num_nn, num_classes_used = nn_probability.shape
nn_probability_flatter = nn_probability.reshape((-1, num_classes_used))
batch_size = nn_probability_flatter.shape[0]
num_classes_total = reference_mask.shape[0]
padded_nn_prob = jnp.zeros((batch_size, num_classes_total))
col_index = jnp.tile(
jnp.nonzero(reference_mask, size=num_classes_used)[0], batch_size
)
row_index = jnp.repeat(jnp.arange(batch_size), num_classes_used)
nn_probability_flatter = padded_nn_prob.at[(row_index, col_index)].set(
nn_probability_flatter.flatten()
)
nn_probability = nn_probability_flatter.reshape(
(-1, num_nn, num_classes_total)
)
_, num_nn, num_enn, num_classes_used = extended_nn_probability.shape
enn_probability_flatter = extended_nn_probability.reshape(
(-1, num_classes_used)
)
batch_size = enn_probability_flatter.shape[0]
padded_enn_prob = jnp.zeros((batch_size, num_classes_total))
col_index = jnp.tile(
jnp.nonzero(reference_mask, size=num_classes_used)[0], batch_size
)
row_index = jnp.repeat(jnp.arange(batch_size), num_classes_used)
enn_probability_flatter = padded_enn_prob.at[(row_index, col_index)].set(
enn_probability_flatter.flatten()
)
extended_nn_probability = enn_probability_flatter.reshape(
(-1, num_nn, num_enn, num_classes_total)
)
def dot_product(probability_a, probability_b):
return probability_a * probability_b + (1 - probability_a) * (
1 - probability_b
)
nn_loss = -(
label_mask
* (
nn_weight[..., None] # [batch_size, nn, 1]
* (dot_product(probabilities[:, None, :], nn_probability))
).sum(axis=1)
).sum(-1) / label_mask.sum(
-1
) # [batch_size]
extended_nn_loss = -(
label_mask
* ( # pytype: disable=wrong-arg-types # jax-ndarray
extended_nn_weight
* (
dot_product(
probabilities[:, None, None, :], extended_nn_probability
)
)
).sum(axis=[1, 2])
).sum(-1) / label_mask.sum(
-1
) # [batch_size]
probabilities_sum = probabilities.sum(axis=0) # [num classes]
return cls(
probabilities_sum=probabilities_sum,
nn_loss_sum=nn_loss.sum(),
extended_nn_loss_sum=extended_nn_loss.sum(),
label_mask=label_mask,
n_samples=probabilities.shape[0],
)
def merge(self, other: "NRCMultiLoss") -> "NRCMultiLoss":
return type(self)(
probabilities_sum=self.probabilities_sum + other.probabilities_sum,
nn_loss_sum=self.nn_loss_sum + other.nn_loss_sum,
extended_nn_loss_sum=self.extended_nn_loss_sum
+ other.extended_nn_loss_sum,
n_samples=self.n_samples + other.n_samples,
label_mask=other.label_mask,
)
def compute(self):
probabilities_marginal = self.probabilities_sum / self.n_samples
marginal_entropy = losses.label_binary_ent(
probabilities=probabilities_marginal, label_mask=self.label_mask[0]
)
return (
1 / self.n_samples * (self.nn_loss_sum + self.extended_nn_loss_sum)
- marginal_entropy
)
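# A small sketch (hypothetical values, not part of the original module) of the
# scatter-based padding used in NRCMultiLoss.from_model_output above:
# probabilities restricted to the masked class subset are written back into
# the full class space, with scatter indices derived from the label mask.
def _pad_masked_probabilities_example():
  reference_mask = jnp.array([1, 0, 1, 0])  # 2 of 4 classes are used.
  masked_prob = jnp.array([[0.2, 0.8], [0.6, 0.4]])  # [batch=2, used=2]
  batch_size, num_used = masked_prob.shape
  num_total = reference_mask.shape[0]
  col_index = jnp.tile(
      jnp.nonzero(reference_mask, size=num_used)[0], batch_size
  )
  row_index = jnp.repeat(jnp.arange(batch_size), num_used)
  padded = jnp.zeros((batch_size, num_total))
  # Yields [[0.2, 0.0, 0.8, 0.0], [0.6, 0.0, 0.4, 0.0]].
  return padded.at[(row_index, col_index)].set(masked_prob.flatten())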
class NRC(adapt.SFDAMethod):
"""Exploiting the Intrinsic Neighborhood Structure for SFDA."""
_CITATION = (
"Yang, Shiqi, et al. 'Exploiting the intrinsic neighborhood structure "
"for source-free domain adaptation.' Advances in Neural Information "
"Processing Systems 34 (2021): 29393-29405."
)
@staticmethod
def compute_nearest_neighbors(
batch_feature: jnp.ndarray,
dataset_feature: jnp.ndarray,
nn: int,
memory_efficient_computation: bool = False,
) -> jnp.ndarray:
"""Compute batch_feature's nearest-neighbors among dataset_feature.
Args:
batch_feature: The features for the provided batch of data, shape
[batch_size, feature_dim]
dataset_feature: The features for the whole dataset, shape [dataset_size,
feature_dim]
nn: The number of nearest-neighbors to use.
memory_efficient_computation: whether to use a memory-efficient
implementation.
Returns:
The indices of batch_feature's nn nearest-neighbors among
dataset_feature. Shape [batch_size, nn]
Raises:
ValueError if batch_feature and dataset_feature's shape don't match.
"""
batch_shape = batch_feature.shape
dataset_shape = dataset_feature.shape
if batch_feature.ndim != dataset_feature.ndim or (
batch_shape[-1] != dataset_shape[-1]
):
raise ValueError(
"Batch features and dataset features' shapes are not consistent."
f"Currently batch_feature: {batch_shape} and dataset_feature:"
f"{dataset_shape}"
)
# Compute the nearest-neighbors
neighbors = min(dataset_shape[0], nn + 1)
if memory_efficient_computation:
# We loop over samples in the current batch to avoid storing a
# batch_size x dataset_size float array. That slows down computation, but
# reduces memory footprint, which becomes the bottleneck for large
# datasets.
nn_indices = []
for sample_feature in batch_feature:
pairwise_distances = method_utils.jax_cdist(
jnp.expand_dims(sample_feature, 0), dataset_feature
) # [1, dataset_size]
nn_indices.append(
jax.lax.top_k(-pairwise_distances, neighbors)[1][:, 1:]
) # [1, neighbors - 1]
nn_indices = jnp.concatenate(
nn_indices, axis=0
) # [batch_size, neighbors - 1]
else:
pairwise_distances = method_utils.jax_cdist(
batch_feature, dataset_feature
)
nn_indices = jax.lax.top_k(-pairwise_distances, neighbors)[1][
:, 1:
] # [batch_size, neighbors - 1]
return nn_indices
def before_run(
self,
key: jax.random.PRNGKeyArray,
model_bundle: model_utils.ModelBundle,
adaptation_state: adapt.AdaptationState,
adaptation_dataset: tf.data.Dataset,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs,
) -> adapt.AdaptationState:
"""Initialize the probability and feature banks.
Args:
key: The jax random key used for random operations in this epoch.
model_bundle: The ModelBundle used for adaptation.
adaptation_state: The current state of adaptation.
adaptation_dataset: The dataset used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
An updated version of adaptation_state, where method_state contains
all initialized banks.
"""
logging.info("Initializing banks...")
# Extract embeddings and model's probabilities.
forward_result = method_utils.forward_dataset(
dataset=adaptation_dataset,
adaptation_state=adaptation_state,
model_bundle=model_bundle,
modality=modality,
multi_label=multi_label,
use_batch_statistics=method_kwargs["update_bn_statistics"],
)
# Store everything in the method_state dictionary.
ids = forward_result["id"]
method_state = {
"dataset_feature": forward_result["embedding"],
"dataset_probability": forward_result["proba"],
"id2index": {ids[i]: i for i in range(len(ids))},
}
adaptation_state = adaptation_state.replace(method_state=method_state)
return adaptation_state
def before_iter(
self,
key: jax.random.PRNGKeyArray,
batch: dict[str, np.ndarray],
adaptation_state: adapt.AdaptationState,
model_bundle: model_utils.ModelBundle,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs,
) -> tuple[adapt.AdaptationState, dict[str, jnp.ndarray]]:
"""Compute the (extended-)nearest-neighbors probability and weights.
NRC relies on aligning model's probabilities with 'pseudo-labels'
computed from the (extended) nearest-neighbors. Here, we compute those
pseudo-labels, and their associated `weights` (i.e. 1 for reciprocal
nearest-neighbors, and 'base_affinity' for the others).
Args:
key: The jax random key used for random operations.
batch: The current batch of data.
adaptation_state: The current state of adaptation.
model_bundle: The ModelBundle used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
A dictionary containing direct and extended nearest-neighbors's
probability vectors and weights, for each sample in the batch.
"""
method_state = flax_utils.unreplicate(adaptation_state.method_state)
id2index = method_state["id2index"]
batch_indices = np.array(
[id2index[x] for x in flax_utils.unreplicate(batch["tfds_id"])]
)
reference_label_mask = method_utils.get_label_mask(batch)
# Obtain the model's output for the current batch.
forward_step = self.cache_get_forward_step(
model_bundle.model, modality, method_kwargs["update_bn_statistics"]
)
model_outputs = forward_step( # pytype: disable=wrong-arg-types # jax-ndarray
adapt.keep_jax_types(batch),
adaptation_state.model_state,
adaptation_state.model_params,
None,
)
model_outputs = flax_utils.unreplicate(model_outputs)
model_outputs = method_utils.maybe_restrict_labels(
model_outputs, reference_label_mask, adaptation_state
)
logit2proba = flax_linen.sigmoid if multi_label else flax_linen.softmax
# Compute nearest-neighbors and extended nearest-neighbors indices.
nn_indices = self.compute_nearest_neighbors(
batch_feature=model_outputs.embedding,
dataset_feature=method_state["dataset_feature"],
nn=method_kwargs["nn"],
) # [batch_size, nn]
extended_nn_indices = jnp.stack(
[ # pylint: disable=g-complex-comprehension
self.compute_nearest_neighbors(
batch_feature=method_state["dataset_feature"][
sample_nn_indices
],
dataset_feature=method_state["dataset_feature"],
nn=method_kwargs["extended_nn"],
)
for sample_nn_indices in nn_indices # [nn, extended_nn]
],
axis=0,
) # [batch_size, nn, extended_nn]
# Get nearest-neighbors and extended nearest-neighbors' probability.
nn_probability = method_state["dataset_probability"][
nn_indices
] # [batch_size, nn, num_classes]
extended_nn_probability = method_state["dataset_probability"][
extended_nn_indices
] # [batch_size, nn, extended_nn, num_classes]
# Compute weights for nearest-neighbors and extended nearest-neighbors.
# Those indicate the importance of each (extended) nearest-neighbor in
# the loss.
match = (extended_nn_indices == batch_indices[:, None, None]).sum(
-1
) # [batch_size, nn]
assert match.ndim == 2
nn_weight = jnp.where(
match > 0, match, method_kwargs["base_affinity"]
) # [batch_size, nn]
extended_nn_weight = jnp.array([method_kwargs["base_affinity"]])
# Update banks
method_state["dataset_feature"] = (
method_state["dataset_feature"]
.at[batch_indices]
.set(model_outputs.embedding)
)
method_state["dataset_probability"] = (
method_state["dataset_probability"]
.at[batch_indices]
.set(logit2proba(model_outputs.label))
)
# Pad back to the original space
if "label_mask" in batch:
reference_label_mask = method_utils.get_label_mask(batch)
batch_size, nn, extended_nn, num_classes = extended_nn_probability.shape
nn_probability = method_utils.pad_pseudo_label(
reference_label_mask,
nn_probability.reshape(-1, num_classes),
adaptation_state,
)
extended_nn_probability = method_utils.pad_pseudo_label(
reference_label_mask,
extended_nn_probability.reshape(-1, num_classes),
adaptation_state,
)
if reference_label_mask is not None:
nn_probability = nn_probability.reshape(
batch_size, nn, reference_label_mask.shape[-1]
)
extended_nn_probability = extended_nn_probability.reshape(
batch_size, nn, extended_nn, reference_label_mask.shape[-1]
)
return adaptation_state, {
"nn_weight": flax_utils.replicate(nn_weight),
"extended_nn_weight": flax_utils.replicate(extended_nn_weight),
"nn_probability": flax_utils.replicate(nn_probability),
"extended_nn_probability": flax_utils.replicate(
extended_nn_probability
),
}
def get_adaptation_metrics(
self, supervised: bool, multi_label: bool, **method_kwargs
) -> type[clu_metrics.Collection]:
"""Obtain metrics that will be monitored during adaptation.
Args:
supervised: Whether the problem is supervised. Only used to know if we can
track supervised metrics, such as accuracy.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Method's kwargs.
Returns:
A collection of metrics.
"""
metrics_dict = vars(
adapt.get_common_metrics(supervised=supervised, multi_label=multi_label)
)["__annotations__"]
if multi_label:
metrics_dict["main_loss"] = NRCMultiLoss
else:
metrics_dict["main_loss"] = NRCLoss
return clu_metrics.Collection.create(**metrics_dict)
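# A hedged usage sketch (toy shapes, not part of the original module) for
# NRC.compute_nearest_neighbors. Because the batch is drawn from the dataset
# itself, the method requests nn + 1 neighbors and drops the first column,
# which is each sample's self-match.
def _nrc_nearest_neighbors_example():
  dataset_feature = jax.random.normal(jax.random.PRNGKey(0), (10, 8))
  batch_feature = dataset_feature[:4]  # The batch is part of the dataset.
  nn_indices = NRC.compute_nearest_neighbors(
      batch_feature=batch_feature, dataset_feature=dataset_feature, nn=3
  )
  return nn_indices  # [4, 3] indices into dataset_feature.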
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dropout-based Uncertainty-driven Self-Training (DUST) for SFDA."""
import functools
from absl import logging
from chirp.projects.sfda import adapt
from chirp.projects.sfda import losses
from chirp.projects.sfda import method_utils
from chirp.projects.sfda import model_utils
from clu import metrics as clu_metrics
import flax.jax_utils as flax_utils
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
class DUST(adapt.SFDAMethod):
"""Adaptation of Dropout-based Uncertainty-driven Self-Training for SFDA.
Note that DUST itself is not an SFDA method, because it assumes the
availability of the labelled source data during the adaptation phase. We
propose a particular way to adapt it to the SFDA setting.
"""
_CITATION = (
"Khurana, Sameer, et al. 'Unsupervised domain adaptation for speech "
"recognition via uncertainty driven self-training.' ICASSP 2021-2021 "
"IEEE International Conference on Acoustics, Speech and Signal "
"Processing (ICASSP). IEEE, 2021."
)
def before_epoch(
self,
key: jax.random.PRNGKeyArray,
model_bundle: model_utils.ModelBundle,
adaptation_state: adapt.AdaptationState,
adaptation_dataset: tf.data.Dataset,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs
) -> adapt.AdaptationState:
"""Compute the pseudo-labels, the masks and store them in memory.
Args:
key: The jax random key used for random operations in this epoch.
model_bundle: The ModelBundle used for adaptation.
adaptation_state: The current state of adaptation.
adaptation_dataset: The dataset used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
The adaptation state, with a potentially updated 'method_state' attribute.
"""
logging.info("Preparing pseudo-labels...")
forward_fn = functools.partial(
method_utils.forward_dataset,
dataset=adaptation_dataset,
adaptation_state=adaptation_state,
model_bundle=model_bundle,
modality=modality,
multi_label=multi_label,
use_batch_statistics=method_kwargs["update_bn_statistics"],
)
# Compute model's reference predictions (no dropout used)
reference_forward_result = forward_fn(train=False)
reference_probability = reference_forward_result["proba"]
# We perform multiple noisy forward passes, and keep track of the
# KL-divergence between the reference predictions and noisy predictions.
# Note that here we depart from the edit distance proposed in the DUST paper
# for speech, since the KL-divergence between class label predictions is
# more appropriate for our use-case.
kl_distances = []
kl_fn = losses.label_binary_kl if multi_label else losses.label_kl
for _ in range(method_kwargs["num_random_passes"]):
random_pass_key, key = jax.random.split(key)
noisy_probability = forward_fn(key=random_pass_key, train=True)["proba"]
kl_distances.append(
kl_fn(reference_probability, noisy_probability, label_mask=None)
)
# We compute the mask by only keeping samples whose maximum kl_divergence
# observed is lower than a pre-defined threshold.
pseudo_label_mask = (
jnp.stack(kl_distances, 0).max(axis=0) < method_kwargs["kl_threshold"]
)
sample_ids = reference_forward_result["id"]
pseudo_label = reference_probability
# method_state will act as a memory, from which pseudo-labels and masks
# will be grabbed on-the-go over the next epoch of adaptation.
method_state = {
"pseudo_label": pseudo_label,
"pseudo_label_mask": pseudo_label_mask,
"id2index": {sample_ids[i]: i for i in range(len(sample_ids))},
}
adaptation_state = adaptation_state.replace(method_state=method_state)
return adaptation_state
def before_iter(
self,
key: jax.random.PRNGKeyArray,
batch: dict[str, np.ndarray],
adaptation_state: adapt.AdaptationState,
model_bundle: model_utils.ModelBundle,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs
) -> tuple[adapt.AdaptationState, dict[str, jnp.ndarray]]:
"""Grab the pseudo-labels and masks for the current batch.
Args:
key: The jax random key used for random operations.
batch: The current batch of data.
adaptation_state: The current state of adaptation.
model_bundle: The ModelBundle used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
A dictionary containing the pseudo-labels and mask to use for the
iteration.
"""
method_state = flax_utils.unreplicate(adaptation_state.method_state)
id2index = method_state["id2index"]
batch_indices = np.array(
[id2index[x] for x in flax_utils.unreplicate(batch["tfds_id"])]
)
pseudo_label = method_state["pseudo_label"][batch_indices]
pseudo_label_mask = method_state["pseudo_label_mask"][batch_indices]
# pad pseudo-labels to match model output as needed.
label_mask = method_utils.get_label_mask(batch)
pseudo_label = method_utils.pad_pseudo_label(
label_mask, pseudo_label, adaptation_state
)
if multi_label:
pseudo_label_mask = method_utils.pad_pseudo_label(
label_mask, pseudo_label_mask, adaptation_state
)
return adaptation_state, {
"pseudo_label": flax_utils.replicate(pseudo_label),
"pseudo_label_mask": flax_utils.replicate(pseudo_label_mask),
}
def get_adaptation_metrics(
self, supervised: bool, multi_label: bool, **method_kwargs
) -> type[clu_metrics.Collection]:
"""Obtain metrics that will be monitored during adaptation."""
metrics_dict = vars(
adapt.get_common_metrics(supervised=supervised, multi_label=multi_label)
)["__annotations__"]
def single_label_loss_fn(
probabilities, pseudo_label, pseudo_label_mask, label_mask, **_
):
pl_xent = losses.label_xent(
probabilities=probabilities,
label=pseudo_label,
label_mask=label_mask,
sample_mask=pseudo_label_mask,
)
return pl_xent
def multi_label_loss_fn(
probabilities: jnp.ndarray,
pseudo_label: jnp.ndarray,
pseudo_label_mask: jnp.ndarray,
label_mask: jnp.ndarray,
**_
):
# The probabilities that end up contributing to the final computation
# are those left unmasked by label_mask (defined by the target domain,
# which restricts the set of possible species) and that are confident
# enough (left unmasked by pseudo_label_mask).
pl_xent = losses.label_binary_xent(
probabilities=probabilities,
label=pseudo_label,
label_mask=label_mask * pseudo_label_mask,
)
return pl_xent
loss_fn = multi_label_loss_fn if multi_label else single_label_loss_fn
metrics_dict["main_loss"] = clu_metrics.Average.from_fun(loss_fn)
return clu_metrics.Collection.create(**metrics_dict)
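# A toy sketch (hypothetical numbers, not part of the original module) of
# DUST's uncertainty masking: a sample is kept only if the largest
# KL-divergence between its reference prediction and any noisy (dropout)
# prediction stays below the threshold.
def _dust_mask_toy_example(kl_threshold: float = 0.1):
  reference = jnp.array([[0.9, 0.1], [0.5, 0.5]])
  noisy_passes = [
      jnp.array([[0.88, 0.12], [0.1, 0.9]]),
      jnp.array([[0.85, 0.15], [0.2, 0.8]]),
  ]
  kl_distances = [
      losses.label_kl(reference, noisy, label_mask=None)
      for noisy in noisy_passes
  ]
  # Sample 0 is stable across passes and survives; sample 1 is not.
  return jnp.stack(kl_distances, 0).max(axis=0) < kl_threshold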
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NOisy TEacher-student with Laplacian Adjustment (NOTELA), our method."""
import functools
from absl import logging
from chirp.projects.sfda import adapt
from chirp.projects.sfda import losses
from chirp.projects.sfda import method_utils
from chirp.projects.sfda import model_utils
from clu import metrics as clu_metrics
import flax.jax_utils as flax_utils
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from scipy import sparse
import tensorflow as tf
class NOTELA(adapt.SFDAMethod):
"""NOisy TEacher-student with Laplacian Adjustment (NOTELA), our method.
It builds upon Dropout Student by incorporating a Laplacian regularization
to the teacher step. NOTELA works in two different modes:
- offline mode: Pseudo-labels are computed only once every epoch (before the
epoch starts).
- online mode: We track a memory of the dataset's extracted features and
probabilities. Pseudo-labels are computed on-the-go by comparing samples
from the current batch to the features/probabilities in memory.
In both cases, the student step consists in matching the pseudo-labels using
a noisy (dropout) forward pass of the model.
"""
@staticmethod
def compute_nearest_neighbors(
batch_feature: jnp.ndarray,
dataset_feature: jnp.ndarray,
knn: int,
sparse_storage: bool,
memory_efficient_computation: bool = False,
) -> jnp.ndarray | sparse.csr_matrix:
"""Compute batch_feature's nearest-neighbors among dataset_feature.
Args:
batch_feature: The features for the provided batch of data, shape
[batch_size, feature_dim]
dataset_feature: The features for the whole dataset, shape [dataset_size,
feature_dim]
knn: The number of nearest-neighbors to use.
sparse_storage: whether to use sparse storage for the affinity matrix.
memory_efficient_computation: Whether to make computation memory
efficient. This option trades speed for memory footprint by looping over
samples in the batch instead of fully vectorizing nearest-neighbor
computation. For large datasets, memory usage can become the bottleneck,
in which case this option should be set to True.
Returns:
The batch's nearest-neighbors affinity matrix of shape
[batch_size, dataset_size], where position (i, j) indicates whether
dataset_feature[j] belongs to batch_feature[i]'s nearest-neighbors.
Raises:
ValueError: If batch_feature and dataset_feature don't have the same
number of dimensions, or if their feature dimension don't match.
"""
batch_shape = batch_feature.shape
dataset_shape = dataset_feature.shape
if batch_feature.ndim != dataset_feature.ndim or (
batch_shape[-1] != dataset_shape[-1]
):
raise ValueError(
"Batch features and dataset features' shapes are not consistent."
f"(batch_feature: {batch_shape} and dataset_feature: {dataset_shape})"
)
# Compute the nearest-neighbors
neighbors = min(dataset_shape[0], knn)
if memory_efficient_computation:
# We loop over samples in the current batch to avoid storing a
# batch_size x dataset_size float array. That slows down computation, but
# reduces memory footprint, which becomes the bottleneck for large
# datasets.
col_indices = []
for sample_feature in batch_feature:
pairwise_distances = method_utils.jax_cdist(
jnp.expand_dims(sample_feature, 0), dataset_feature
) # [1, dataset_size]
col_indices.append(
jax.lax.top_k(-pairwise_distances, neighbors)[1][:, 1:]
) # [1, neighbors-1]
col_indices = jnp.stack(col_indices)
else:
pairwise_distances = method_utils.jax_cdist(
batch_feature, dataset_feature
) # [batch_size, dataset_size]
col_indices = jax.lax.top_k(-pairwise_distances, neighbors)[1][
:, 1:
] # [batch_size, neighbors-1]
col_indices = col_indices.flatten() # [batch_size * neighbors-1]
row_indices = jnp.repeat(
np.arange(batch_shape[0]), neighbors - 1
) # [0, ..., 0, 1, ...]
if sparse_storage:
data = jnp.ones(row_indices.shape[0])
nn_matrix = sparse.csr_matrix(
(data, (row_indices, col_indices)),
shape=(batch_shape[0], dataset_shape[0]),
)
else:
nn_matrix = jnp.zeros((batch_shape[0], dataset_shape[0]), dtype=jnp.uint8)
nn_matrix = nn_matrix.at[row_indices, col_indices].set(1)
return nn_matrix
@staticmethod
def teacher_step(
batch_proba: jnp.ndarray,
dataset_proba: jnp.ndarray,
nn_matrix: jnp.ndarray | sparse.csr_matrix,
lambda_: float,
alpha: float = 1.0,
normalize_pseudo_labels: bool = True,
eps: float = 1e-8,
) -> jnp.ndarray:
"""Computes the pseudo-labels (teacher-step) following Eq.(3) in the paper.
Args:
batch_proba: The model's probabilities on the current batch of data.
Expected shape [batch_size, proba_dim]
dataset_proba: The model's probabilities on the rest of the dataset.
Expected shape [dataset_size, proba_dim]
nn_matrix: The affinity between the points in the current batch
(associated to `batch_proba`) and the remaining of the points
(associated to `dataset_proba`), of shape [batch_size, dataset_size].
Specifically, position [i,j] informs if point j belongs to i's
nearest-neighbors.
lambda_: Weight controlling the Laplacian regularization.
alpha: Weight controlling the Softness regularization
normalize_pseudo_labels: Whether to normalize pseudo-labels to turn them
into valid probability distributions. This option should be kept to
True, and only be used for experimental purposes.
eps: For numerical stability.
Returns:
The soft pseudo-labels for the current batch of data, shape
[batch_size, proba_dim]
"""
if isinstance(nn_matrix, sparse.csr_matrix):
# By default, sum operation on a csr_matrix keeps the dimensions of the
# original matrix.
denominator = nn_matrix.sum(axis=-1)
else:
denominator = nn_matrix.sum(axis=-1, keepdims=True)
# In the limit where alpha goes to zero, we can rewrite the expression as
#
# pseudo_label = [batch_proba * jnp.exp(lambda_ * ...)] ** (1 / alpha)
#
# and see that the normalized pseudo-label probabilities take value 1 if
# they have the maximum value for the expression above over the class axis
# and zero otherwise.
if alpha == 0 and normalize_pseudo_labels:
pseudo_label = batch_proba * jnp.exp(
lambda_
* (nn_matrix @ dataset_proba)
/ (denominator + eps) # [*, batch_size, proba_dim]
)
pseudo_label = (
pseudo_label == pseudo_label.max(axis=-1, keepdims=True)
).astype(jnp.float32)
# If more than one class is maximally probable, we need to renormalize the
# distribution to be uniform over the maximally-probable classes.
pseudo_label /= pseudo_label.sum(axis=-1, keepdims=True)
else:
pseudo_label = batch_proba ** (1 / alpha) * jnp.exp(
(lambda_ / alpha) * (nn_matrix @ dataset_proba) / (denominator + eps)
) # [*, batch_size, proba_dim]
if normalize_pseudo_labels:
pseudo_label /= pseudo_label.sum(axis=-1, keepdims=True) + eps
return pseudo_label
def before_run(
self,
key: jax.random.PRNGKeyArray,
model_bundle: model_utils.ModelBundle,
adaptation_state: adapt.AdaptationState,
adaptation_dataset: tf.data.Dataset,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs,
) -> adapt.AdaptationState:
"""Initialize the memories when using NOTELA's online mode.
Args:
key: The jax random key used for random operations in this epoch.
model_bundle: The ModelBundle used for adaptation.
adaptation_state: The current state of adaptation.
adaptation_dataset: The dataset used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
An updated version of adaptation_state, where method_state contains
all initialized memories.
"""
if method_kwargs["online_pl_updates"]:
logging.info("Initializing memories...")
# Extract embeddings and model's probabilities.
forward_result = method_utils.forward_dataset(
dataset=adaptation_dataset,
adaptation_state=adaptation_state,
model_bundle=model_bundle,
modality=modality,
multi_label=multi_label,
use_batch_statistics=method_kwargs["update_bn_statistics"],
)
# Initialize global nearest-neighbor matrix
nn_matrix = self.compute_nearest_neighbors(
batch_feature=forward_result["embedding"],
dataset_feature=forward_result["embedding"],
knn=method_kwargs["knn"],
sparse_storage=True,
memory_efficient_computation=True,
)
# Store everything in the method_state dictionary. The nn_matrix cannot
# be directly stored as a sparse.csr_matrix (otherwise jax won't be able
# to replicate the method_state). Instead, it is stored as a jnp array,
# whose columns indicate the row and column indices of all non-zero
# elements in the matrix.
ids = forward_result["id"]
method_state = {
"dataset_feature": forward_result["embedding"],
"dataset_proba": forward_result["proba"],
"nn_matrix": jnp.stack(nn_matrix.nonzero(), axis=1), # [?, 2]
"id2index": {ids[i]: i for i in range(len(ids))},
}
adaptation_state = adaptation_state.replace(method_state=method_state)
return adaptation_state
def before_epoch(
self,
key: jax.random.PRNGKeyArray,
model_bundle: model_utils.ModelBundle,
adaptation_state: adapt.AdaptationState,
adaptation_dataset: tf.data.Dataset,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs,
) -> adapt.AdaptationState:
"""In 'offline mode', compute the pseudo-labels and store them in memory.
If 'offline mode' is not activated, nothing needs to be done at that stage,
as pseudo-labels will be computed on-the-go.
Args:
key: The jax random key used for random operations in this epoch.
model_bundle: The ModelBundle used for adaptation.
adaptation_state: The current state of adaptation.
adaptation_dataset: The dataset used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
The adaptation state, with a potentially updated 'method_state' attribute.
"""
if not method_kwargs["online_pl_updates"]:
logging.info("Preparing pseudo-labels...")
# Extract embeddings and model's probabilities.
forward_result = method_utils.forward_dataset(
dataset=adaptation_dataset,
adaptation_state=adaptation_state,
model_bundle=model_bundle,
modality=modality,
multi_label=multi_label,
use_batch_statistics=method_kwargs["update_bn_statistics"],
)
# Compute pseudo-labels that will be used during the next epoch of
# adaptation.
_, pseudo_label = self.compute_pseudo_label(
batch_feature=forward_result["embedding"],
dataset_feature=forward_result["embedding"],
batch_proba=forward_result["proba"],
dataset_proba=forward_result["proba"],
multi_label=multi_label,
knn=method_kwargs["knn"],
lambda_=method_kwargs["lambda_"],
alpha=method_kwargs["alpha"],
use_mutual_nn=method_kwargs["use_mutual_nn"],
normalize_pseudo_labels=method_kwargs["normalize_pseudo_labels"],
)
# method_state will act as a memory, from which pseudo-labels will be
# grabbed on-the-go over the next epoch of adaptation.
sample_ids = forward_result["id"]
method_state = {
"pseudo_label": pseudo_label,
"id2index": {sample_ids[i]: i for i in range(len(sample_ids))},
}
adaptation_state = adaptation_state.replace(method_state=method_state)
return adaptation_state
def before_iter(
self,
key: jax.random.PRNGKeyArray,
batch: dict[str, np.ndarray],
adaptation_state: adapt.AdaptationState,
model_bundle: model_utils.ModelBundle,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs,
) -> tuple[adapt.AdaptationState, dict[str, jnp.ndarray]]:
"""Grab or compute the pseudo-labels for the current batch.
In 'offline mode', we only grab pre-computed pseudo-labels from the
pseudo_label memory.
Args:
key: The jax random key used for random operations.
batch: The current batch of data.
adaptation_state: The current state of adaptation.
model_bundle: The ModelBundle used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
If using offline mode, the untouched adaptation_state. Otherwise, an
updated version in which the method_state's memories have been
updated.
A dictionary containing the pseudo-labels to use for the iteration.
"""
method_state = flax_utils.unreplicate(adaptation_state.method_state)
id2index = method_state["id2index"]
batch_indices = np.array(
[id2index[x] for x in flax_utils.unreplicate(batch["tfds_id"])]
)
reference_label_mask = method_utils.get_label_mask(batch)
if method_kwargs["online_pl_updates"]:
# In the online version, we compute the pseudo-labels on-the-go.
forward_step = self.cache_get_forward_step(
model_bundle.model, modality, method_kwargs["update_bn_statistics"]
)
model_outputs = forward_step( # pytype: disable=wrong-arg-types # jax-ndarray
adapt.keep_jax_types(batch),
adaptation_state.model_state,
adaptation_state.model_params,
None,
)
model_outputs = flax_utils.unreplicate(model_outputs)
model_outputs = method_utils.maybe_restrict_labels(
model_outputs, reference_label_mask, adaptation_state
)
logit2proba = nn.sigmoid if multi_label else nn.softmax
previous_nn_matrix = self.indices_to_sparse_matrix(
method_state["nn_matrix"],
(
method_state["dataset_feature"].shape[0],
method_state["dataset_feature"].shape[0],
),
)
batch_nn_matrix, pseudo_label = self.compute_pseudo_label(
batch_feature=model_outputs.embedding,
dataset_feature=method_state["dataset_feature"],
batch_proba=logit2proba(model_outputs.label),
dataset_proba=method_state["dataset_proba"],
multi_label=multi_label,
knn=method_kwargs["knn"],
lambda_=method_kwargs["lambda_"],
alpha=method_kwargs["alpha"],
use_mutual_nn=method_kwargs["use_mutual_nn"],
normalize_pseudo_labels=method_kwargs["normalize_pseudo_labels"],
transpose_nn_matrix=previous_nn_matrix.T[batch_indices],
)
# Update global information
previous_nn_matrix[batch_indices] = batch_nn_matrix
method_state["dataset_feature"] = (
method_state["dataset_feature"]
.at[batch_indices]
.set(model_outputs.embedding)
)
method_state["dataset_proba"] = (
method_state["dataset_proba"]
.at[batch_indices]
.set(logit2proba(model_outputs.label))
)
method_state["nn_matrix"] = jnp.stack(previous_nn_matrix.nonzero(), 1)
adaptation_state = adaptation_state.replace(
method_state=flax_utils.replicate(method_state)
)
else:
# In the offline version, we simply grab the pseudo-labels that were
# computed before the epoch.
pseudo_label = method_state["pseudo_label"][batch_indices]
# Project back the pseudo-labels to the global label space, if needed.
pseudo_label = method_utils.pad_pseudo_label(
reference_label_mask, pseudo_label, adaptation_state
)
return adaptation_state, {
"pseudo_label": flax_utils.replicate(pseudo_label)
}
@staticmethod
def indices_to_sparse_matrix(
indices: jnp.ndarray, shape: tuple[int, int]
) -> sparse.csr_matrix:
"""Converts non-zero indices to a sparse.csr_matrix.
Args:
indices: Non-zero indices of the matrix, of shape [?, 2]. Each row
indicates (row_index, column_index) of a non-zero element of the matrix.
shape: Shape of the final matrix.
Returns:
The sparse.csr_matrix of shape `shape`, with non-zero elements at
`indices`.
"""
row_indices, col_indices = indices[:, 0], indices[:, 1]
data = jnp.ones(row_indices.shape[0])
return sparse.csr_matrix((data, (row_indices, col_indices)), shape=shape)
def compute_pseudo_label(
self,
batch_feature: jnp.ndarray,
dataset_feature: jnp.ndarray,
batch_proba: jnp.ndarray,
dataset_proba: jnp.ndarray,
multi_label: bool,
knn: int,
lambda_: float,
alpha: float,
use_mutual_nn: bool,
normalize_pseudo_labels: bool,
transpose_nn_matrix: sparse.csr_matrix | None = None,
) -> tuple[sparse.csr_matrix, jnp.ndarray]:
"""The pipeline for computing NOTELA's pseudo labels.
First, we compute the nearest neighbors of each point in batch_feature
to each point in dataset_feature. Then, we compute the pseudo-labels
using Eq. (3) from the paper.
Args:
batch_feature: The features for the provided batch of data, shape
[batch_size, feature_dim]
dataset_feature: The features for the whole dataset, shape [dataset_size,
feature_dim]
batch_proba: The model's proba for the current batch of data, shape
[batch_size, num_classes]
dataset_proba: The model's proba for the whole dataset, shape
[dataset_size, num_classes]
multi_label: Whether this is a multi-label problem.
knn: The number of nearest-neighbors used to compute the affinity matrix.
lambda_: The weight controlling the Laplacian regularization.
alpha: The weight controlling the softness regularization.
use_mutual_nn: Whether to use mutual nearest-neighbors (points i and j
must belong to each other's nearest-neighbors to be called 'mutual') or
standard nearest-neighbors.
normalize_pseudo_labels: Whether to normalize pseudo-labels to turn them
into valid probability distributions. This option should generally be
set to True, and only be used for experimental purposes.
transpose_nn_matrix: The relevant chunk of the nearest-neighbor matrix's
transpose. Precisely, a matrix of shape [batch_size, dataset_size], where
position [i,j] informs whether point i (in the current batch) belongs to
point j's (any point in the dataset) nearest-neighbors. If set to None,
the actual transpose of the affinity matrix computed in this function
will be used.
Returns:
The nearest-neighbor matrix used to compute the pseudo-labels.
The pseudo-labels for the provided batch of data, shape
[batch_size, num_classes].
"""
# Start by computing the affinity matrix
nn_matrix = self.compute_nearest_neighbors(
batch_feature=batch_feature,
dataset_feature=dataset_feature,
knn=knn,
sparse_storage=True,
)
# Potentially keep mutual nearest-neighbors only.
if use_mutual_nn:
if transpose_nn_matrix is None:
final_nn_matrix = nn_matrix.multiply(nn_matrix.T) # pytype: disable=attribute-error # jax-ndarray
else:
final_nn_matrix = nn_matrix.multiply(transpose_nn_matrix) # pytype: disable=attribute-error # jax-ndarray
else:
final_nn_matrix = nn_matrix
# Prepare the teacher function.
teacher_step_fn = functools.partial(
self.teacher_step,
nn_matrix=final_nn_matrix,
lambda_=lambda_,
alpha=alpha,
normalize_pseudo_labels=normalize_pseudo_labels,
)
if multi_label:
# In the multi-label scenario, we're solving `num_classes` independent
# binary problems. Therefore, the class dimension can be treated as a
# batch dimension, and the 'probability dimension' is actually 2.
def reshape_binary_probabilities(proba):
proba = proba.T
return jnp.stack([1 - proba, proba], axis=-1)
dataset_proba = reshape_binary_probabilities(
dataset_proba
) # [num_classes, dataset_size, 2]
batch_proba = reshape_binary_probabilities(
batch_proba
) # [num_classes, batch_size, 2]
pseudo_label = []
for classwise_batch_proba, classwise_dataset_proba in zip(
batch_proba, dataset_proba
):
pseudo_label.append(
teacher_step_fn(
batch_proba=classwise_batch_proba,
dataset_proba=classwise_dataset_proba,
)
)
pseudo_label = jnp.stack(pseudo_label) # [num_classes, batch_size, 2]
# We select the 'positive' probability
pseudo_label = pseudo_label[..., -1].T # [batch_size, num_classes]
else:
pseudo_label = teacher_step_fn(
batch_proba=batch_proba, dataset_proba=dataset_proba
) # [batch_size, num_classes]
return nn_matrix, jax.lax.stop_gradient(pseudo_label)
def get_adaptation_metrics(
self, supervised: bool, multi_label: bool, **method_kwargs
) -> type[clu_metrics.Collection]:
"""Obtain metrics that will be monitored during adaptation.
In NOTELA, the loss minimized w.r.t. the network is a simple cross-entropy
between the model's (noisy) outputs and the pseudo-labels.
Args:
supervised: Whether the problem is supervised. Only used to know if we can
track supervised metrics, such as accuracy.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Method's kwargs.
Returns:
A collection of metrics.
"""
metrics_dict = vars(
adapt.get_common_metrics(supervised=supervised, multi_label=multi_label)
)["__annotations__"]
def single_label_loss_fn(probabilities, pseudo_label, label_mask, **_):
pl_xent = losses.label_xent(
probabilities=probabilities, label=pseudo_label, label_mask=label_mask
)
return pl_xent
def multi_label_loss_fn(
probabilities: jnp.ndarray,
pseudo_label: jnp.ndarray,
label_mask: jnp.ndarray,
**_,
):
pl_xent = losses.label_binary_xent(
probabilities=probabilities,
label=pseudo_label,
label_mask=label_mask,
)
return pl_xent
loss_fn = multi_label_loss_fn if multi_label else single_label_loss_fn
metrics_dict["main_loss"] = clu_metrics.Average.from_fun(loss_fn)
return clu_metrics.Collection.create(**metrics_dict)
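# A toy walk-through (hypothetical numbers, not part of the original module)
# of NOTELA.teacher_step, i.e. Eq. (3): each sample's probabilities are
# sharpened by the 1 / alpha exponent and pulled toward the average
# probabilities of its nearest neighbors by the exponential Laplacian term.
def _notela_teacher_step_example():
  batch_proba = jnp.array([[0.7, 0.3], [0.4, 0.6]])
  dataset_proba = jnp.array([[0.8, 0.2], [0.3, 0.7], [0.5, 0.5]])
  # Sample 0's neighbors are dataset points 0 and 2; sample 1's is point 1.
  nn_matrix = jnp.array([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
  return NOTELA.teacher_step(
      batch_proba=batch_proba,
      dataset_proba=dataset_proba,
      nn_matrix=nn_matrix,
      lambda_=1.0,
      alpha=1.0,
  )  # [2, 2] pseudo-labels, renormalized to sum to one per sample.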
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test-Time Entropy Minimization (TENT) method."""
from chirp.projects.sfda import adapt
from chirp.projects.sfda import losses
from clu import metrics as clu_metrics
class Tent(adapt.SFDAMethod):
"""Test-time entropy minimization method."""
_CITATION = (
'Wang, Dequan, et al. "Tent: Fully test-time adaptation by '
'entropy minimization." ICLR (2021).'
)
def get_adaptation_metrics(
self, supervised: bool, multi_label: bool, **method_kwargs
) -> type[clu_metrics.Collection]:
"""Obtain metrics that will be monitored during adaptation."""
metrics_dict = vars(
adapt.get_common_metrics(supervised=supervised, multi_label=multi_label)
)['__annotations__']
if multi_label:
entropy_fn = losses.label_binary_ent
else:
entropy_fn = losses.label_ent
metrics_dict['main_loss'] = clu_metrics.Average.from_fun(entropy_fn)
return clu_metrics.Collection.create(**metrics_dict)
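# A tiny sketch (toy values, not part of the original module) of the entropy
# objective TENT minimizes: the Shannon entropy of each per-sample prediction,
# which is small for confident predictions and maximal for uniform ones.
def _tent_entropy_example():
  import jax.numpy as jnp  # Local import; this file does not use jnp itself.
  probabilities = jnp.array([[0.9, 0.1], [0.5, 0.5]])
  # Confident sample 0 yields low entropy; uniform sample 1 yields log(2).
  return losses.label_ent(probabilities=probabilities, label_mask=None)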
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A pseudo-labelling baseline with confidence thresholding."""
from chirp.projects.sfda import adapt
from chirp.projects.sfda import losses
from chirp.projects.sfda import method_utils
from chirp.projects.sfda import model_utils
from clu import metrics as clu_metrics
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
class PseudoLabel(adapt.SFDAMethod):
"""A pseudo-labelling baseline with confidence thresholding.
The original paper does not use confidence thresholding, but we include it,
as it is a popular trick used to help stabilize training with pseudo-labels.
"""
_CITATION = (
"Lee, Dong-Hyun. 'Pseudo-label: The simple and efficient semi-supervised"
" learning method for deep neural networks.' Workshop on challenges in "
"representation learning, ICML. Vol. 3. No. 2. 2013."
)
def before_iter(
self,
key: jax.random.PRNGKeyArray,
batch: dict[str, np.ndarray],
adaptation_state: adapt.AdaptationState,
model_bundle: model_utils.ModelBundle,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs
) -> tuple[adapt.AdaptationState, dict[str, jnp.ndarray]]:
"""Compute the pseudo-labels for the current batch.
Low-confidence samples are masked out when computing the loss. Here, we
compute the mask that retains only high-confidence samples.
Args:
key: The jax random key used for random operations.
batch: The current batch of data.
adaptation_state: The current state of adaptation.
model_bundle: The ModelBundle used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
The untouched adaptation_state.
A dictionary containing the pseudo-labels and masks to use for the
iteration.
"""
# Pseudo-labels are computed on-the-go for the current batch.
forward_step = self.cache_get_forward_step(
model_bundle.model, modality, method_kwargs["update_bn_statistics"]
)
model_output = forward_step( # pytype: disable=wrong-arg-types # jax-ndarray
adapt.keep_jax_types(batch),
adaptation_state.model_state,
adaptation_state.model_params,
None,
)
reference_label_mask = method_utils.get_label_mask(batch)
probabilities = adapt.logit2proba(
model_output.label, reference_label_mask, multi_label
) # [1, batch_size, num_classes]
if multi_label:
# In the multi-label case, given that each class is treated independently,
# we perform masking at a "class level", meaning that within one sample,
# all class probabilities above a certain threshold will contribute to
# the loss.
pseudo_label = (
probabilities > method_kwargs["confidence_threshold"]
).astype(
jnp.float32
) # [batch_size, num_classes]
pseudo_label_mask = pseudo_label
else:
if reference_label_mask is None:
reference_label_mask = jnp.ones_like(probabilities)
# In the single-label case, we perform masking at a "sample level",
# meaning that a sample will only contribute to the loss if its maximum
# probability is above some threshold.
pseudo_label_mask = (probabilities * reference_label_mask).max(
-1
) > method_kwargs[
"confidence_threshold"
] # [batch_size]
num_classes = probabilities.shape[-1]
pseudo_label = nn.one_hot(
jnp.argmax(probabilities * reference_label_mask, axis=-1),
num_classes,
axis=-1,
)
return adaptation_state, {
"pseudo_label": pseudo_label,
"pseudo_label_mask": pseudo_label_mask,
}
def get_adaptation_metrics(
self, supervised: bool, multi_label: bool, **method_kwargs
) -> type[clu_metrics.Collection]:
"""Obtain metrics that will be monitored during adaptation."""
metrics_dict = vars(
adapt.get_common_metrics(supervised=supervised, multi_label=multi_label)
)["__annotations__"]
def single_label_loss_fn(
probabilities, pseudo_label, pseudo_label_mask, label_mask, **_
):
pl_xent = losses.label_xent(
probabilities=probabilities,
label=pseudo_label,
label_mask=label_mask,
sample_mask=pseudo_label_mask,
)
return pl_xent
def multi_label_loss_fn(
probabilities: jnp.ndarray,
pseudo_label: jnp.ndarray,
pseudo_label_mask: jnp.ndarray,
label_mask: jnp.ndarray,
**_
):
# The probabilities that end up contributing to the final computation
# are those left unmasked by label_mask (defined by the target domain,
# which restricts the set of possible species) and that are confident
# enough (left unmasked by pseudo_label_mask).
pl_xent = losses.label_binary_xent(
probabilities=probabilities,
label=pseudo_label,
label_mask=label_mask * pseudo_label_mask,
)
return pl_xent
loss_fn = multi_label_loss_fn if multi_label else single_label_loss_fn
metrics_dict["main_loss"] = clu_metrics.Average.from_fun(loss_fn)
return clu_metrics.Collection.create(**metrics_dict)
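# A toy sketch (hypothetical numbers, not part of the original module) of the
# two masking regimes implemented in before_iter above: class-level
# thresholding for multi-label, sample-level thresholding for single-label.
def _confidence_threshold_example(confidence_threshold: float = 0.8):
  probabilities = jnp.array([[0.9, 0.05, 0.85], [0.4, 0.35, 0.25]])
  # Multi-label: every class probability is thresholded independently.
  multi_label_pl = (probabilities > confidence_threshold).astype(jnp.float32)
  # Single-label: keep a sample only if its top probability clears the bar,
  # and pseudo-label it with its argmax class.
  pseudo_label_mask = probabilities.max(-1) > confidence_threshold
  pseudo_label = nn.one_hot(jnp.argmax(probabilities, axis=-1), 3, axis=-1)
  return multi_label_pl, pseudo_label, pseudo_label_mask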
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A baseline that simply updates BatchNorm's statistics."""
from chirp.projects.sfda import adapt
from clu import metrics as clu_metrics
class AdaBN(adapt.SFDAMethod):
"""A baseline that simply updates BatchNorm's statistics.
No optimization takes place; the method only forwards the data through the
model, with the option 'update_bn_statistics' activated.
"""
_CITATION = (
"Li, Yanghao, et al. 'Revisiting batch normalization for practical "
"domain adaptation.' arXiv preprint arXiv:1603.04779 (2016)."
)
def get_adaptation_metrics(
self, supervised: bool, multi_label: bool, **method_kwargs
) -> type[clu_metrics.Collection]:
"""Obtain metrics that will be monitored during adaptation."""
metrics_dict = vars(
adapt.get_common_metrics(supervised=supervised, multi_label=multi_label)
)["__annotations__"]
return clu_metrics.Collection.create(**metrics_dict)
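# A self-contained sketch (hypothetical model, not part of the original
# module) of what "updating BatchNorm statistics" means in flax: forwarding
# target-domain data with mutable batch_stats refreshes the running
# mean/variance without touching any trainable parameter.
def _adabn_sketch():
  # Local imports: this file itself does not depend on flax/jax.
  import flax.linen as flax_nn
  import jax
  import jax.numpy as jnp

  class TinyBNModel(flax_nn.Module):
    @flax_nn.compact
    def __call__(self, x, train: bool):
      x = flax_nn.Dense(4)(x)
      return flax_nn.BatchNorm(use_running_average=not train)(x)

  model = TinyBNModel()
  x = jnp.ones((8, 3))
  variables = model.init(jax.random.PRNGKey(0), x, train=False)
  # One forward pass with mutable batch_stats recomputes the running
  # statistics from the batch; no gradient step is taken.
  _, updated_state = model.apply(
      variables, x, train=True, mutable=["batch_stats"]
  )
  return updated_state["batch_stats"]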
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of Source HypOthesis Transfer (SHOT)."""
from absl import logging
from chirp.projects.sfda import adapt
from chirp.projects.sfda import losses
from chirp.projects.sfda import method_utils
from chirp.projects.sfda import model_utils
from clu import metrics as clu_metrics
import flax
import flax.jax_utils as flax_utils
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import tqdm
@flax.struct.dataclass
class SHOTMultiLabelLoss(clu_metrics.Metric):
"""Computes the loss used in SHOT-full for the multi-label case."""
probabilities_sum: jnp.ndarray
entropy_sum: jnp.ndarray
pl_xent_sum: jnp.ndarray
label_mask: jnp.ndarray
n_samples: int
beta: float
@classmethod
def from_model_output(
cls,
probabilities: jnp.ndarray,
pseudo_label: jnp.ndarray,
beta: float,
label_mask: jnp.ndarray,
**_
) -> "SHOTMultiLabelLoss":
"""Creates the metric from model's output.
Args:
probabilities: The model's binary probabilities. Shape [batch_size,
num_classes].
pseudo_label: The pseudo-labels (computed before each epoch). Shape
[batch_size, num_classes].
beta: Weight controlling the influence of the pseudo-label cross-entropy.
label_mask: A mask to control which classes to discard.
**_:
Returns:
An instance of "SHOTMultiLabelLoss".
"""
entropy_sum = losses.label_binary_ent(
probabilities=probabilities, label_mask=label_mask
).sum(axis=0)
pl_xent_sum = losses.label_binary_xent(
probabilities=probabilities, label=pseudo_label, label_mask=label_mask
).sum(axis=0)
probabilities_sum = probabilities.sum(axis=0)
return cls(
probabilities_sum=probabilities_sum,
entropy_sum=entropy_sum,
pl_xent_sum=pl_xent_sum,
label_mask=label_mask,
n_samples=probabilities.shape[0],
beta=beta,
)
def merge(self, other: "SHOTMultiLabelLoss") -> "SHOTMultiLabelLoss":
return type(self)(
probabilities_sum=self.probabilities_sum + other.probabilities_sum,
entropy_sum=self.entropy_sum + other.entropy_sum,
pl_xent_sum=self.pl_xent_sum + other.pl_xent_sum,
n_samples=self.n_samples + other.n_samples,
label_mask=other.label_mask,
beta=other.beta,
)
def compute(self):
probabilities_marginal = self.probabilities_sum / self.n_samples
marginal_entropy = losses.label_binary_ent(
probabilities=probabilities_marginal, label_mask=self.label_mask[0]
)
cond_entropy = self.entropy_sum / self.n_samples
return (
cond_entropy
- marginal_entropy
+ self.beta * self.pl_xent_sum / self.n_samples
)
@flax.struct.dataclass
class SHOTLoss(clu_metrics.Metric):
"""Computes the loss used in SHOT-full for the single-label case."""
probabilities_sum: jnp.ndarray
entropy_sum: jnp.ndarray
pl_xent_sum: jnp.ndarray
label_mask: jnp.ndarray | None
n_samples: int
beta: float
@classmethod
def from_model_output(
cls,
probabilities: jnp.ndarray,
pseudo_label: jnp.ndarray,
label_mask: jnp.ndarray,
beta: float,
**_
) -> "SHOTLoss":
entropy_sum = losses.label_ent(
probabilities=probabilities, label_mask=label_mask
).sum(axis=0)
pl_xent_sum = losses.label_xent(
probabilities=probabilities, label=pseudo_label, label_mask=label_mask
).sum(axis=0)
probabilities_sum = probabilities.sum(axis=0)
return cls(
probabilities_sum=probabilities_sum,
entropy_sum=entropy_sum,
pl_xent_sum=pl_xent_sum,
label_mask=label_mask,
n_samples=probabilities.shape[0],
beta=beta,
)
def merge(self, other: "SHOTLoss") -> "SHOTLoss":
return type(self)(
probabilities_sum=self.probabilities_sum + other.probabilities_sum,
entropy_sum=self.entropy_sum + other.entropy_sum,
pl_xent_sum=self.pl_xent_sum + other.pl_xent_sum,
label_mask=other.label_mask,
n_samples=self.n_samples + other.n_samples,
beta=other.beta,
)
def compute(self):
probabilities_marginal = self.probabilities_sum / self.n_samples
reference_mask = None if self.label_mask is None else self.label_mask[0]
marginal_entropy = losses.label_ent(probabilities_marginal, reference_mask)
cond_entropy = self.entropy_sum / self.n_samples
return (
cond_entropy
- marginal_entropy
+ self.beta * self.pl_xent_sum / self.n_samples
)
class SHOT(adapt.SFDAMethod):
"""SHOT method for SFDA."""
_CITATION = (
"Liang, Jian, Dapeng Hu, and Jiashi Feng. 'Do we really need to access "
"the source data? source hypothesis transfer for unsupervised domain "
"adaptation.' International Conference on Machine Learning. PMLR, 2020."
)
@staticmethod
def compute_pseudo_label(
dataset_feature: jnp.ndarray,
dataset_probability: jnp.ndarray,
multi_label: bool,
eps: float = 1e-6,
) -> jnp.ndarray:
"""A jax reimplementation of SHOT's pseudo-labelling procedure.
Original function at https://github.com/tim-learn/SHOT/blob/
07d0c713e4882e83fded1aff2a447dff77856d64/object/image_target.py#L242.
Args:
dataset_feature: The feature for all points in the dataset. Shape
[dataset_size, feature_dim].
dataset_probability: Model's probabilities for the current dataset. Shape
[dataset_size, num_classes].
multi_label: Whether this is a multi-label problem.
eps: For numerical stability.
Returns:
      The pseudo-labels, shape [dataset_size, num_classes].
"""
classwise_pseudo_label = []
dataset_probability = dataset_probability.T # [num_classes, dataset_size]
if multi_label:
dataset_probability = jnp.stack(
[1 - dataset_probability, dataset_probability], axis=1
) # [num_classes, probabilities_dim, dataset_size]
else:
dataset_probability = jnp.expand_dims(
dataset_probability, axis=0
) # [1, num_classes, dataset_size]
probabilities_dim = dataset_probability.shape[1]
# We loop over the classes. Vectorizing this part implies broadcasting
# `dataset_feature` num_classes times, which may easily lead to OOM.
for class_probabilities in tqdm.tqdm(
dataset_probability, total=dataset_probability.shape[0]
):
# Compute initial clusters Eq (4).
mu_0 = (class_probabilities @ dataset_feature) / (
class_probabilities.sum(-1, keepdims=True) + eps
) # [probabilities_dim, feature_dim]
# Compute initial pseudo-labels Eq (5)
dist = method_utils.jax_cdist(
dataset_feature, mu_0
) # [dataset_size, probabilities_dim]
one_hot_pseudo_label = nn.one_hot(
dist.argmin(-1), probabilities_dim, axis=-1
).transpose() # [probabilities_dim, dataset_size]
# Re-Compute clusters and pseudo-labels Eq (6). Equivalent to a second
# iteration of K-means.
mu_1 = (one_hot_pseudo_label @ dataset_feature) / (
one_hot_pseudo_label.sum(-1, keepdims=True) + eps
) # [probabilities_dim, feature_dim]
dist = method_utils.jax_cdist(
dataset_feature, mu_1
) # [dataset_size, probabilities_dim]
classwise_pseudo_label.append(dist.argmin(-1)) # [dataset_size]
final_pseudo_label = jnp.stack(
classwise_pseudo_label, 1
) # [dataset_size, num_classes]
final_pseudo_label = nn.one_hot(
final_pseudo_label, probabilities_dim, axis=-1
) # [dataset_size, num_classes, probabilities_dim]
if not multi_label:
assert final_pseudo_label.shape[1] == 1
final_pseudo_label = jnp.squeeze(
final_pseudo_label, axis=1
) # [dataset_size, probabilities_dim=num_classes]
else:
final_pseudo_label = final_pseudo_label[
..., -1
] # [dataset_size, num_classes]
return final_pseudo_label
def before_epoch(
self,
key: jax.random.PRNGKeyArray,
model_bundle: model_utils.ModelBundle,
adaptation_state: adapt.AdaptationState,
adaptation_dataset: tf.data.Dataset,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs
) -> adapt.AdaptationState:
"""Compute the pseudo-labels.
Args:
key: The jax random key used for random operations in this epoch.
model_bundle: The ModelBundle used for adaptation.
adaptation_state: The current state of adaptation.
adaptation_dataset: The dataset used for adaptation.
modality: The modality.
multi_label: Whether this is a multi-label problem.
**method_kwargs: Additional method-specific kwargs.
Returns:
The adaptation state, with a potentially updated 'method_state' attribute.
"""
logging.info("Preparing pseudo-labels...")
# Extract dataset_feature and model's probabilities.
forward_result = method_utils.forward_dataset(
dataset=adaptation_dataset,
adaptation_state=adaptation_state,
model_bundle=model_bundle,
modality=modality,
multi_label=multi_label,
use_batch_statistics=method_kwargs["update_bn_statistics"],
)
# Compute pseudo-labels that will be used during the next epoch of
# adaptation.
pseudo_label = SHOT.compute_pseudo_label(
dataset_feature=forward_result["embedding"],
dataset_probability=forward_result["proba"],
multi_label=multi_label,
)
sample_ids = forward_result["id"]
method_state = {
"pseudo_label": pseudo_label,
"id2index": {sample_ids[i]: i for i in range(len(sample_ids))},
}
adaptation_state = adaptation_state.replace(method_state=method_state)
return adaptation_state
def before_iter(
self,
key: jax.random.PRNGKeyArray,
batch: dict[str, np.ndarray],
adaptation_state: adapt.AdaptationState,
model_bundle: model_utils.ModelBundle,
modality: adapt.Modality,
multi_label: bool,
**method_kwargs
) -> tuple[adapt.AdaptationState, dict[str, jnp.ndarray]]:
"""Grab the pseudo-labels from memory for the current batch.
Args:
key: The jax random key used for random operations.
batch: The current batch of data.
adaptation_state: The current state of adaptation.
model_bundle: The ModelBundle used for adaptation.
modality: The current modality.
multi_label: Whether this is a multi-label problem.
      **method_kwargs: Additional method-specific kwargs.
Returns:
The untouched adaptation_state.
A dictionary containing the pseudo-labels to use for the iteration.
"""
method_state = flax_utils.unreplicate(adaptation_state.method_state)
id2index = method_state["id2index"]
batch_indexes = np.array(
[id2index[x] for x in flax_utils.unreplicate(batch["tfds_id"])]
)
pseudo_label = method_state["pseudo_label"][batch_indexes]
# pad pseudo-labels to match model output as needed.
label_mask = method_utils.get_label_mask(batch)
pseudo_label = method_utils.pad_pseudo_label(
label_mask, pseudo_label, adaptation_state
)
return adaptation_state, {
"pseudo_label": flax_utils.replicate(pseudo_label)
}
def get_adaptation_metrics(
self, supervised: bool, multi_label: bool, **method_kwargs
) -> type[clu_metrics.Collection]:
"""Obtain metrics that will be monitored during adaptation."""
metrics_dict = vars(
adapt.get_common_metrics(supervised=supervised, multi_label=multi_label)
)["__annotations__"]
if multi_label:
metrics_dict["main_loss"] = SHOTMultiLabelLoss
else:
metrics_dict["main_loss"] = SHOTLoss
return clu_metrics.Collection.create(**metrics_dict)
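# A hedged usage sketch (not executed anywhere in this module): given features
# and probabilities gathered over a whole dataset, `compute_pseudo_label`
# returns one-hot (single-label) or binary (multi-label) pseudo-labels. The
# shapes and seeds below are hypothetical.
def _shot_pseudo_label_example() -> jnp.ndarray:
  dataset_feature = jax.random.normal(
      jax.random.PRNGKey(0), (16, 32)
  )  # [dataset_size, feature_dim]
  dataset_probability = jax.nn.softmax(
      jax.random.normal(jax.random.PRNGKey(1), (16, 4))
  )  # [dataset_size, num_classes]
  return SHOT.compute_pseudo_label(
      dataset_feature=dataset_feature,
      dataset_probability=dataset_probability,
      multi_label=False,
  )  # [dataset_size, num_classes], one-hot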
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adaptation part."""
import itertools
import shutil
import tempfile
from chirp import config_utils
from chirp.data import utils as data_utils
from chirp.models import frontend
from chirp.preprocessing import pipeline
from chirp.projects.sfda import adapt
from chirp.projects.sfda import model_utils
from chirp.projects.sfda import models
from chirp.projects.sfda.configs import ada_bn as ada_bn_config
from chirp.projects.sfda.configs import audio_baseline
from chirp.projects.sfda.configs import config_globals
from chirp.projects.sfda.configs import dropout_student as ds_config
from chirp.projects.sfda.configs import dust as dust_config
from chirp.projects.sfda.configs import image_baseline
from chirp.projects.sfda.configs import notela as notela_config
from chirp.projects.sfda.configs import nrc as nrc_config
from chirp.projects.sfda.configs import pseudo_label as pseudo_label_config
from chirp.projects.sfda.configs import shot as shot_config
from chirp.projects.sfda.configs import tent as tent_config
from chirp.projects.sfda.tests import fake_image_dataset
from chirp.tests import fake_dataset
from flax import traverse_util
import flax.linen as nn
import jax.numpy as jnp
from absl.testing import absltest
from absl.testing import parameterized
_UNPARSED_CONFIGS = {
"tent": tent_config,
"notela": notela_config,
"pseudo_label": pseudo_label_config,
"shot": shot_config,
"ada_bn": ada_bn_config,
"dropout_student": ds_config,
"nrc": nrc_config,
"dust": dust_config,
}
class ConstantEncoder(nn.Module):
"""A no-op encoder for quickly testing adaptation loop."""
output_dim: int = 32
@nn.compact
def __call__(
self, inputs: jnp.ndarray, train: bool, use_running_average: bool # pylint: disable=redefined-outer-name
) -> jnp.ndarray:
return jnp.zeros([inputs.shape[0], self.output_dim])
class AdaptationTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.adapt_dir = tempfile.mkdtemp()
self.audio_data_dir = tempfile.mkdtemp()
self.image_data_dir = tempfile.mkdtemp()
fake_audio_builder = fake_dataset.FakeDataset(data_dir=self.audio_data_dir)
fake_image_builder = fake_image_dataset.FakeImageDataset(
data_dir=self.image_data_dir
)
fake_audio_builder.download_and_prepare()
fake_image_builder.download_and_prepare()
self.image_builder = fake_image_builder
self.audio_builder = fake_audio_builder
def tearDown(self):
super().tearDown()
shutil.rmtree(self.adapt_dir)
shutil.rmtree(self.audio_data_dir)
shutil.rmtree(self.image_data_dir)
def _get_datasets(self, config, modality: adapt.Modality):
if modality == adapt.Modality.AUDIO:
adaptation_dataset, _ = data_utils.get_dataset(
"train[:2]",
dataset_directory=self.audio_builder.data_dir,
pipeline=config.adaptation_data_config.pipeline,
)
val_dataset, _ = data_utils.get_dataset(
"train[1:2]",
dataset_directory=self.audio_builder.data_dir,
pipeline=config.eval_data_config.pipeline,
)
else:
input_pipeline = models.MODEL_REGISTRY[config.model_config.encoder](
num_classes=1
).get_input_pipeline
dataset = input_pipeline(
data_builder=self.image_builder, split="train[:2]"
)
dataset = dataset.batch(1, drop_remainder=False).batch(
1, drop_remainder=False
)
adaptation_dataset = val_dataset = dataset
return adaptation_dataset, val_dataset
def _get_configs(
self,
modality: adapt.Modality,
method: str,
use_constant_encoder: bool = True,
):
"""Create configuration dictionary for training."""
if modality == adapt.Modality.AUDIO:
config = audio_baseline.get_config()
config = config_utils.parse_config(config, config_globals.get_globals())
config.init_config.target_class_list = "xenocanto"
config.sample_rate_hz = 50
toy_pipeline = pipeline.Pipeline(
ops=[
pipeline.ConvertBirdTaxonomyLabels(
source_namespace="ebird2021",
target_class_list=config.init_config.target_class_list,
add_taxonomic_labels=True,
),
pipeline.Batch(batch_size=1, split_across_devices=True),
pipeline.RandomSlice(window_size=1),
]
)
config.adaptation_data_config.pipeline = toy_pipeline
config.eval_data_config.pipeline = toy_pipeline
if use_constant_encoder:
config.model_config.encoder = ConstantEncoder(output_dim=32)
config.model_config.frontend = frontend.MelSpectrogram(
features=32,
stride=config.sample_rate_hz // 25,
kernel_size=10,
sample_rate=config.sample_rate_hz,
freq_range=(60, 10_000),
)
elif modality == adapt.Modality.IMAGE:
config = image_baseline.get_config()
config = config_utils.parse_config(config, config_globals.get_globals())
config.init_config.target_class_list = "fake_image_dataset"
config.init_config.input_shape = None
if use_constant_encoder:
config.model_config.encoder = models.ImageModelName.CONSTANT
method_config = _UNPARSED_CONFIGS[method].get_config()
method_config = config_utils.parse_config(
method_config, config_globals.get_globals()
)
return config, method_config
@parameterized.named_parameters(
*[
(f"{method}_{modality.value}", method, modality)
for method, modality in itertools.product(
[
"dropout_student",
"ada_bn",
"tent",
"notela",
"pseudo_label",
"shot",
"nrc",
"dust",
],
[adapt.Modality.IMAGE, adapt.Modality.AUDIO],
)
]
)
def test_adapt_one_epoch(self, method: str, modality: adapt.Modality):
"""Test an epoch of adaptation for SFDA methods."""
# Recover the configurations dict.
config, method_config = self._get_configs(modality, method)
sfda_method = method_config.sfda_method
method_config = getattr(method_config, modality.value)
method_config.num_epochs = 1
# Get data
adaptation_dataset, val_dataset = self._get_datasets(config, modality)
# Initialize state and parameters
model_bundle, adaptation_state, key, rename_fn, inverse_rename_fn = (
sfda_method.initialize(
model_config=config.model_config,
rng_seed=config.init_config.rng_seed,
pretrained=False,
input_shape=config.init_config.input_shape,
target_class_list=config.init_config.target_class_list,
adaptation_iterations=method_config.num_epochs
* len(adaptation_dataset),
modality=modality,
optimizer_config=method_config.optimizer_config,
)
)
# Perform adaptation.
new_adaptation_state = adapt.perform_adaptation(
key=key,
adaptation_state=adaptation_state,
rename_fn=rename_fn,
inverse_rename_fn=inverse_rename_fn,
adaptation_dataset=adaptation_dataset,
validation_dataset=val_dataset,
model_bundle=model_bundle,
logdir=self.adapt_dir,
use_supervised_metrics=True,
target_class_list=config.init_config.target_class_list,
multi_label=config.multi_label,
modality=modality,
eval_every=config.eval_every,
eval_mca_every=config.eval_mca_every,
sfda_method=sfda_method,
**method_config,
)
self.assertIsNotNone(new_adaptation_state)
def test_mask_parameters_audio(self):
"""Testing parameter masking used to restrict trainable parameters."""
config, _ = self._get_configs(
adapt.Modality.AUDIO, "tent", use_constant_encoder=False
)
model_bundle, params, _, _, _, _ = model_utils.prepare_audio_model(
model_config=config.model_config,
optimizer_config=None,
total_steps=0,
rng_seed=config.init_config.rng_seed,
input_shape=config.init_config.input_shape,
pretrained=False,
target_class_list=config.init_config.target_class_list,
)
self._test_mask_parameters(params, model_bundle.model)
@parameterized.named_parameters(
("resnet", models.ImageModelName.RESNET),
("wideresnet", models.ImageModelName.WIDERESNET),
)
def test_mask_parameters_image(self, model: models.ImageModelName):
"""Testing parameter masking used to restrict trainable parameters."""
config, _ = self._get_configs(adapt.Modality.IMAGE, "tent")
config.model_config.encoder = model
model_bundle, params, _, _, _, _ = model_utils.prepare_image_model(
model_config=config.model_config,
optimizer_config=None,
total_steps=1,
rng_seed=config.init_config.rng_seed,
pretrained=False,
input_shape=config.init_config.input_shape,
target_class_list=config.init_config.target_class_list,
)
self._test_mask_parameters(params, model_bundle.model)
def _test_mask_parameters(self, params, model):
# Test BN masking
masked_params = model_utils.mask_parameters(
params, model_utils.TrainableParams.BN, model
)
for p, masked in traverse_util.flatten_dict(masked_params).items():
if model.is_bn_parameter(p):
self.assertFalse(masked)
else:
self.assertTrue(masked)
# Test no masking
masked_params = model_utils.mask_parameters(
params, model_utils.TrainableParams.ALL, model
)
for p, masked in traverse_util.flatten_dict(masked_params).items():
self.assertFalse(masked)
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SFDA utilities at chirp/projects/sfda/method_utils.py."""
from chirp.projects.sfda import method_utils
import jax
import jax.numpy as jnp
from scipy.spatial import distance
from absl.testing import absltest
class MethodUtilsTest(absltest.TestCase):
def test_cdist(self):
"""Ensure that our pairwise distance function produces expected results."""
feature_dim = 3
n_points_a = 2
n_points_b = 3
features_a = jax.random.normal(
jax.random.PRNGKey(0), (n_points_a, feature_dim)
)
    features_b = jax.random.normal(
        jax.random.PRNGKey(1), (n_points_b, feature_dim)
    )
    pairwise_sqr_distances_ours = method_utils.jax_cdist(features_a, features_b)
    pairwise_sqr_distances_scipy = distance.cdist(features_a, features_b) ** 2
    self.assertTrue(
        jnp.allclose(
            pairwise_sqr_distances_scipy, pairwise_sqr_distances_ours
)
)
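# A plain-jnp reference for the identity behind `jax_cdist` (a sketch under the
# assumption, asserted above, that it returns *squared* Euclidean distances):
# ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, which avoids materializing the
# pairwise difference tensor.
def _squared_cdist_reference(a: jnp.ndarray, b: jnp.ndarray) -> jnp.ndarray:
  sq_norms_a = (a**2).sum(axis=-1, keepdims=True)  # [n_points_a, 1]
  sq_norms_b = (b**2).sum(axis=-1, keepdims=True).T  # [1, n_points_b]
  return sq_norms_a + sq_norms_b - 2 * a @ b.T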
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SHOT method."""
from chirp.projects.sfda.methods import shot
import flax.linen as nn
import jax
import numpy as np
from scipy.spatial import distance
from absl.testing import absltest
class ShotTest(absltest.TestCase):
def original_pl(
self, embeddings: np.ndarray, probabilities: np.ndarray, threshold=0.0
) -> np.ndarray:
"""The orignal implementation of SHOT's pseudo-labelling function.
Taken from https://github.com/tim-learn/SHOT/blob/
07d0c713e4882e83fded1aff2a447dff77856d64/object/image_target.py#L242.
Args:
embeddings: The model's embeddings.
probabilities: The model's probabilities.
threshold: A threshold to only keep classes with a certain number of
samples (set to 0 in the original code).
Returns:
The hard pseudo-labels.
"""
predict = np.argmax(probabilities, axis=-1)
num_classes = probabilities.shape[1]
aff = probabilities
for _ in range(2):
initc = aff.transpose().dot(embeddings)
initc = initc / (1e-8 + aff.sum(axis=0)[:, None])
cls_count = np.eye(num_classes)[predict].sum(axis=0)
labelset = np.where(cls_count >= threshold)[0]
dd = distance.cdist(embeddings, initc[labelset])
pred_label = dd.argmin(axis=1)
predict = labelset[pred_label]
aff = np.eye(num_classes)[predict]
return predict.astype('int')
def test_pseudo_label(self):
"""Ensure that our reimplementation of SHOT's pseudo-labelling is correct."""
n_points, feature_dim, num_classes = 10, 100, 10
fake_embeddings = jax.random.normal(
jax.random.PRNGKey(57), (n_points, feature_dim)
)
fake_probabilities = nn.softmax(
jax.random.normal(jax.random.PRNGKey(58), (n_points, num_classes))
)
pl_original = self.original_pl(
np.array(fake_embeddings), np.array(fake_probabilities)
)
pl_ours = shot.SHOT.compute_pseudo_label(
dataset_feature=fake_embeddings,
dataset_probability=fake_probabilities,
multi_label=False,
)
self.assertTrue(np.allclose(pl_original, pl_ours.argmax(-1)))
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A fake image dataset for testing."""
import numpy as np
import tensorflow_datasets as tfds
class FakeImageDataset(tfds.core.GeneratorBasedBuilder):
"""Fake image dataset used for testing purposes."""
VERSION = tfds.core.Version('1.0.0')
def _split_generators(self, dl_manager):
return {
'train': self._generate_examples(10),
'test': self._generate_examples(10),
}
def _info(self) -> tfds.core.DatasetInfo:
"""Dataset metadata."""
return tfds.core.DatasetInfo(
builder=self,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(shape=(None, None, 3)),
'label': tfds.features.ClassLabel(names=['cat', 'dog']),
}),
)
def _generate_examples(self, num_examples):
for i in range(num_examples):
yield i, {
'image': np.zeros((12, 12, 3)).astype(np.uint8),
'label': np.random.choice(self._info().features['label'].names),
}
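# A hedged usage sketch: prepare the fake builder into a temporary directory
# and read one example back. The directory handling here is hypothetical.
def _fake_image_dataset_example():
  import tempfile
  builder = FakeImageDataset(data_dir=tempfile.mkdtemp())
  builder.download_and_prepare()
  dataset = builder.as_dataset(split='train')
  return next(iter(dataset))  # {'image': uint8 [12, 12, 3], 'label': int}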
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for NOTELA method."""
import functools
import itertools
from chirp.projects.sfda import adapt
from chirp.projects.sfda import method_utils
from chirp.projects.sfda.methods import notela
import flax.linen as nn
import jax
import jax.numpy as jnp
from scipy import sparse
from absl.testing import absltest
class NOTELATest(absltest.TestCase):
def test_nn_matrix(self):
"""Ensure that NOTELA's nearest_neighbors matrix contains valid values."""
feature_dim = 5
n_points_batch = 2
n_points_dataset = 3
knn = 2
key = jax.random.PRNGKey(0)
batch_feature = jax.random.normal(key, (n_points_batch, feature_dim))
dataset_feature = jax.random.normal(key, (n_points_dataset, feature_dim))
compute_nearest_neighbor_fn = functools.partial(
notela.NOTELA.compute_nearest_neighbors,
batch_feature=batch_feature,
dataset_feature=dataset_feature,
knn=knn,
)
def to_dense(matrix):
if isinstance(matrix, jnp.ndarray):
return matrix
else:
return matrix.todense()
nearest_neighbors_matrices = []
for efficient, sparse_storage in itertools.product(
(True, False), (True, False)
):
nearest_neighbors_matrices.append(
compute_nearest_neighbor_fn(
sparse_storage=sparse_storage,
memory_efficient_computation=efficient,
)
)
nearest_neighbors_reference = to_dense(nearest_neighbors_matrices[0])
self.assertEqual(
nearest_neighbors_reference.shape, (n_points_batch, n_points_dataset)
)
self.assertTrue((nearest_neighbors_reference.sum(-1) >= 0).all())
self.assertTrue((nearest_neighbors_reference.sum(-1) <= knn).all())
for nn_matrix_version in nearest_neighbors_matrices[1:]:
self.assertTrue(
jnp.allclose(nearest_neighbors_reference, to_dense(nn_matrix_version))
)
def test_teacher_step(self):
"""Ensure that NOTELA's teacher-step produces valid pseudo-labels."""
n_points_batch = 2
lambda_ = 1.0
alpha = 1.0
n_points_dataset = 3
key = jax.random.PRNGKey(0)
def one_hot(probas):
return jnp.stack([1 - probas, probas], axis=-1)
batch_proba = nn.sigmoid(jax.random.normal(key, (n_points_batch,)))
dataset_proba = nn.sigmoid(jax.random.normal(key, (n_points_dataset,)))
nn_matrix = jax.random.randint(
key, (n_points_batch, n_points_dataset), 0, 2
)
sparse_nn_matrix = sparse.csr_matrix(nn_matrix)
pseudo_labels = notela.NOTELA.teacher_step(
batch_proba=one_hot(batch_proba),
dataset_proba=one_hot(dataset_proba),
nn_matrix=nn_matrix,
lambda_=lambda_,
alpha=alpha,
normalize_pseudo_labels=True,
)
pseudo_labels_from_sparse = notela.NOTELA.teacher_step(
batch_proba=one_hot(batch_proba),
dataset_proba=one_hot(dataset_proba),
nn_matrix=sparse_nn_matrix,
lambda_=lambda_,
alpha=alpha,
normalize_pseudo_labels=True,
)
self.assertTrue(
jnp.allclose(
pseudo_labels.sum(-1),
jnp.ones_like(pseudo_labels.sum(-1)),
atol=1e-4,
)
)
self.assertTrue(
jnp.allclose(
pseudo_labels,
pseudo_labels_from_sparse,
)
)
def test_pad_pseudo_label(self):
key = jax.random.PRNGKey(0)
num_samples = 3
label_mask = jnp.array([0, 1, 0, 0, 1]).astype(bool)
used_classes = label_mask.sum()
pseudo_labels = jax.random.normal(key, (num_samples, used_classes))
fake_adaptation_state = adapt.AdaptationState(0, 0, {}, {}, {}, {}, True)
padded_pseudo_label = method_utils.pad_pseudo_label(
label_mask, pseudo_labels, fake_adaptation_state
)
self.assertTrue((padded_pseudo_label[:, label_mask] == pseudo_labels).all())
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for losses used in SFDA.."""
from chirp.projects.sfda import losses
import flax.linen as nn
import jax
import jax.numpy as jnp
from absl.testing import absltest
class LossesTest(absltest.TestCase):
def test_binary_entropy(self):
num_classes = 5
n_points = 10
logits = jax.random.uniform(jax.random.PRNGKey(0), (n_points, num_classes))
binary_probas = nn.sigmoid(logits)
label_mask = jnp.array([1, 1, 0, 0, 1])
# Test that masking works as intended.
masked_ent = losses.label_binary_ent(
probabilities=binary_probas,
label_mask=jnp.tile(label_mask, (n_points, 1)),
eps=0.0,
)
ent = losses.label_binary_ent(
probabilities=binary_probas[:, label_mask.astype(bool)], eps=0.0
)
self.assertAlmostEqual(masked_ent.mean(), ent.mean())
# Test that binary entropies fall in the right range [0, log(2)]
self.assertTrue((ent >= 0.0).all())
self.assertTrue((ent <= jnp.log(2)).all())
    # Test that entropy and cross-entropy are consistent.
ent = losses.label_binary_ent(
probabilities=binary_probas, label=binary_probas
)
xent = losses.label_binary_xent(
probabilities=binary_probas, label=binary_probas
)
self.assertAlmostEqual(ent.mean(), xent.mean(), delta=1e-7)
def test_standard_entropy(self):
num_classes = 5
n_points = 10
logits = jax.random.uniform(jax.random.PRNGKey(0), (n_points, num_classes))
probabilities = nn.softmax(logits)
ent = losses.label_ent(probabilities=probabilities, label_mask=None)
# Test that multi-class entropies fall in the range [0, log(num_classes)]
self.assertTrue((ent >= 0.0).all())
self.assertTrue((ent <= jnp.log(num_classes)).all())
    # Ensure that entropy and cross-entropy with self are the same.
self.assertAlmostEqual(
ent.mean(),
losses.label_xent(
probabilities=probabilities,
label=probabilities,
label_mask=None,
).mean(),
)
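# A plain-jnp reference sketch for the binary entropy checked above (the actual
# reduction in `losses.label_binary_ent` may differ; the mean over classes is
# an assumption consistent with the [0, log(2)] range asserted in the test):
# H(p) = -p*log(p) - (1-p)*log(1-p), applied elementwise.
def _binary_entropy_reference(
    probabilities: jnp.ndarray, eps: float = 1e-10
) -> jnp.ndarray:
  p = probabilities
  ent = -(p * jnp.log(p + eps) + (1 - p) * jnp.log(1 - p + eps))
  return ent.mean(axis=-1)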
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A template for any image model."""
from chirp.models import output
from etils import epath
import flax
import flax.linen as nn
import tensorflow as tf
import tensorflow_datasets as tfds
class ImageModel(nn.Module):
"""A template for any image model."""
@nn.compact
def __call__(
self, x, train: bool, use_running_average: bool | None
) -> output.ClassifierOutput:
"""Just like any standard nn.Module, defines the foward pass of the model.
We formulate two non-standard requirements for the forward pass. First, it
must disentangle the train/test behavior of BatchNorm layers and those of
other noisy layers (e.g. Dropout). This is achieved through the use of
'train' and 'use_running_average' options. Second, we require that the
    outputs are packaged into an output.ClassifierOutput, thereby including
    both the encoder's features and the head's output. See
chirp/projects/sfda/models/resnet.py for an example.
Args:
x: Batch of input images.
train: Whether this is training. This affects noisy layers' behavior (e.g.
Dropout). It also affects BatchNorm behavior in case
'use_running_average' is set to None.
      use_running_average: Optional, used to decide whether to use running
        statistics in BatchNorm (test mode) or the current batch's statistics
        (train mode). If set to None, defaults to 'not train'.
Returns:
      The model's outputs, packaged as an output.ClassifierOutput.
"""
raise NotImplementedError
@staticmethod
def load_ckpt(dataset_name: str) -> flax.core.frozen_dict.FrozenDict:
"""Loads the checkpoint for the current dataset.
Args:
dataset_name: The current dataset used.
Returns:
variables: The flax variables corresponding to the loaded checkpoint.
"""
raise NotImplementedError
@staticmethod
def get_ckpt_path(dataset_name: str) -> epath.Path:
"""Returns the path to the checkpoint for the current dataset.
Using a separate function from 'load_ckpt' (the latter uses 'get_ckpt_path')
    to make it easier to verify checkpoint paths.
Args:
dataset_name: The current dataset used.
Returns:
      The path to the checkpoint for the current dataset.
"""
raise NotImplementedError
@staticmethod
def is_bn_parameter(parameter_name: list[str]) -> bool:
"""Verifies whether some parameter belong to a BatchNorm layer.
Args:
parameter_name: The name of the parameter, as a list in which each member
describes the name of a layer. E.g. ('Block1', 'batch_norm_1', 'bias').
Returns:
True if this parameter belongs to a BatchNorm layer.
"""
raise NotImplementedError
@staticmethod
def get_input_pipeline(
data_builder: tfds.core.DatasetBuilder, split: str, **kwargs
) -> tf.data.Dataset:
"""Get the data pipeline for the current model.
Because we're relying on pretrained models from the web, this part of the
data pipeline can hardly be factorized. We hereby provide a default
pipeline that converts image to tf.float32, and one-hots the labels.
However, we **leave it for each model to specify its own processing
pipeline**, with the only requirement of producing one-hot labels.
Args:
data_builder: The dataset's data builder.
split: The split of the dataset used.
**kwargs: Additional kwargs that may be useful for model-specific
pipelines.
Returns:
The processed dataset.
"""
read_config = tfds.ReadConfig(add_tfds_id=True)
dataset = data_builder.as_dataset(split=split, read_config=read_config)
def _pp(example):
image = tf.image.convert_image_dtype(example['image'], tf.float32)
label = tf.one_hot(
example['label'], data_builder.info.features['label'].num_classes
)
return {'image': image, 'label': label, 'tfds_id': example['tfds_id']}
return dataset.map(_pp, tf.data.experimental.AUTOTUNE)
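# A hedged usage sketch of the default pipeline above: `builder` stands for any
# prepared tfds builder, and the batch size is hypothetical.
def _default_pipeline_example(
    builder: tfds.core.DatasetBuilder,
) -> tf.data.Dataset:
  dataset = ImageModel.get_input_pipeline(data_builder=builder, split='train')
  return dataset.batch(8)  # float32 images, one-hot labels, tfds ids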
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialializing the registry of image models."""
import enum
from chirp.projects.sfda.models.constant_model import ConstantEncoderModel
from chirp.projects.sfda.models.nrc_resnet import NRCResNet50
from chirp.projects.sfda.models.nrc_resnet import NRCResNet101
from chirp.projects.sfda.models.resnet import ResNet50
from chirp.projects.sfda.models.wideresnet import WideResNet2810
class ImageModelName(enum.Enum):
"""Supported model architectures for image experiments."""
RESNET = "resnet"
NRC_RESNET = "nrc_resnet"
NRC_RESNET_OFFICE_HOME = "nrc_resnet_office_home"
WIDERESNET = "wideresnet"
CONSTANT = "constant"
MODEL_REGISTRY = {
ImageModelName.RESNET: ResNet50,
ImageModelName.NRC_RESNET: NRCResNet101,
ImageModelName.NRC_RESNET_OFFICE_HOME: NRCResNet50,
ImageModelName.WIDERESNET: WideResNet2810,
ImageModelName.CONSTANT: ConstantEncoderModel,
}
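# A hedged usage sketch: the registry maps each enum member to a flax Module
# class (or a functools.partial thereof); `num_classes=10` is hypothetical.
def example_model_from_registry():
  model_cls = MODEL_REGISTRY[ImageModelName.CONSTANT]
  return model_cls(num_classes=10)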
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The ResNet v1.5 architecture used by NRC [1].
The architecture corresponds to the ResNet architecture in TorchVision, with the
following modifications:
- A dense bottleneck layer followed by batch normalization is applied after
the global average pooling operation.
- The dense output layer is weight-normalized.
[1] Yang, Shiqi, et al. "Exploiting the intrinsic neighborhood structure for
source-free domain adaptation." Advances in Neural Information Processing
Systems 34 (2021): 29393-29405.
"""
import functools
import re
from chirp.models import output
from chirp.projects.sfda.models import resnet
from etils import epath
import flax
from flax import linen as nn
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
class WNDense(nn.Dense):
"""Weight-normalized Dense layer."""
def param(self, name, init_fn, *init_args):
if name == 'kernel':
kernel_v = super().param('kernel_v', init_fn, *init_args)
param_shape, param_dtype = init_args
param_shape = (1, param_shape[1])
kernel_g = super().param('kernel_g', init_fn, *(param_shape, param_dtype))
scale = jnp.sqrt(
jnp.square(kernel_v).sum(
tuple(range(kernel_v.ndim - 1)), keepdims=True
)
)
return kernel_g * kernel_v / scale
else:
return super().param(name, init_fn, *init_args)
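# A small numeric sketch (not part of the model) of the weight normalization
# implemented above: the effective kernel is g * v / ||v||, with the norm taken
# over all axes but the last, so each output feature's column has unit norm
# before being rescaled by g. The values below are hypothetical.
def _weight_norm_sketch() -> jnp.ndarray:
  kernel_v = jnp.arange(6.0).reshape(2, 3)  # unnormalized direction
  kernel_g = jnp.ones((1, 3))  # per-output-feature magnitude
  scale = jnp.sqrt(jnp.square(kernel_v).sum(axis=0, keepdims=True))
  return kernel_g * kernel_v / scale  # columns have unit norm, scaled by g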
class NRCResNet(resnet.ResNet):
"""Re-implementation of the ResNet v1.5 architecture used in NRC."""
bottleneck_width: int = 256
@nn.compact
def __call__(self, x, train: bool, use_running_average: bool):
# There *is* a computational difference between using padding='SAME' and
# padding=1 for strided 3x3 convolutions, and to maintain compatibility
# with the PyTorch implementation of ResNet we need to pass padding=1 rather
# than the default padding='SAME' for 3x3 convolutions.
def conv(*args, **kwargs):
if args[1] == (3, 3):
fn = functools.partial(
self.conv, use_bias=False, padding=1, dtype=self.dtype
)
else:
fn = functools.partial(self.conv, use_bias=False, dtype=self.dtype)
return fn(*args, **kwargs)
norm = functools.partial(
nn.BatchNorm,
use_running_average=use_running_average,
momentum=0.9,
epsilon=1e-5,
dtype=self.dtype,
)
x = conv(
self.num_filters,
(7, 7),
(2, 2),
padding=[(3, 3), (3, 3)],
name='conv_init',
)(x)
x = norm(name='bn_init')(x)
x = nn.relu(x)
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding=((1, 1), (1, 1)))
for i, block_size in enumerate(self.stage_sizes):
for j in range(block_size):
strides = (2, 2) if i > 0 and j == 0 else (1, 1)
x = self.block_cls(
self.num_filters * 2**i,
strides=strides,
conv=conv,
norm=norm,
act=self.act,
)(x)
x = jnp.mean(x, axis=(1, 2))
x = nn.Dense(self.bottleneck_width, name='bottleneck_dense')(x)
x = norm(name='bottleneck_bn')(x)
model_outputs = {}
model_outputs['embedding'] = x
x = WNDense(self.num_classes, dtype=self.dtype)(x)
x = jnp.asarray(x, self.dtype)
model_outputs['label'] = x.astype(jnp.float32)
return output.ClassifierOutput(**model_outputs)
@staticmethod
def load_ckpt(dataset_name: str) -> flax.core.frozen_dict.FrozenDict:
pretrained_ckpt_dir = NRCResNet.get_ckpt_path(dataset_name)
with pretrained_ckpt_dir.open('rb') as f:
state_dict = dict(np.load(f))
variables = _to_variables(state_dict, dataset_name)
return variables
@staticmethod
def get_ckpt_path(dataset_name: str) -> epath.Path:
if 'vis_da_c' in dataset_name:
# The public checkpoint doesn't exist because it's derived from a
# PyTorch checkpoint (https://github.com/Albert0147/NRC_SFDA, which
# points to the Google Drive directory
# https://drive.google.com/drive/folders/1rI_I7GOHLi8jA4FnL10xdh8PA1bbwsIp).
# Download the .pt files locally, then save them into a .npz file using
# the following command:
# state_dict = {}; load_fn = lambda letter: state_dict.update({
# f'{letter}.{k}': v for k, v in torch.load(
# f'source_{letter}.pt',
# map_location=torch.device('cpu')).items()})
# load_fn('B'); load_fn('C'); load_fn('F')
# np.savez('source.npz', **state_dict)
# Finally, replace the '' below by the path to the source.npz file you
# just created.
return epath.Path('')
elif 'office_home' in dataset_name:
_, domain = dataset_name.split('/')
# The public checkpoint doesn't exist because it's derived from a
# PyTorch checkpoint (https://github.com/Albert0147/NRC_SFDA, which
# points to the Google Drive directory
# https://drive.google.com/drive/folders/10QMTQZqFgEwbvFGdgz7VSha7NYG4q6sh).
# Download the .pt files locally (for a2c, c2a, p2a, and r2a), then save
# each of them into a .npz file using the following command:
# state_dict = {}; load_fn = lambda letter: state_dict.update({
# f'{letter}.{k}': v for k, v in torch.load(
# f'source_{letter}.pt',
# map_location=torch.device('cpu')).items()})
# load_fn('B'); load_fn('C'); load_fn('F')
# np.savez('source.npz', **state_dict)
# Finally, replace the '' below by the path to the source.npz files you
# just created using `domain` to determine which one to point to.
return epath.Path('')
else:
raise NotImplementedError(
f'No pretrained checkpoint available for dataset {dataset_name}.'
)
@staticmethod
def get_input_pipeline(
data_builder: tfds.core.DatasetBuilder, split: str, **kwargs
) -> tf.data.Dataset:
image_size = kwargs['image_size']
padded_image_size = image_size + resnet.CROP_PADDING
dtype = tf.float32
read_config = tfds.ReadConfig(add_tfds_id=True)
def process_example(example):
image = example['image']
# Resize and crop.
image = tf.image.resize(
[image],
[padded_image_size, padded_image_size],
method=tf.image.ResizeMethod.BILINEAR,
)[0]
image = tf.image.central_crop(image, image_size / padded_image_size)
# Reshape and normalize.
image = tf.reshape(image, [image_size, image_size, 3])
image -= tf.constant(resnet.MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(
resnet.STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype
)
image = tf.image.convert_image_dtype(image, dtype=dtype)
label = tf.one_hot(
example['label'], data_builder.info.features['label'].num_classes
)
return {'image': image, 'label': label, 'tfds_id': example['tfds_id']}
dataset = data_builder.as_dataset(split=split, read_config=read_config)
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = 48
dataset = dataset.with_options(options)
dataset = dataset.map(
process_example, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return dataset
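# A hedged sketch of the key renaming performed by `_to_variables` below,
# reduced to three of its rules for illustration: the PyTorch key
# 'F.layer1.0.conv1.weight' becomes 'BottleneckResNetBlock_0.Conv_0.kernel'.
# This simplification is only valid for the first group (offset 0); the real
# code below offsets block indices per group.
def _rename_sketch(key: str) -> str:
  key = re.sub(
      r'^F\.layer(\d*)\.(\d*)\.',
      lambda m: f'BottleneckResNetBlock_{int(m.group(2))}.',
      key,
  )
  key = re.sub(r'conv(\d)', lambda m: f'Conv_{int(m.group(1)) - 1}', key)
  return key.replace('weight', 'kernel')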
def _to_variables(
state_dict: dict[str, np.ndarray], dataset_name: str
) -> flax.core.scope.FrozenVariableDict:
"""Translates a PyTorch-style state dictionnary into a FrozenVariableDict.
Args:
state_dict: The PyTorch state_dict to translate.
dataset_name: The name of the dataset the model was trained on, indicative
of the model architecture used.
Returns:
The translated version of the state_dict.
Raises:
    RuntimeError: If some convolutional kernel has neither 2 nor 4 dimensions.
"""
if dataset_name != 'vis_da_c' and 'office_home' not in dataset_name:
raise ValueError
if dataset_name == 'vis_da_c':
bottleneck_dense_re = r'^B\.bottleneck'
bottleneck_bn_re = r'^B\.bn'
else:
bottleneck_dense_re = r'^F\.bottle'
bottleneck_bn_re = r'^F\.bn'
state_dict = {
k: v for k, v in state_dict.items() if 'feature_layers' not in k
}
flat_params = {}
flat_batch_stats = {}
def _match_to_block_name(m):
if dataset_name == 'vis_da_c':
block_index_offsets = [0, 3, 7, 30]
else:
block_index_offsets = [0, 3, 7, 13]
block_index = int(m.group(2)) + block_index_offsets[int(m.group(1)) - 1]
return f'BottleneckResNetBlock_{block_index}.'
renames = [
# Groups and blocks
functools.partial(
re.compile(r'^F\.layer(\d*)\.(\d*)\.').sub, repl=_match_to_block_name
),
# Initial convolution
functools.partial(re.compile(r'^F\.conv1').sub, repl=r'conv_init'),
# Initial normalization
functools.partial(re.compile(r'^F\.bn1').sub, repl=r'bn_init'),
# Bottleneck
functools.partial(
re.compile(bottleneck_dense_re).sub, repl=r'bottleneck_dense'
),
functools.partial(
re.compile(bottleneck_bn_re).sub, repl=r'bottleneck_bn'
),
# Output layer
functools.partial(re.compile(r'^C\.fc').sub, repl=r'WNDense_0'),
# Convolutional layers
functools.partial(
re.compile(r'conv(\d)').sub,
repl=lambda m: f'Conv_{int(m.group(1)) - 1}',
),
# Normalization layers
functools.partial(
re.compile(r'bn(\d)').sub,
repl=lambda m: f'BatchNorm_{int(m.group(1)) - 1}',
),
# Downsampling layers
functools.partial(
re.compile(r'downsample\.(\d)').sub,
repl=lambda m: 'norm_proj' if int(m.group(1)) else 'conv_proj',
),
# Normalization scaling coefficients. All other renamings of 'weight' map
# to 'kernel', so we perform this renaming first.
functools.partial(
re.compile(r'BatchNorm_(\d)\.weight').sub, repl=r'BatchNorm_\1.scale'
),
functools.partial(
re.compile(r'bn_init\.weight').sub, repl=r'bn_init.scale'
),
functools.partial(
re.compile(r'norm_proj\.weight').sub, repl=r'norm_proj.scale'
),
functools.partial(
re.compile(r'bottleneck_bn\.weight').sub, repl=r'bottleneck_bn.scale'
),
# Convolutional kernels
functools.partial(re.compile(r'weight').sub, repl=r'kernel'),
# Batch statistics
functools.partial(re.compile(r'running_mean').sub, repl=r'mean'),
functools.partial(re.compile(r'running_var').sub, repl=r'var'),
]
for key, value in state_dict.items():
# We don't need the 'num_batches_tracked' variables.
if 'num_batches_tracked' in key:
continue
# Perform renaming.
for rename in renames:
key = rename(string=key)
# Transpose convolutional kernels and weight matrices.
if 'kernel' in key:
if len(value.shape) == 2:
value = value.transpose()
elif len(value.shape) == 4:
value = value.transpose(2, 3, 1, 0)
else:
raise RuntimeError
# Route parameters and batch statistics to their appropriate flat
# dictionary. Flax can unflatten dictionaries whose keys are tuples of
# strings, which we take advantage of by splitting the keys by the '.'
# character.
flat_dict = (
flat_batch_stats if 'mean' in key or 'var' in key else flat_params
)
flat_dict[tuple(key.split('.'))] = value
# NRC uses an Office-Home class order other than alphabetical, so we need to
# permute the output layer.
if 'office_home' in dataset_name:
permutation = (
[33, 32, 36, 15, 19, 2, 46, 49, 48, 53, 47, 54, 4, 18, 57, 23, 0, 45, 1]
+ [38, 5, 13, 50, 11, 58, 3, 16, 25, 10, 12, 61, 51, 9, 64, 28, 29, 26]
+ [21, 31, 62, 40, 35, 27, 14, 20, 43, 34, 37, 63, 39, 55, 41, 6, 8, 30]
+ [59, 44, 52, 60, 24, 17, 7, 42, 56, 22]
)
flat_params[('WNDense_0', 'bias')] = flat_params[('WNDense_0', 'bias')][
permutation
]
flat_params[('WNDense_0', 'kernel_g')] = flat_params[
('WNDense_0', 'kernel_g')
][:, permutation]
flat_params[('WNDense_0', 'kernel_v')] = flat_params[
('WNDense_0', 'kernel_v')
][:, permutation]
return flax.core.freeze({
'params': flax.traverse_util.unflatten_dict(flat_params),
'batch_stats': flax.traverse_util.unflatten_dict(flat_batch_stats),
})
NRCResNet50 = functools.partial(
NRCResNet, stage_sizes=[3, 4, 6, 3], block_cls=resnet.BottleneckResNetBlock
)
NRCResNet101 = functools.partial(
NRCResNet, stage_sizes=[3, 4, 23, 3], block_cls=resnet.BottleneckResNetBlock
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A toy model, used for testing/debugging purposes only."""
from chirp.models import output
from chirp.projects.sfda.models import image_model
from flax import linen as nn
import jax.numpy as jnp
class ConstantEncoderModel(image_model.ImageModel):
"""A toy model, used for testing/debugging purposes only.
The model contains a trainable head. The encoder part simply returns the
raw images, after spatial average-pooling.
"""
num_classes: int
@nn.compact
def __call__(self, x, train: bool, use_running_average: bool):
x = jnp.mean(x, axis=(1, 2))
model_outputs = {}
model_outputs['embedding'] = x
x = nn.Dense(self.num_classes, dtype=jnp.float32)(x)
model_outputs['label'] = x
return output.ClassifierOutput(**model_outputs)
@staticmethod
def is_bn_parameter(parameter_name: list[str]) -> bool:
"""Verifies whether some parameter belong to a BatchNorm layer.
Args:
parameter_name: The name of the parameter, as a list in which each member
describes the name of a layer. E.g. ('Block1', 'batch_norm_1', 'bias').
Returns:
Whether this parameter belongs to a BatchNorm layer.
"""
return False
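# A hedged usage sketch: initializing and applying the toy model. The shapes
# and PRNG seed are hypothetical.
def _constant_encoder_example():
  import jax
  model = ConstantEncoderModel(num_classes=2)
  images = jnp.zeros((1, 12, 12, 3))
  variables = model.init(
      jax.random.PRNGKey(0), images, train=False, use_running_average=True
  )
  # Returns an output.ClassifierOutput with 'embedding' and 'label' fields.
  return model.apply(variables, images, train=False, use_running_average=True)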
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The ResNet v1 architecture, borrowed from flax examples/ directory.
Taken from https://github.com/google/flax/blob/main/examples/imagenet/models.py.
We make the following modifications to the original model:
- Use of 'use_running_average' to explicitly control BatchNorm's behavior.
- Packaging the forward's output as a output.ClassifierOutput for
compatibility with the rest of the pipeline.
  - Added a Dropout layer after average pooling, and before the classification
head. This was done to inject noise during the forward pass for Dropout
Student and NOTELA.
- Added a 'load_ckpt' method, following image_model.ImageModel template.
- Added a 'get_ckpt_path' method, following image_model.ImageModel template.
- Integrated flax's input pipeline in the 'get_input_pipeline' method.
"""
import functools
from typing import Any, Callable, Sequence
from chirp.models import output
from chirp.projects.sfda.models import image_model
from etils import epath
import flax
from flax import linen as nn
from flax.training import checkpoints as flax_checkpoints
import jax.numpy as jnp
import tensorflow as tf
import tensorflow_datasets as tfds
ModuleDef = Any
CROP_PADDING = 32
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
class ResNetBlock(nn.Module):
"""ResNet block."""
filters: int
conv: ModuleDef
norm: ModuleDef
act: Callable[[jnp.ndarray], jnp.ndarray]
strides: tuple[int, int] = (1, 1)
@nn.compact
def __call__(
self,
x,
):
residual = x
y = self.conv(self.filters, (3, 3), self.strides)(x)
y = self.norm()(y)
y = self.act(y)
y = self.conv(self.filters, (3, 3))(y)
y = self.norm(scale_init=nn.initializers.zeros)(y)
if residual.shape != y.shape:
residual = self.conv(
self.filters, (1, 1), self.strides, name='conv_proj'
)(residual)
residual = self.norm(name='norm_proj')(residual)
return self.act(residual + y)
class BottleneckResNetBlock(nn.Module):
"""Bottleneck ResNet block."""
filters: int
conv: ModuleDef
norm: ModuleDef
act: Callable[[jnp.ndarray], jnp.ndarray]
strides: tuple[int, int] = (1, 1)
@nn.compact
def __call__(self, x):
residual = x
y = self.conv(self.filters, (1, 1))(x)
y = self.norm()(y)
y = self.act(y)
y = self.conv(self.filters, (3, 3), self.strides)(y)
y = self.norm()(y)
y = self.act(y)
y = self.conv(self.filters * 4, (1, 1))(y)
y = self.norm(scale_init=nn.initializers.zeros)(y)
if residual.shape != y.shape:
residual = self.conv(
self.filters * 4, (1, 1), self.strides, name='conv_proj'
)(residual)
residual = self.norm(name='norm_proj')(residual)
return self.act(residual + y)
class ResNet(image_model.ImageModel):
"""ResNetV1."""
stage_sizes: Sequence[int]
block_cls: ModuleDef
num_classes: int
num_filters: int = 64
dtype: jnp.dtype = jnp.float32
act: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu
conv: ModuleDef = nn.Conv
@nn.compact
def __call__(self, x, train: bool, use_running_average: bool):
conv = functools.partial(self.conv, use_bias=False, dtype=self.dtype)
norm = functools.partial(
nn.BatchNorm,
use_running_average=use_running_average,
momentum=0.9,
epsilon=1e-5,
dtype=self.dtype,
)
x = conv(
self.num_filters,
(7, 7),
(2, 2),
padding=[(3, 3), (3, 3)],
name='conv_init',
)(x)
x = norm(name='bn_init')(x)
x = nn.relu(x)
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding='SAME')
for i, block_size in enumerate(self.stage_sizes):
for j in range(block_size):
strides = (2, 2) if i > 0 and j == 0 else (1, 1)
x = self.block_cls(
self.num_filters * 2**i,
strides=strides,
conv=conv,
norm=norm,
act=self.act,
)(x)
    # The following Dropout was added to inject noise during the forward pass.
x = nn.Dropout(0.1, deterministic=not train)(x)
x = jnp.mean(x, axis=(1, 2))
model_outputs = {}
model_outputs['embedding'] = x
x = nn.Dense(self.num_classes, dtype=self.dtype)(x)
x = jnp.asarray(x, self.dtype)
model_outputs['label'] = x.astype(jnp.float32)
return output.ClassifierOutput(**model_outputs)
@staticmethod
def is_bn_parameter(parameter_name: list[str]) -> bool:
"""Verifies whether some parameter belong to a BatchNorm layer.
Captures all BatchNorm parameters, except those from the residual layers
(`norm_proj_*` in the code).
Args:
parameter_name: The name of the parameter, as a list in which each member
describes the name of a layer. E.g. ('Block1', 'BatchNorm_1', 'bias').
Returns:
True if this parameter belongs to a BatchNorm layer.
"""
return any(['BatchNorm' in x for x in parameter_name])
@staticmethod
def load_ckpt(dataset_name: str) -> flax.core.frozen_dict.FrozenDict:
pretrained_ckpt_dir = ResNet.get_ckpt_path(dataset_name)
state_dict = flax_checkpoints.restore_checkpoint(
pretrained_ckpt_dir, target=None
)
variables = flax.core.freeze({
'params': state_dict['params'],
'batch_stats': state_dict['batch_stats'],
})
return variables
@staticmethod
def get_ckpt_path(dataset_name: str) -> epath.Path:
if 'imagenet' in dataset_name:
return epath.Path(
'gs://flax_public/examples/imagenet/v100_x8/checkpoint_250200'
)
else:
raise NotImplementedError(
f'No pretrained checkpoint available for dataset {dataset_name}.'
)
@staticmethod
def get_input_pipeline(
data_builder: tfds.core.DatasetBuilder, split: str, **kwargs
) -> tf.data.Dataset:
image_size = kwargs['image_size']
dtype = tf.float32
read_config = tfds.ReadConfig(add_tfds_id=True)
def _resize(image):
return tf.image.resize(
[image],
[image_size, image_size],
method=tf.image.ResizeMethod.BICUBIC,
)[0]
def _decode_and_center_crop(image_bytes):
"""Crops to center of image with padding then scales image_size."""
shape = tf.io.extract_jpeg_shape(image_bytes)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
(
(image_size / (image_size + CROP_PADDING))
* tf.cast(tf.minimum(image_height, image_width), tf.float32)
),
tf.int32,
)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([
offset_height,
offset_width,
padded_center_crop_size,
padded_center_crop_size,
])
image = tf.io.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = _resize(image)
return image
def normalize_image(image):
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def preprocess_for_eval(image_bytes, dtype=tf.float32):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
dtype: data type of the image.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_center_crop(image_bytes)
image = tf.reshape(image, [image_size, image_size, 3])
image = normalize_image(image)
image = tf.image.convert_image_dtype(image, dtype=dtype)
return image
def decode_example(example):
image = preprocess_for_eval(example['image'], dtype=dtype)
label = tf.one_hot(
example['label'], data_builder.info.features['label'].num_classes
)
return {'image': image, 'label': label, 'tfds_id': example['tfds_id']}
dataset = data_builder.as_dataset(
split=split,
decoders={'image': tfds.decode.SkipDecoding()},
read_config=read_config,
)
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = 48
dataset = dataset.with_options(options)
dataset = dataset.map(
decode_example, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return dataset
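# A numeric sketch (hypothetical sizes) of the padded center crop above: for
# image_size=224 and a 640x480 input, the crop keeps
# 224 / (224 + CROP_PADDING) * 480 = 420 pixels of the shorter side before
# resizing to 224x224.
def _center_crop_size(image_size: int, shorter_side: int) -> int:
  return int(image_size / (image_size + CROP_PADDING) * shorter_side)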
ResNet18 = functools.partial(
ResNet, stage_sizes=[2, 2, 2, 2], block_cls=ResNetBlock
)
ResNet34 = functools.partial(
ResNet, stage_sizes=[3, 4, 6, 3], block_cls=ResNetBlock
)
ResNet50 = functools.partial(
ResNet, stage_sizes=[3, 4, 6, 3], block_cls=BottleneckResNetBlock
)
ResNet101 = functools.partial(
ResNet, stage_sizes=[3, 4, 23, 3], block_cls=BottleneckResNetBlock
)
ResNet152 = functools.partial(
ResNet, stage_sizes=[3, 8, 36, 3], block_cls=BottleneckResNetBlock
)
ResNet200 = functools.partial(
ResNet, stage_sizes=[3, 24, 36, 3], block_cls=BottleneckResNetBlock
)
ResNet18Local = functools.partial(
ResNet, stage_sizes=[2, 2, 2, 2], block_cls=ResNetBlock, conv=nn.ConvLocal
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The WideResnet architecture.
Translated from PyTorch version at https://github.com/RobustBench/robustbench/
blob/master/robustbench/model_zoo/architectures/wide_resnet.py. We make the
following modifications to the original model:
- Use of 'use_running_average' to explicitly control BatchNorm's behavior.
- Packaging the forward's output as a output.ClassifierOutput for
compatibility with the rest of the pipeline.
  - Added a Dropout layer after average pooling, and before the classification
head. This was done to inject noise during the forward pass for Dropout
Student and NOTELA.
- Added a 'load_ckpt' method, following image_model.ImageModel template.
- Added a 'get_ckpt_path' method, following image_model.ImageModel template.
"""
import functools
import re
from chirp.models import output
from chirp.projects.sfda.models import image_model
from etils import epath
import flax
from flax import linen as nn
import jax
import jax.numpy as jnp
import numpy as np
class WideResnetBlock(nn.Module):
"""Defines a single WideResnetBlock.
Attributes:
channels: How many channels to use in the convolutional layers.
strides: Strides for the pooling.
"""
channels: int
strides: tuple[int, int] = (1, 1)
@nn.compact
def __call__(
self, x: jnp.ndarray, train: bool, use_running_average: bool
) -> jnp.ndarray:
bn1 = nn.BatchNorm(use_running_average=use_running_average, name='norm_1')
bn2 = nn.BatchNorm(use_running_average=use_running_average, name='norm_2')
conv1 = nn.Conv(
self.channels,
kernel_size=(3, 3),
strides=self.strides,
padding=1,
use_bias=False,
name='conv1',
)
conv2 = nn.Conv(
self.channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=1,
use_bias=False,
name='conv2',
)
if x.shape[-1] == self.channels:
out = jax.nn.relu(bn1(x))
out = jax.nn.relu(bn2(conv1(out)))
out = conv2(out)
out += x
else:
x = jax.nn.relu(bn1(x))
out = jax.nn.relu(bn2(conv1(x)))
out = conv2(out)
out += nn.Conv(
self.channels,
(1, 1),
self.strides,
padding='VALID',
use_bias=False,
name='conv_shortcut',
)(x)
return out
class WideResnetGroup(nn.Module):
"""Defines a WideResnetGroup.
Attributes:
blocks_per_group: How many resnet blocks to add to each group (should be 4
blocks for a WRN28, and 6 for a WRN40).
channels: How many channels to use in the convolutional layers.
    strides: Strides for the group's first block (used to downsample in place
      of pooling).
"""
blocks_per_group: int
channels: int
strides: tuple[int, int] = (1, 1)
@nn.compact
def __call__(
self, x: jnp.ndarray, train: bool, use_running_average: bool
) -> jnp.ndarray:
for i in range(self.blocks_per_group):
x = WideResnetBlock(self.channels, self.strides if i == 0 else (1, 1))(
x, train, use_running_average
)
return x
class WideResnet(image_model.ImageModel):
"""Defines the WideResnet Model.
Attributes:
blocks_per_group: How many resnet blocks to add to each group (should be 4
blocks for a WRN28, and 6 for a WRN40).
    channel_multiplier: The multiplier to apply to the number of filters in the
      model (1 recovers the classical ResNet width; 10 yields WRN28-10, etc.).
    num_classes: Dimension of the model's output (i.e. the number of classes
      for a classification problem).
"""
blocks_per_group: int
channel_multiplier: int
num_classes: int
@nn.compact
def __call__( # pytype: disable=signature-mismatch # jax-ndarray
self, x: jnp.ndarray, train: bool, use_running_average: bool
) -> jnp.ndarray:
x = nn.Conv(16, (3, 3), padding=1, name='init_conv', use_bias=False)(x)
x = WideResnetGroup(self.blocks_per_group, 16 * self.channel_multiplier)(
x, train=train, use_running_average=use_running_average
)
x = WideResnetGroup(
self.blocks_per_group,
32 * self.channel_multiplier,
(2, 2),
)(x, train=train, use_running_average=use_running_average)
x = WideResnetGroup(
self.blocks_per_group,
64 * self.channel_multiplier,
(2, 2),
)(x, train=train, use_running_average=use_running_average)
x = jax.nn.relu(
nn.BatchNorm(
use_running_average=use_running_average, name='pre-pool-norm'
)(x)
)
    # The following Dropout was added to inject noise during the forward pass.
x = nn.Dropout(0.1, deterministic=not train)(x)
x = nn.avg_pool(x, x.shape[1:3])
x = x.reshape((x.shape[0], -1))
model_outputs = {}
model_outputs['embedding'] = x
# Head
outputs = nn.Dense(self.num_classes)(x)
model_outputs['label'] = outputs.astype(jnp.float32)
return output.ClassifierOutput(**model_outputs) # pytype: disable=bad-return-type # jax-ndarray
@staticmethod
def load_ckpt(dataset_name: str) -> flax.core.frozen_dict.FrozenDict:
pretrained_ckpt_dir = WideResnet.get_ckpt_path(dataset_name)
with pretrained_ckpt_dir.open('rb') as f:
state_dict = dict(np.load(f))
variables = _to_variables(state_dict)
return variables
@staticmethod
def get_ckpt_path(dataset_name: str) -> epath.Path:
if 'cifar' in dataset_name:
      # No public checkpoint is hosted here because the weights are derived
      # from a PyTorch checkpoint (https://github.com/RobustBench/robustbench/
      # blob/master/robustbench/model_zoo/cifar10.py#L760, which points to
      # https://drive.google.com/open?id=1t98aEuzeTL8P7Kpd5DIrCoCL21BNZUhC).
      # Please download that file, open it locally, and save it as an npz
      # file with the following command:
      # np.savez(output_path, **torch.load(torch_checkpoint_path,
      #          map_location=torch.device('cpu'))['state_dict'])
      # Finally, replace the '' below with the `output_path` used above.
return epath.Path('')
else:
raise NotImplementedError(
f'No pretrained checkpoint available for dataset {dataset_name}.'
)
@staticmethod
  def is_bn_parameter(parameter_name: list[str]) -> bool:
    """Verifies whether a parameter belongs to a BatchNorm layer.
    Only the WideResnetGroups' BatchNorm parameters are captured; the
    pre-pool-norm is not included.
    Args:
      parameter_name: The name of the parameter, as a list in which each member
        describes the name of a layer. E.g. ('Block1', 'batch_norm_1', 'bias').
    Returns:
      True if this parameter belongs to a BatchNorm layer.
    """
    return any('norm_' in x for x in parameter_name)
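# Example of the predicate above:
#   is_bn_parameter(('WideResnetGroup_0', 'WideResnetBlock_0', 'norm_1',
#                    'scale'))  # -> True
#   is_bn_parameter(('pre-pool-norm', 'scale'))  # -> False, since 'norm_'
#                                                # (with the underscore) does
#                                                # not occur in that name.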
def _to_variables(
state_dict: dict[str, np.ndarray]
) -> flax.core.scope.FrozenVariableDict:
"""Translates a PyTorch-style state dictionnary into a flax FrozenVariableDict.
Args:
state_dict: The PyTorch state_dict to translate.
Returns:
The translated version of the state_dict.
Raises:
    RuntimeError: If some kernel (convolutional or dense) has neither 2 nor 4
      dimensions.
"""
flat_params = {}
flat_batch_stats = {}
renames = [
# Groups
functools.partial(
re.compile(r'block(\d)').sub,
repl=lambda m: f'WideResnetGroup_{int(m.group(1)) - 1}',
),
# Blocks
functools.partial(
re.compile(r'layer\.(\d)').sub, repl=r'WideResnetBlock_\1'
),
# Initial convolution
functools.partial(re.compile(r'^conv1').sub, repl=r'init_conv'),
# Pre-pooling normalization
functools.partial(re.compile(r'^bn1').sub, repl=r'pre-pool-norm'),
# Output layer
functools.partial(re.compile(r'fc').sub, repl=r'Dense_0'),
# Normalization layers
functools.partial(re.compile(r'bn(\d)').sub, repl=r'norm_\1'),
# Convolutional shortcut layers
functools.partial(re.compile(r'convShortcut').sub, repl=r'conv_shortcut'),
# Normalization scaling coefficients. All other renamings of 'weight' map
# to 'kernel', so we perform this renaming first.
functools.partial(
re.compile(r'norm_(\d)\.weight').sub, repl=r'norm_\1.scale'
),
functools.partial(
re.compile(r'pre-pool-norm.weight').sub, repl=r'pre-pool-norm.scale'
),
# Convolutional kernels
functools.partial(re.compile(r'weight').sub, repl=r'kernel'),
# Batch statistics
functools.partial(re.compile(r'running_mean').sub, repl=r'mean'),
functools.partial(re.compile(r'running_var').sub, repl=r'var'),
]
transposes = [re.compile('kernel').search] # pylint: disable=unused-variable
for key, value in state_dict.items():
# We don't need the 'num_batches_tracked' variables.
if 'num_batches_tracked' in key:
continue
# Perform renaming.
for rename in renames:
key = rename(string=key)
# Transpose convolutional kernels and weight matrices.
if 'kernel' in key:
if len(value.shape) == 2:
value = value.transpose()
elif len(value.shape) == 4:
value = value.transpose(2, 3, 1, 0)
else:
        raise RuntimeError(
            f'Unexpected kernel shape for {key!r}: {value.shape}'
        )
# Route parameters and batch statistics to their appropriate flat
# dictionary. Flax can unflatten dictionaries whose keys are tuples of
# strings, which we take advantage of by splitting the keys by the '.'
# character.
flat_dict = (
flat_batch_stats if 'mean' in key or 'var' in key else flat_params
)
flat_dict[tuple(key.split('.'))] = value
return flax.core.freeze({
'params': flax.traverse_util.unflatten_dict(flat_params),
'batch_stats': flax.traverse_util.unflatten_dict(flat_batch_stats),
})
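# Renaming sketch for _to_variables: a PyTorch key such as
# 'block1.layer.0.bn1.weight' passes through the substitutions above to become
# 'WideResnetGroup_0.WideResnetBlock_0.norm_1.scale', which is then split on
# '.' into a tuple key for flax.traverse_util.unflatten_dict.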
WideResNet2810 = functools.partial(
WideResnet, blocks_per_group=4, channel_multiplier=10
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An extension of chirp's original taxonomy_model."""
from chirp.models import taxonomy_model
class TaxonomyModel(taxonomy_model.TaxonomyModel):
"""Adding parameters masking utility to chirp's original taxonomy_model."""
@staticmethod
  def is_bn_parameter(parameter_name: list[str]) -> bool:
    """Verifies whether a parameter belongs to a BatchNorm layer.
    Args:
      parameter_name: The name of the parameter, as a list in which each member
        describes the name of a layer. E.g. ('Block1', 'batch_norm_1', 'bias').
    Returns:
      True if this parameter belongs to a BatchNorm layer.
    """
    return any('BatchNorm' in x for x in parameter_name)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config file for Dropout Student method."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-4
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.NONE
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
  image_config.online_pl_updates = True
  image_config.alpha = 0.1
  image_config.normalize_pseudo_labels = True
  # Forward options
image_config.num_epochs = 10
image_config.use_dropout = True
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-4
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.NONE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
audio_config.online_pl_updates = False
audio_config.alpha = 1.0
audio_config.normalize_pseudo_labels = True
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config(
"dropout_student.DropoutStudent"
)
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
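# Consumption sketch (hypothetical wiring; the adaptation loop selects the
# sub-config matching the active modality):
#
#   method_config = get_config()
#   audio_cfg = method_config.audio
#   audio_cfg.optimizer_config.learning_rate  # -> 1e-4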
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for the DUST SFDA method."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-5
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
image_config.num_random_passes = 4
image_config.kl_threshold = 0.8
# Forward options
image_config.num_epochs = 10
image_config.use_dropout = False
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 0.0001
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.NONE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.ALL
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
audio_config.num_random_passes = 4
audio_config.kl_threshold = 0.9
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config("dust.DUST")
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for NRC SFDA method."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-05
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
image_config.base_affinity = 0.2
image_config.nn = 15
image_config.extended_nn = 15
# Forward options
image_config.num_epochs = 10
image_config.use_dropout = False
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-5
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
audio_config.base_affinity = 0.1
audio_config.nn = 5
audio_config.extended_nn = 5
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config("nrc.NRC")
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for the DUST SFDA method."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-5
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
image_config.num_random_passes = 4
image_config.kl_threshold = 0.8
# Forward options
image_config.num_epochs = 10
image_config.use_dropout = False
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 0.0001
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.ALL
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
audio_config.num_random_passes = 4
audio_config.kl_threshold = 0.9
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config("dust.DUST")
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run baseline model."""
from chirp import config_utils
from chirp.projects.sfda import adapt
from chirp.projects.sfda import models
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = config_dict.ConfigDict()
config.modality = adapt.Modality.IMAGE
config.multi_label = False
config.eval_every = 1 # in epochs
config.batch_size_adaptation = 64
config.batch_size_eval = 64
init_config = config_dict.ConfigDict()
init_config.rng_seed = 0
init_config.target_class_list = "cifar10_corrupted"
init_config.corruption_name = "gaussian_noise"
init_config.corruption_severity = 5
init_config.source_domain = "art"
init_config.target_domain = "clipart"
init_config.pretrained_model = True
config.init_config = init_config
model_config = config_dict.ConfigDict()
model_config.encoder = models.ImageModelName.WIDERESNET
config.model_config = model_config
config.eval_mca_every = -1
return config
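# Note: only a subset of init_config is presumably consumed per dataset: the
# corruption_* fields apply to corrupted-image datasets such as
# cifar10_corrupted, while source_domain/target_domain would be read for
# domain-shift benchmarks instead.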
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config file for NOTELA (our proposed method)."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-4
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.NONE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
  image_config.online_pl_updates = True
  image_config.knn = 15
  image_config.lambda_ = 2.0
  image_config.alpha = 1.0
  image_config.use_mutual_nn = True
  image_config.normalize_pseudo_labels = True
  # Forward options
image_config.num_epochs = 10
image_config.use_dropout = True
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 0.0001
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.ALL
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
audio_config.knn = 15
audio_config.lambda_ = 2.0
audio_config.alpha = 1.0
audio_config.online_pl_updates = True
audio_config.use_mutual_nn = True
audio_config.normalize_pseudo_labels = True
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config("notela.NOTELA")
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base configuration for bio-acoustic SFDA experiments."""
from chirp import config_utils
from chirp.projects.sfda import adapt
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
sample_rate_hz = config_dict.FieldReference(32_000)
target_class_list = config_dict.FieldReference("xenocanto")
namespace = config_dict.FieldReference("ebird2021")
add_taxonomic_labels = config_dict.FieldReference(True)
config = config_dict.ConfigDict()
config.modality = adapt.Modality.AUDIO
config.multi_label = False
config.eval_every = 1 # in epochs
config.sample_rate_hz = sample_rate_hz
tfds_data_dir = config_dict.FieldReference("")
# Configure the data
batch_size = config_dict.FieldReference(64)
window_size_s = config_dict.FieldReference(5)
seed = config_dict.FieldReference(0)
config.tfds_data_dir = tfds_data_dir
config.batch_size = batch_size
config.target_class_list = target_class_list
adaptation_data_config = config_dict.ConfigDict()
adaptation_data_config.pipeline = _c(
"pipeline.Pipeline",
ops=[
_c("pipeline.FilterMultiLabelRecordings"),
_c(
"pipeline.ConvertBirdTaxonomyLabels",
source_namespace=namespace,
target_class_list=target_class_list,
add_taxonomic_labels=add_taxonomic_labels,
),
_c("pipeline.Shuffle", shuffle_buffer_size=512, seed=seed),
_c(
"sfda_pipeline.Batch",
batch_size=batch_size,
split_across_devices=True,
),
_c("pipeline.NormalizeAudio", target_gain=0.2),
],
)
adaptation_data_config.split = "[(0, 75)]"
adaptation_data_config.tfds_data_dir = tfds_data_dir
adaptation_data_config.dataset_directory = "soundscapes/high_sierras:1.0.1"
config.adaptation_data_config = adaptation_data_config
eval_data_config = config_dict.ConfigDict()
eval_data_config.pipeline = _c(
"pipeline.Pipeline",
ops=[
_c("pipeline.FilterMultiLabelRecordings"),
_c(
"pipeline.ConvertBirdTaxonomyLabels",
source_namespace=namespace,
target_class_list=target_class_list,
add_taxonomic_labels=add_taxonomic_labels,
),
_c(
"sfda_pipeline.Batch",
batch_size=batch_size,
split_across_devices=True,
),
_c("pipeline.NormalizeAudio", target_gain=0.2),
],
)
eval_data_config.split = "[(75, 100)]"
eval_data_config.tfds_data_dir = tfds_data_dir
eval_data_config.dataset_directory = "soundscapes/high_sierras:1.0.1"
config.eval_data_config = eval_data_config
# Configure the experiment setup
init_config = config_dict.ConfigDict()
init_config.rng_seed = seed
init_config.target_class_list = target_class_list
init_config.input_shape = ((window_size_s * sample_rate_hz).get(),)
init_config.pretrained_model = True
# Configure model
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
"efficientnet.EfficientNet",
model=_c("efficientnet.EfficientNetModel", value="b1"),
) # remove any dropout from the model
model_config.taxonomy_loss_weight = 0.25
model_config.frontend = _c(
"frontend.MorletWaveletTransform",
features=160,
stride=sample_rate_hz // 100,
      kernel_size=2_048,  # ~0.064 s at 32 kHz
sample_rate=sample_rate_hz,
freq_range=(60, 10_000),
scaling_config=_c("frontend.PCENScalingConfig", conv_width=0),
)
init_config.pretrained_ckpt_dir = ""
config.model_config = model_config
config.init_config = init_config
config.eval_mca_every = -1
return config
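# Note: the FieldReference objects above keep related settings in sync; for
# example, overriding config.tfds_data_dir (e.g. from the command line) also
# updates adaptation_data_config.tfds_data_dir and
# eval_data_config.tfds_data_dir, since all three share one reference.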
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the globals that can be used in configuration files."""
from typing import Any
from chirp import audio_utils
from chirp.eval import eval_lib
from chirp.models import conformer
from chirp.models import efficientnet
from chirp.models import frontend
from chirp.models import hubert
from chirp.models import layers
from chirp.models import quantizers
from chirp.models import soundstream_unet
from chirp.preprocessing import pipeline
from chirp.projects.sfda.data import pipeline as sfda_pipeline
from chirp.projects.sfda.methods import ada_bn
from chirp.projects.sfda.methods import dropout_student
from chirp.projects.sfda.methods import dust
from chirp.projects.sfda.methods import notela
from chirp.projects.sfda.methods import nrc
from chirp.projects.sfda.methods import pseudo_label
from chirp.projects.sfda.methods import shot
from chirp.projects.sfda.methods import tent
from flax import linen as nn
def get_globals() -> dict[str, Any]:
return {
"audio_utils": audio_utils,
"conformer": conformer,
"efficientnet": efficientnet,
"eval_lib": eval_lib,
"hubert": hubert,
"quantizers": quantizers,
"frontend": frontend,
"layers": layers,
"nn": nn,
"pipeline": pipeline,
"sfda_pipeline": sfda_pipeline,
"soundstream_unet": soundstream_unet,
"pseudo_label": pseudo_label,
"tent": tent,
"notela": notela,
"shot": shot,
"ada_bn": ada_bn,
"dropout_student": dropout_student,
"nrc": nrc,
"dust": dust,
}
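# Resolution sketch: config_utils.callable_config resolves string-typed
# configs against the mapping above; e.g. "tent.Tent" is looked up as the
# Tent attribute of the tent module registered here. (This is a simplified
# sketch of the lookup performed by chirp.config_utils.)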
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for NRC SFDA method."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-05
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
image_config.base_affinity = 0.2
image_config.nn = 15
image_config.extended_nn = 15
# Forward options
image_config.num_epochs = 10
image_config.use_dropout = False
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 0.0001
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.ALL
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
audio_config.base_affinity = 0.1
audio_config.nn = 15
audio_config.extended_nn = 10
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config("nrc.NRC")
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config file for NOTELA (our proposed method)."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-4
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.NONE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
  image_config.online_pl_updates = True
  image_config.knn = 15
  image_config.lambda_ = 2.0
  image_config.alpha = 1.0
  image_config.use_mutual_nn = True
  image_config.normalize_pseudo_labels = True
  # Forward options
image_config.num_epochs = 10
image_config.use_dropout = True
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-4
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.NONE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
audio_config.knn = 5
audio_config.lambda_ = 1.0
audio_config.alpha = 1.0
audio_config.online_pl_updates = False
audio_config.use_mutual_nn = True
audio_config.normalize_pseudo_labels = True
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config("notela.NOTELA")
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config file for Test-time entropy minimization.
Wang, Dequan, et al. "Tent: Fully test-time adaptation by entropy minimization."
arXiv preprint arXiv:2006.10726 (2020).
"""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-4
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
# Forward options
image_config.num_epochs = 10
image_config.use_dropout = False
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-5
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
audio_config.optimizer_config = optimizer_cfg
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config("tent.Tent")
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
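# Objective sketch: Tent minimizes the Shannon entropy of the model's
# predictions, updating only the parameters selected above (the BatchNorm
# affine parameters, under TrainableParams.BN). A minimal version of the
# loss, with an illustrative epsilon for numerical stability (assumes jnp is
# in scope):
#
#   def prediction_entropy(probabilities):  # [batch, num_classes]
#     return -(probabilities * jnp.log(probabilities + 1e-10)).sum(-1).mean()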
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config file for Test-time entropy minimization.
Wang, Dequan, et al. "Tent: Fully test-time adaptation by entropy minimization."
arXiv preprint arXiv:2006.10726 (2020).
"""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-4
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
# Forward options
image_config.num_epochs = 10
image_config.use_dropout = False
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 0.001
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.ALL
audio_config.optimizer_config = optimizer_cfg
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config("tent.Tent")
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config file for AdaBN method."""
from chirp import config_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
image_config.optimizer_config = None
# Forward options
image_config.num_epochs = 10
image_config.use_dropout = False
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
audio_config.optimizer_config = None
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = True
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config("ada_bn.AdaBN")
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
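# Note: AdaBN performs no gradient-based update (optimizer_config = None);
# adaptation consists solely of recomputing BatchNorm statistics on the
# target domain, which is why update_bn_statistics is True for both
# modalities.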
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for the pseudo_label SFDA method."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-3
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparam
image_config.confidence_threshold = 0.9
# Forward options
image_config.num_epochs = 10
image_config.use_dropout = False
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-5
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.NONE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparam
audio_config.confidence_threshold = 0.5
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config(
"pseudo_label.PseudoLabel"
)
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config file for Dropout Student method."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-4
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.NONE
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
  image_config.online_pl_updates = True
  image_config.alpha = 0.1
  image_config.normalize_pseudo_labels = True
  # Forward options
image_config.num_epochs = 10
image_config.use_dropout = True
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 0.001
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.ALL
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparams
audio_config.online_pl_updates = False
audio_config.alpha = 0.5
audio_config.normalize_pseudo_labels = True
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config(
"dropout_student.DropoutStudent"
)
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for SHOT SFDA method."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-4
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparam
image_config.beta = 0.3
# Forward options
image_config.num_epochs = 10
image_config.use_dropout = False
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 0.001
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.ALL
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparam
audio_config.beta = 0.6
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config("shot.SHOT")
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base configuration for bio-acoustic SFDA experiments."""
from chirp import config_utils
from chirp.projects.sfda import adapt
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
sample_rate_hz = config_dict.FieldReference(32_000)
target_class_list = config_dict.FieldReference("xenocanto")
namespace = config_dict.FieldReference("ebird2021")
add_taxonomic_labels = config_dict.FieldReference(True)
config = config_dict.ConfigDict()
config.modality = adapt.Modality.AUDIO
config.multi_label = True
config.eval_every = 1 # in epochs
config.sample_rate_hz = sample_rate_hz
tfds_data_dir = config_dict.FieldReference("")
# Configure the data
batch_size = config_dict.FieldReference(64)
window_size_s = config_dict.FieldReference(5)
seed = config_dict.FieldReference(0)
config.tfds_data_dir = tfds_data_dir
config.batch_size = batch_size
config.target_class_list = target_class_list
adaptation_data_config = config_dict.ConfigDict()
adaptation_data_config.pipeline = _c(
"pipeline.Pipeline",
ops=[
_c(
"pipeline.ConvertBirdTaxonomyLabels",
source_namespace=namespace,
target_class_list=target_class_list,
add_taxonomic_labels=add_taxonomic_labels,
),
_c("pipeline.Shuffle", shuffle_buffer_size=512, seed=seed),
_c(
"sfda_pipeline.Batch",
batch_size=batch_size,
split_across_devices=True,
),
_c("pipeline.NormalizeAudio", target_gain=0.2),
],
)
adaptation_data_config.split = "[(0, 75)]"
adaptation_data_config.tfds_data_dir = tfds_data_dir
adaptation_data_config.dataset_directory = "soundscapes/high_sierras:1.0.1"
config.adaptation_data_config = adaptation_data_config
eval_data_config = config_dict.ConfigDict()
eval_data_config.pipeline = _c(
"pipeline.Pipeline",
ops=[
_c(
"pipeline.ConvertBirdTaxonomyLabels",
source_namespace=namespace,
target_class_list=target_class_list,
add_taxonomic_labels=add_taxonomic_labels,
),
_c(
"sfda_pipeline.Batch",
batch_size=batch_size,
split_across_devices=True,
),
_c("pipeline.NormalizeAudio", target_gain=0.2),
],
)
eval_data_config.split = "[(75, 100)]"
eval_data_config.tfds_data_dir = tfds_data_dir
eval_data_config.dataset_directory = "soundscapes/high_sierras:1.0.1"
config.eval_data_config = eval_data_config
# Configure the experiment setup
init_config = config_dict.ConfigDict()
init_config.rng_seed = seed
init_config.target_class_list = target_class_list
init_config.input_shape = ((window_size_s * sample_rate_hz).get(),)
init_config.pretrained_model = True
# Configure model
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
"efficientnet.EfficientNet",
model=_c("efficientnet.EfficientNetModel", value="b1"),
) # remove any dropout from the model
model_config.taxonomy_loss_weight = 0.25
model_config.frontend = _c(
"frontend.MorletWaveletTransform",
features=160,
stride=sample_rate_hz // 100,
      kernel_size=2_048,  # ~0.064 s at 32 kHz
sample_rate=sample_rate_hz,
freq_range=(60, 10_000),
scaling_config=_c("frontend.PCENScalingConfig", conv_width=0),
)
init_config.pretrained_ckpt_dir = ""
config.model_config = model_config
config.init_config = init_config
config.eval_mca_every = -1
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for the pseudo_label SFDA method."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-3
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparam
image_config.confidence_threshold = 0.9
# Forward options
image_config.num_epochs = 10
image_config.use_dropout = False
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 0.001
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.NONE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparam
audio_config.confidence_threshold = 0.5
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = True
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config(
"pseudo_label.PseudoLabel"
)
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
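# Minimal usage sketch (illustrative, not part of the config API; the
# override below is hypothetical): the adaptation binary is assumed to call
# get_config(), and hyperparameters can be inspected or overridden on the
# returned ConfigDict before use.
if __name__ == "__main__":
  cfg = get_config()
  print(cfg.audio.confidence_threshold)  # 0.5 by default.
  cfg.audio.confidence_threshold = 0.7  # Hypothetical override for a sweep.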
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for SHOT SFDA method."""
from chirp import config_utils
from chirp.projects.sfda import model_utils
from ml_collections import config_dict
def get_image_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
image_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-4
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
image_config.optimizer_config = optimizer_cfg
  # Method-specific hparam
image_config.beta = 0.3
# Forward options
image_config.num_epochs = 10
image_config.use_dropout = False
image_config.update_bn_statistics = True
return image_config
def get_audio_config() -> config_dict.ConfigDict: # pylint: disable=missing-function-docstring
# Configure adaptation
audio_config = config_dict.ConfigDict()
optimizer_cfg = config_dict.ConfigDict()
optimizer_cfg.optimizer = "adam"
optimizer_cfg.opt_kwargs = {"momentum": 0.9, "nesterov": True}
optimizer_cfg.weight_decay = 0.0
optimizer_cfg.learning_rate = 1e-5
optimizer_cfg.learning_rate_decay = model_utils.LearningRateDecay.COSINE
optimizer_cfg.mult_learning_rate_resnet_base = 1.0
optimizer_cfg.trainable_params_strategy = model_utils.TrainableParams.BN
audio_config.optimizer_config = optimizer_cfg
  # Method-specific hparam
audio_config.beta = 0.0
# Forward options
audio_config.num_epochs = 10
audio_config.use_dropout = False
audio_config.update_bn_statistics = False
return audio_config
def get_config() -> config_dict.ConfigDict:
method_config = config_dict.ConfigDict()
method_config.sfda_method = config_utils.callable_config("shot.SHOT")
method_config.audio = get_audio_config()
method_config.image = get_image_config()
return method_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parts of the audio data pipeline that require customization."""
import dataclasses
from absl import logging
from chirp.preprocessing import pipeline
import jax
import tensorflow as tf
import tensorflow_datasets as tfds
@dataclasses.dataclass
class Batch(pipeline.DatasetPreprocessOp):
"""Collects samples into batches.
  The original Batch operation in chirp/data/pipeline.py drops the remainder
  by default. Combined with shuffling, two runs over the dataset may therefore
  see disjoint sets of samples. Some methods, e.g. NOTELA or SHOT, assign
  pseudo-labels to samples before an epoch starts and try to make the model
  match those pseudo-labels on the fly. Those methods therefore require a
  consistent set of samples across runs over the dataset. We solve this by
  simply keeping the remainder, thereby ensuring that all samples are seen on
  every run.
Attributes:
batch_size: The batch size to use.
split_across_devices: If true, the minibatch will be split into smaller
minibatches to be distributed across the local devices present. This is
useful for distributed training.
"""
batch_size: int
split_across_devices: bool = False
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
if self.split_across_devices:
if self.batch_size % jax.device_count():
raise ValueError(
f'batch size ({self.batch_size}) must be divisible by '
f'number of devices ({jax.device_count()}).'
)
logging.info(
'Splitting batch across %d devices, with local device count %d.',
jax.device_count(),
jax.local_device_count(),
)
dataset = dataset.batch(
self.batch_size // jax.device_count(), drop_remainder=False
)
return dataset.batch(jax.local_device_count(), drop_remainder=False)
else:
return dataset.batch(self.batch_size, drop_remainder=False)
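# Minimal sketch of the batching behavior (assumes a single host; the toy
# dataset below is hypothetical, and dataset_info is unused so None is passed).
# With split_across_devices=True the element shape becomes
# [local_device_count, batch_size // device_count, ...], ready for pmap-style
# training loops.
if __name__ == '__main__':
  toy = tf.data.Dataset.from_tensor_slices({'audio': tf.zeros([10, 100])})
  batched = Batch(batch_size=4, split_across_devices=True)(toy, None)
  for batch in batched.take(1):
    print(batch['audio'].shape)  # e.g. (1, 4, 100) with one local device.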
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VisDa-C dataset."""
from .visda import VisDaC
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VisDa-C dataset."""
import itertools
import tensorflow_datasets as tfds
_DESCRIPTION = """
VisDa-C dataset.
"""
_CITATION = """
@article{peng2017visda,
title={Visda: The visual domain adaptation challenge},
author={Peng, Xingchao and Usman, Ben and Kaushik, Neela and Hoffman, Judy and
Wang, Dequan and Saenko, Kate},
journal={arXiv preprint arXiv:1710.06924},
year={2017}
}
"""
class VisDaC(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for the VisDa-C dataset."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
TRAIN_DATA_URL = 'http://csr.bu.edu/ftp/visda17/clf/train.tar'
VALIDATION_DATA_URL = 'http://csr.bu.edu/ftp/visda17/clf/validation.tar'
def _info(self) -> tfds.core.DatasetInfo:
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(),
'label': tfds.features.ClassLabel(
names=[
'aeroplane',
'bicycle',
'bus',
'car',
'horse',
'knife',
'motorcycle',
'person',
'plant',
'skateboard',
'train',
'truck',
]
),
}),
supervised_keys=('image', 'label'),
homepage=(
'https://github.com/VisionLearningGroup/taskcv-2017-public/'
'tree/master/classification'
),
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
paths = dl_manager.download_and_extract({
'train': self.TRAIN_DATA_URL,
'validation': self.VALIDATION_DATA_URL,
})
return {
'train': self._generate_examples(data_path=paths['train'] / 'train'),
'validation': self._generate_examples(
data_path=paths['validation'] / 'validation'
),
}
def _generate_examples(self, data_path):
counter = itertools.count()
for path in data_path.iterdir():
if path.is_dir():
for image_path in path.iterdir():
yield next(counter), {
'image': image_path,
'label': path.name,
}
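# Usage sketch (illustrative only; running it triggers a large download from
# the URLs above): build the dataset through TFDS and read the train split.
if __name__ == '__main__':
  builder = VisDaC()
  builder.download_and_prepare()
  train_ds = builder.as_dataset(split='train')
  for example in train_ds.take(1):
    print(example['label'])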
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Office-31 dataset."""
import itertools
import tensorflow_datasets as tfds
_DESCRIPTION = """
Office-31 dataset.
"""
_CITATION = """
@inproceedings{saenko2010adapting,
title={Adapting visual category models to new domains},
author={Saenko, Kate and Kulis, Brian and Fritz, Mario and Darrell, Trevor},
booktitle={Proceedings of the European Conference on Computer Vision},
pages={213--226},
year={2010},
}
"""
class Office31(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for the Office-31 dataset."""
name = 'office_31'
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
DATA_URL = ('https://drive.google.com/uc?export=download&'
'id=0B4IapRTv9pJ1WGZVd1VDMmhwdlE')
BUILDER_CONFIGS = [
tfds.core.BuilderConfig(name=name)
for name in ('amazon', 'dslr', 'webcam')
]
def _info(self) -> tfds.core.DatasetInfo:
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(),
'label': tfds.features.ClassLabel(
names=[
'back_pack',
'bike',
'bike_helmet',
'bookcase',
'bottle',
'calculator',
'desk_chair',
'desk_lamp',
'desktop_computer',
'file_cabinet',
'headphones',
'keyboard',
'laptop_computer',
'letter_tray',
'mobile_phone',
'monitor',
'mouse',
'mug',
'paper_notebook',
'pen',
'phone',
'printer',
'projector',
'punchers',
'ring_binder',
'ruler',
'scissors',
'speaker',
'stapler',
'tape_dispenser',
'trash_can',
]
),
}),
supervised_keys=('image', 'label'),
homepage='https://faculty.cc.gatech.edu/~judy/domainadapt/',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
paths = dl_manager.download_and_extract({'train': self.DATA_URL})
return {
'train': self._generate_examples(
data_path=paths['train'] / self.builder_config.name / 'images'
),
}
def _generate_examples(self, data_path):
counter = itertools.count()
for path in data_path.iterdir():
for image_path in path.iterdir():
yield next(counter), {
'image': image_path,
'label': path.name,
}
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Office-31 dataset."""
from .office_31 import Office31
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Office-Home dataset."""
import itertools
import tensorflow_datasets as tfds
_DESCRIPTION = """
Office-Home dataset.
"""
_CITATION = """
@inproceedings{venkateswara2017deep,
title={Deep hashing network for unsupervised domain adaptation},
author={Venkateswara, Hemanth and Eusebio, Jose and Chakraborty, Shayok and Panchanathan, Sethuraman},
booktitle={Proceedings of the Conference on Computer Vision and Pattern Recognition},
pages={5018--5027},
year={2017}
}
"""
_MANUAL_DOWNLOAD_INSTRUCTIONS = """
Download and unzip OfficeHomeDataset_10072016.zip from
https://drive.google.com/uc?export=download&id=0B81rNlvomiwed0V1YUxQdC1uOTg,
then move the OfficeHomeDataset_10072016/ directory into TFDS' manual download
directory.
"""
class OfficeHome(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for the Office-Home dataset."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
MANUAL_DOWNLOAD_INSTRUCTIONS = _MANUAL_DOWNLOAD_INSTRUCTIONS
BUILDER_CONFIGS = [
tfds.core.BuilderConfig(name=name)
for name in ('art', 'clipart', 'product', 'real_world')
]
def _info(self) -> tfds.core.DatasetInfo:
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(),
'label': tfds.features.ClassLabel(
names=[
'alarm_clock',
'backpack',
'batteries',
'bed',
'bike',
'bottle',
'bucket',
'calculator',
'calendar',
'candles',
'chair',
'clipboards',
'computer',
'couch',
'curtains',
'desk_lamp',
'drill',
'eraser',
'exit_sign',
'fan',
'file_cabinet',
'flipflops',
'flowers',
'folder',
'fork',
'glasses',
'hammer',
'helmet',
'kettle',
'keyboard',
'knives',
'lamp_shade',
'laptop',
'marker',
'monitor',
'mop',
'mouse',
'mug',
'notebook',
'oven',
'pan',
'paper_clip',
'pen',
'pencil',
'postit_notes',
'printer',
'push_pin',
'radio',
'refrigerator',
'ruler',
'scissors',
'screwdriver',
'shelf',
'sink',
'sneakers',
'soda',
'speaker',
'spoon',
'table',
'telephone',
'toothbrush',
'toys',
'trash_can',
'tv',
'webcam',
]
),
}),
supervised_keys=('image', 'label'),
homepage='https://www.hemanthdv.org/officeHomeDataset.html',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
domain = {
'art': 'Art',
'clipart': 'Clipart',
'product': 'Product',
'real_world': 'Real World',
}[self.builder_config.name]
return {
'train': self._generate_examples(
data_path=dl_manager.manual_dir
/ 'OfficeHomeDataset_10072016'
/ domain
),
}
def _generate_examples(self, data_path):
counter = itertools.count()
for path in data_path.iterdir():
for image_path in path.iterdir():
yield next(counter), {
'image': image_path,
'label': path.name.lower(),
}
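# Usage sketch (illustrative; the manual_dir path below is hypothetical):
# after placing OfficeHomeDataset_10072016/ in the TFDS manual download
# directory, a single domain can be built as follows.
if __name__ == '__main__':
  builder = OfficeHome(config='art')
  builder.download_and_prepare(
      download_config=tfds.download.DownloadConfig(
          manual_dir='/tmp/tfds_manual'
      )
  )
  art_ds = builder.as_dataset(split='train')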
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Office-Home dataset."""
from .office_home import OfficeHome
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for training separated clustering.
The core workflow consists of the following:
a) Make a collection of wav files divided by label into sub-directories.
b) Load an `interface.EmbeddingModel`.
c) Create a MergedDataset using the directory and embedding model.
This will load all of the labeled wavs and run the embedding model over
all of them, creating an in-memory dataset.
This dataset can then be used for all kinds of small experiments, such as
training small classifiers or evaluating clustering methods.
"""
import collections
import dataclasses
import time
from typing import Dict, Sequence, Tuple
from chirp import audio_utils
from chirp.inference import interface
from etils import epath
import numpy as np
import tensorflow as tf
import tqdm
@dataclasses.dataclass
class MergedDataset:
"""In-memory dataset of labeled audio with embeddings.
Attributes:
data: Dictionary of embedding outputs.
num_classes: Number of classes.
embedding_dim: Dimension of embeddings.
labels: Tuple with the labels for each file.
"""
  # The following are populated automatically by the classmethod below.
data: Dict[str, np.ndarray]
num_classes: int
embedding_dim: int
labels: Tuple[str, ...]
@classmethod
def from_folder_of_folders(
cls,
base_dir: str,
embedding_model: interface.EmbeddingModel,
time_pooling: str = 'mean',
exclude_classes: Sequence[str] = (),
load_audio: bool = True,
target_sample_rate: int = -2,
audio_file_pattern: str = '*',
) -> 'MergedDataset':
"""Generating MergedDataset via folder-of-folders method.
Args:
base_dir: Base directory where either folder-of-folders of audio or
tfrecord embeddings are stored.
embedding_model: EmbeddingModel used to produce embeddings.
time_pooling: Key for time pooling strategy.
exclude_classes: Classes to skip.
load_audio: Whether to load audio into memory.
target_sample_rate: Resample loaded audio to this sample rate. If -1,
loads raw audio with no resampling. If -2, uses the embedding_model
sample rate.
audio_file_pattern: The glob pattern to use for finding audio files within
the sub-folders.
Returns:
MergedDataset
"""
print('Embedding from Folder of Folders...')
st = time.time()
labels, merged = embed_dataset(
base_dir=base_dir,
embedding_model=embedding_model,
time_pooling=time_pooling,
exclude_classes=exclude_classes,
load_audio=load_audio,
target_sample_rate=target_sample_rate,
audio_file_pattern=audio_file_pattern,
)
elapsed = time.time() - st
print(f'\n...embedded dataset in {elapsed:5.2f}s...')
data = merged
embedding_dim = merged['embeddings'].shape[-1]
labels = tuple(labels)
num_classes = len(labels)
print(f' found {num_classes} classes.')
class_counts = collections.defaultdict(int)
for cl, cl_str in zip(merged['label'], merged['label_str']):
class_counts[(cl, cl_str)] += 1
for (cl, cl_str), count in sorted(class_counts.items()):
print(f' class {cl_str} / {cl} : {count}')
return cls(
data=data,
embedding_dim=embedding_dim,
num_classes=num_classes,
labels=labels,
)
def create_random_train_test_split(
self,
train_ratio: float | None,
train_examples_per_class: int | None,
seed: int,
exclude_classes: Sequence[int] = (),
exclude_eval_classes: Sequence[int] = (),
):
"""Generate a train/test split with a target number of train examples."""
    if train_ratio is None and train_examples_per_class is None:
      raise ValueError(
          'Must specify one of train_ratio and train_examples_per_class.'
      )
    elif train_ratio is not None and train_examples_per_class is not None:
      raise ValueError(
          'Must specify only one of train_ratio and train_examples_per_class.'
      )
# Use a seeded shuffle to get a random ordering of the data.
locs = list(range(self.data['label'].shape[0]))
np.random.seed(seed)
np.random.shuffle(locs)
classes = set(self.data['label'])
class_counts = {cl: np.sum(self.data['label'] == cl) for cl in classes}
if train_examples_per_class is not None:
class_limits = {cl: train_examples_per_class for cl in classes}
else:
class_limits = {cl: train_ratio * class_counts[cl] for cl in classes}
class_locs = {cl: [] for cl in classes}
train_locs = []
test_locs = []
for loc in locs:
cl = self.data['label'][loc]
if cl in exclude_classes:
continue
if len(class_locs[cl]) < class_limits[cl]:
class_locs[cl].append(loc)
train_locs.append(loc)
elif cl not in exclude_eval_classes:
test_locs.append(loc)
train_locs = np.array(train_locs)
test_locs = np.array(test_locs)
return train_locs, test_locs, class_locs
def create_keras_dataset(
self, locs: Sequence[int], is_train: bool, batch_size: int
) -> tf.data.Dataset:
"""Create a keras-friendly tf.data.Dataset from the in-memory dataset."""
def _data_gen():
for loc in locs:
yield (
self.data['embeddings'][loc],
tf.one_hot(self.data['label'][loc], self.num_classes),
)
ds = tf.data.Dataset.from_generator(
_data_gen,
output_types=(tf.float32, tf.int64),
output_shapes=(self.embedding_dim, self.num_classes),
)
if is_train:
ds = ds.shuffle(1024)
ds = ds.batch(batch_size)
return ds
def pool_time_axis(embeddings, pool_method, axis=1):
"""Apply pooling over the specified axis."""
  if pool_method == 'mean':
    if embeddings.shape[axis] == 0:
      # Avoid NaNs when taking the mean over an empty axis.
      return embeddings.sum(axis=axis)
    return embeddings.mean(axis=axis)
  elif pool_method == 'max':
    return embeddings.max(axis=axis)
  elif pool_method == 'mid':
    # Take the middle frame along the time axis (assumes axis == 1).
    t = embeddings.shape[axis] // 2
    return embeddings[:, t]
  elif pool_method == 'flatten':
    if len(embeddings.shape) != 3 or axis != 1:
      raise ValueError(
          'Can only flatten time for embeddings with shape [B, T, D].'
      )
    depth = embeddings.shape[-1]
    time_steps = embeddings.shape[1]
    return embeddings.reshape([embeddings.shape[0], time_steps * depth])
  raise ValueError(f'Unrecognized reduction method {pool_method}.')
def _pad_audio(audio: np.ndarray, target_length: int) -> np.ndarray:
if len(audio.shape) > 1:
raise ValueError('audio should be a flat array.')
if audio.shape[0] > target_length:
return audio
pad_amount = target_length - audio.shape[0]
front = pad_amount // 2
back = pad_amount - front
return np.pad(audio, [(front, back)], 'constant')
def embed_dataset(
base_dir: str,
embedding_model: interface.EmbeddingModel,
time_pooling: str,
exclude_classes: Sequence[str] = (),
load_audio: bool = True,
target_sample_rate: int = -1,
audio_file_pattern: str = '*',
) -> Tuple[Sequence[str], Dict[str, np.ndarray]]:
"""Add embeddings to an eval dataset.
  Embed a dataset, creating an in-memory copy of all data with embeddings
  added. The base_dir should contain folders corresponding to classes, and
  each sub-folder should contain audio files for the respective class.
  Note that any audio files placed directly in base_dir will be ignored.
Args:
    base_dir: Directory containing audio data.
embedding_model: Model for computing audio embeddings.
time_pooling: Key for time pooling strategy.
exclude_classes: Classes to skip.
load_audio: Whether to load audio into memory.
target_sample_rate: Resample loaded audio to this sample rate. If -1, loads
raw audio with no resampling. If -2, uses the embedding_model sample rate.
audio_file_pattern: The glob pattern to use for finding audio files within
the sub-folders.
Returns:
    Ordered labels and a Dict containing the entire embedded dataset.
"""
base_dir = epath.Path(base_dir)
labels = sorted([p.name for p in base_dir.glob('*') if p.is_dir()])
if not labels:
raise ValueError(
'No subfolders found in base directory. Audio will be '
'matched as "base_dir/*/*.wav", with the subfolders '
'indicating class names.'
)
labels = [label for label in labels if label not in exclude_classes]
if hasattr(embedding_model, 'window_size_s'):
window_size = int(
embedding_model.window_size_s * embedding_model.sample_rate
)
else:
window_size = -1
if target_sample_rate == -2:
target_sample_rate = embedding_model.sample_rate
merged = collections.defaultdict(list)
for label_idx, label in enumerate(labels):
label_hot = np.zeros([len(labels)], np.int32)
label_hot[label_idx] = 1
filepaths = [
fp.as_posix() for fp in (base_dir / label).glob(audio_file_pattern)
]
if not filepaths:
raise ValueError(
'No files matching {} were found in directory {}'.format(
audio_file_pattern, base_dir / label
)
)
audio_iterator = audio_utils.multi_load_audio_window(
filepaths, None, target_sample_rate, -1
)
for fp, audio in tqdm.tqdm(
zip(filepaths, audio_iterator), total=len(filepaths)
):
audio_size = audio.shape[0]
if window_size > audio_size:
audio = _pad_audio(audio, window_size)
audio = audio.astype(np.float32)
outputs = embedding_model.embed(audio)
if outputs.embeddings is None:
raise ValueError('Embedding model did not produce any embeddings!')
# If the audio was separated then the raw audio is in the first channel.
# Embedding shape is either [B, F, C, D] or [F, C, D] so channel is
# always -2.
channel_pooling = (
'squeeze' if outputs.embeddings.shape[-2] == 1 else 'first'
)
embeds = outputs.pooled_embeddings(time_pooling, channel_pooling)
merged['embeddings'].append(embeds)
filename = epath.Path(fp).name
merged['filename'].append(f'{label}/{filename}')
if load_audio:
merged['audio'].append(audio)
merged['label'].append(label_idx)
merged['label_str'].append(label)
merged['label_hot'].append(label_hot)
if load_audio:
    # Pad audio so all clips share the same length.
target_audio_len = np.max([a.shape[0] for a in merged['audio']])
merged['audio'] = [_pad_audio(a, target_audio_len) for a in merged['audio']]
outputs = {}
for k in merged.keys():
outputs[k] = np.stack(merged[k])
return labels, outputs
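# Small self-check of the pooling helpers (synthetic embeddings; no audio or
# embedding model required). Shapes follow the [B, T, D] convention assumed
# by pool_time_axis.
if __name__ == '__main__':
  x = np.random.randn(2, 5, 8)
  assert pool_time_axis(x, 'mean').shape == (2, 8)
  assert pool_time_axis(x, 'max').shape == (2, 8)
  assert pool_time_axis(x, 'mid').shape == (2, 8)
  assert pool_time_axis(x, 'flatten').shape == (2, 40)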
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification over embeddings."""
import dataclasses
from typing import Sequence
from chirp.models import metrics
from chirp.projects.multicluster import data_lib
import numpy as np
import tensorflow as tf
@dataclasses.dataclass
class ClassifierMetrics:
top1_accuracy: float
auc_roc: float
recall: float
cmap_value: float
class_maps: dict[str, float]
test_logits: dict[str, np.ndarray]
def get_two_layer_model(
num_hiddens: int, embedding_dim: int, num_classes: int, batch_norm: bool
) -> tf.keras.Model:
"""Create a simple two-layer Keras model."""
layers = [tf.keras.Input(shape=[embedding_dim])]
if batch_norm:
layers.append(tf.keras.layers.BatchNormalization())
layers += [
tf.keras.layers.Dense(num_hiddens, activation='relu'),
tf.keras.layers.Dense(num_classes),
]
model = tf.keras.Sequential(layers)
return model
def get_linear_model(embedding_dim: int, num_classes: int) -> tf.keras.Model:
"""Create a simple linear Keras model."""
model = tf.keras.Sequential([
tf.keras.Input(shape=[embedding_dim]),
tf.keras.layers.Dense(num_classes),
])
return model
def train_from_locs(
model: tf.keras.Model,
merged: data_lib.MergedDataset,
train_locs: Sequence[int],
test_locs: Sequence[int],
num_epochs: int,
batch_size: int,
learning_rate: float | None = None,
use_bce_loss: bool = True,
) -> ClassifierMetrics:
"""Trains a classification model over embeddings and labels."""
if use_bce_loss:
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
else:
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
loss=loss,
metrics=[
tf.keras.metrics.Precision(top_k=1, name='top1prec'),
tf.keras.metrics.AUC(
curve='ROC', name='auc', from_logits=True, multi_label=True
),
tf.keras.metrics.RecallAtPrecision(0.9, name='recall0.9'),
],
)
train_ds = merged.create_keras_dataset(train_locs, True, batch_size)
test_ds = merged.create_keras_dataset(test_locs, False, batch_size)
model.fit(train_ds, epochs=num_epochs, verbose=0)
# Compute overall metrics to avoid online approximation error in Keras.
test_logits = model.predict(test_ds, verbose=0, batch_size=8)
test_labels_hot = merged.data['label_hot'][test_locs]
test_labels = merged.data['label'][test_locs]
# Create a dictionary of test logits for each class.
test_logits_dict = {}
for k in set(test_labels):
lbl_locs = np.argwhere(test_labels == k)[:, 0]
test_logits_dict[k] = test_logits[lbl_locs, k]
top_logit_idxs = np.argmax(test_logits, axis=1)
top1acc = np.mean(test_labels == top_logit_idxs)
# TODO(tomdenton): Implement recall@precision metric.
recall = -1.0
cmap_value = metrics.cmap(test_logits, test_labels_hot)['macro']
auc_roc = metrics.roc_auc(test_logits, test_labels_hot)
return ClassifierMetrics(
top1acc,
auc_roc['macro'],
recall,
cmap_value,
auc_roc['individual'],
test_logits_dict,
)
def train_embedding_model(
model: tf.keras.Model,
merged: data_lib.MergedDataset,
train_ratio: float | None,
train_examples_per_class: int | None,
num_epochs: int,
random_seed: int,
batch_size: int,
learning_rate: float | None = None,
) -> ClassifierMetrics:
"""Trains a classification model over embeddings and labels."""
train_locs, test_locs, _ = merged.create_random_train_test_split(
train_ratio, train_examples_per_class, random_seed
)
test_metrics = train_from_locs(
model=model,
merged=merged,
train_locs=train_locs,
test_locs=test_locs,
num_epochs=num_epochs,
batch_size=batch_size,
learning_rate=learning_rate,
)
return test_metrics
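# End-to-end sketch on synthetic data (hypothetical values; no audio or
# embedding model involved): build a fake MergedDataset by hand, then fit a
# linear probe over its embeddings with train_embedding_model.
if __name__ == '__main__':
  rng = np.random.default_rng(0)
  n, dim, n_classes = 64, 16, 4
  labels = rng.integers(0, n_classes, size=n)
  fake = data_lib.MergedDataset(
      data={
          'embeddings': rng.normal(size=(n, dim)).astype(np.float32),
          'label': labels,
          'label_hot': np.eye(n_classes, dtype=np.int32)[labels],
      },
      num_classes=n_classes,
      embedding_dim=dim,
      labels=tuple(str(i) for i in range(n_classes)),
  )
  probe = get_linear_model(embedding_dim=dim, num_classes=n_classes)
  test_metrics = train_embedding_model(
      probe,
      fake,
      train_ratio=0.5,
      train_examples_per_class=None,
      num_epochs=2,
      random_seed=1,
      batch_size=8,
      learning_rate=1e-3,
  )
  print('top-1 accuracy:', test_metrics.top1_accuracy)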
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for EfficientNet."""
import operator
from chirp.models import efficientnet
from jax import numpy as jnp
from jax import random
from jax import tree_util
from absl.testing import absltest
class EfficientNetTest(absltest.TestCase):
def test_efficientnet(self):
efficientnet_ = efficientnet.EfficientNet(
model=efficientnet.EfficientNetModel.B0, include_top=False
)
key = random.PRNGKey(0)
params_key, dropout_key = random.split(key)
inputs = jnp.ones((1, 224, 224, 3))
out, variables = efficientnet_.init_with_output(
{"dropout": dropout_key, "params": params_key}, inputs, train=True
)
self.assertEqual(out.shape, (1, 7, 7, 1280))
num_parameters = tree_util.tree_reduce(
operator.add, tree_util.tree_map(jnp.size, variables["params"])
)
# Keras has 7 more parameters due to the normalization of the inputs
self.assertEqual(num_parameters, 4_007_548)
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for layers."""
import operator
from chirp.models import layers
from jax import numpy as jnp
from jax import random
from jax import tree_util
from absl.testing import absltest
class LayersTest(absltest.TestCase):
def test_mbconv(self):
# See table 2 in the MobileNetV2 paper
mbconv = layers.MBConv(
features=24, kernel_size=(3, 3), strides=2, expand_ratio=6
)
key = random.PRNGKey(0)
inputs = jnp.ones((1, 112, 112, 16))
outputs, variables = mbconv.init_with_output(
key, inputs, use_running_average=False
)
self.assertEqual(outputs.shape, (1, 56, 56, 24))
num_parameters = tree_util.tree_reduce(
operator.add, tree_util.tree_map(jnp.size, variables["params"])
)
    expected_num_parameters = (
        16 * 6 * 16  # Expansion (1x1 conv, 16 -> 96 channels)
        + 3 * 3 * 6 * 16  # Depthwise separable convolution (3x3, 96 channels)
        + 16 * 6 * 24  # Reduction (1x1 conv, 96 -> 24 channels)
    )
self.assertEqual(num_parameters, expected_num_parameters)
def test_squeeze_and_excitation(self):
squeeze_and_excitation = layers.SqueezeAndExcitation()
key = random.PRNGKey(0)
inputs = jnp.ones((1, 112, 112, 16))
outputs, variables = squeeze_and_excitation.init_with_output(key, inputs)
self.assertEqual(outputs.shape, (1, 112, 112, 16))
num_parameters = tree_util.tree_reduce(
operator.add, tree_util.tree_map(jnp.size, variables["params"])
)
    expected_num_parameters = (
        16 * 16 // 4 + 16 // 4  # Squeeze (dense 16 -> 4, plus bias)
        + 16 // 4 * 16 + 16  # Excite (dense 4 -> 16, plus bias)
    )
self.assertEqual(num_parameters, expected_num_parameters)
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bird taxonomy dataset tests."""
import shutil
import tempfile
from unittest import mock
from chirp.data import filter_scrub_utils as fsu
from chirp.data import tfds_features
from chirp.data.bird_taxonomy import bird_taxonomy
from etils import epath
import numpy as np
import tensorflow_datasets as tfds
from absl.testing import absltest
class BirdTaxonomyTest(tfds.testing.DatasetBuilderTestCase):
"""Tests for the bird taxonomy dataset."""
DATASET_CLASS = bird_taxonomy.BirdTaxonomy
BUILDER_CONFIG_NAMES_TO_TEST = [
config.name
for config in DATASET_CLASS.BUILDER_CONFIGS
if not (
'tiny' in config.name
or 'upstream' in config.name
or 'downstream' in config.name
or 'representative' in config.name
)
]
EXAMPLE_DIR = DATASET_CLASS.code_path.parent / 'placeholder_data'
DL_EXTRACT_RESULT = {'taxonomy_info': 'taxonomy_info.json'}
SPLITS = {'train': 4}
SKIP_CHECKSUMS = True
@classmethod
def setUpClass(cls):
super().setUpClass()
    # `self.create_tempdir()` raises an UnparsedFlagAccessError, which is why
    # we use `tempfile` directly.
cls.tempdir = tempfile.mkdtemp()
_ = tfds.core.lazy_imports.librosa
cls.url_patcher = mock.patch.object(
cls.DATASET_CLASS, 'GCS_URL', epath.Path(cls.tempdir)
)
cls.query_patchers = []
for i in [3, 4]:
cls.query_patchers.append(
mock.patch.object(
cls.DATASET_CLASS.BUILDER_CONFIGS[i],
'data_processing_query',
fsu.QuerySequence([]),
)
)
cls.url_patcher.start()
for patcher in cls.query_patchers:
patcher.start()
subdir = epath.Path(cls.tempdir) / 'audio-data' / 'comter'
subdir.mkdir(parents=True)
for i in range(4):
tfds.core.lazy_imports.pydub.AudioSegment(
b'\0\1' * int(10_000 * 10),
metadata={
'channels': 1,
'sample_width': 2,
'frame_rate': 10_000,
'frame_width': 2,
},
).export(subdir / f'XC{i:05d}.mp3', format='mp3')
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls.url_patcher.stop()
for patcher in cls.query_patchers:
patcher.stop()
shutil.rmtree(cls.tempdir)
# TODO(bartvm): Remove when tensorflow-datasets PyPI package is fixed
@absltest.skip
def test_tags_are_valid(self):
pass
class Int16AsFloatTensorTest(absltest.TestCase):
"""Tests for the Int16AsFloatTensor feature."""
def test_encode_example(self):
feature = tfds_features.Int16AsFloatTensor(shape=[None], sample_rate=22050)
np.testing.assert_allclose(
feature.encode_example([-1.0, 0.0]),
np.array([-(2**15), 0], dtype=np.int16),
)
def test_reconstruct(self):
example_data = [-1.0, 0.0, 0.5]
feature = tfds_features.Int16AsFloatTensor(
shape=[None], sample_rate=22050, encoding=tfds.features.Encoding.ZLIB
)
np.testing.assert_allclose(
example_data,
feature.decode_example(feature.encode_example(example_data)),
)
def test_exception_on_non_float(self):
feature = tfds_features.Int16AsFloatTensor(shape=[None], sample_rate=22050)
self.assertRaises(
ValueError, feature.encode_example, np.array([-1, 0, 0], dtype=np.int16)
)
def test_exception_on_out_of_bound_values(self):
feature = tfds_features.Int16AsFloatTensor(shape=[None], sample_rate=22050)
self.assertRaises(ValueError, feature.encode_example, [1.0])
self.assertRaises(ValueError, feature.encode_example, [-1.5])
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pipeline."""
import os
import tempfile
from unittest import mock
from chirp.data import utils as data_utils
from chirp.models import frontend
from chirp.preprocessing import pipeline
from chirp.taxonomy import namespace
from chirp.taxonomy import namespace_db
from chirp.tests import fake_dataset
from jax import numpy as jnp
import numpy as np
import tensorflow as tf
from absl.testing import absltest
from absl.testing import parameterized
class PipelineTest(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# Test with two CPU devices.
os.environ['XLA_FLAGS'] = '--xla_force_host_platform_device_count=2'
data_dir = tempfile.TemporaryDirectory('data_dir').name
fake_builder = fake_dataset.FakeDataset(data_dir=data_dir)
fake_builder.download_and_prepare()
cls._builder = fake_builder
def test_mixin(self):
examples = {
'audio': tf.random.uniform([2, 100], dtype=tf.float32),
'segment_start': tf.convert_to_tensor([17, 64], dtype=tf.int64),
'segment_end': tf.convert_to_tensor([117, 164], dtype=tf.int64),
'label': tf.convert_to_tensor([[1], [2]], dtype=tf.int64),
'label_str': tf.convert_to_tensor(
[['placeholder'], ['placeholder']], dtype=tf.string
),
'bg_labels': tf.convert_to_tensor([[2, 3], [4, 5]], dtype=tf.int64),
'filename': tf.convert_to_tensor(
['placeholder', 'placeholder'], dtype=tf.string
),
}
ds = tf.data.Dataset.from_tensor_slices(examples)
ds = pipeline.Pipeline(
[pipeline.OnlyJaxTypes(), pipeline.MultiHot()], deterministic=True
)(ds, self._builder.info)
mixed_ds = pipeline.Pipeline([pipeline.MixAudio(1.0)], deterministic=True)(
ds, self._builder.info
)
mixed_example = next(mixed_ds.as_numpy_iterator())
np.testing.assert_allclose(
mixed_example['audio'], examples['audio'][0] + examples['audio'][1]
)
np.testing.assert_equal(
mixed_example['bg_labels'],
np.asarray(
[0, 0, 1, 1, 1, 1]
+ [0] * (self._builder.info.features['bg_labels'].num_classes - 6),
dtype=np.int32,
),
)
unmixed_ds = pipeline.Pipeline(
[pipeline.MixAudio(mixin_prob=0.0)], deterministic=True
)(ds, self._builder.info)
for x, y in tf.data.Dataset.zip((ds, unmixed_ds)).as_numpy_iterator():
for key in x:
if key in ('source_audio', 'segment_start', 'segment_end'):
np.testing.assert_equal(x[key], y[key][:1])
np.testing.assert_equal(np.zeros_like(x[key]), y[key][1:])
else:
np.testing.assert_equal(x[key], y[key], err_msg=f'{key} not equal')
def test_process_example(self):
sample_rate_hz = self._builder.info.features['audio'].sample_rate
audio_length_s = 6
audio_length_samples = sample_rate_hz * audio_length_s
input_gain = 10.0
window_size_s = 5
min_gain = 0.15
max_gain = 0.25
example = {
'audio': tf.random.uniform(
[audio_length_samples],
minval=-input_gain,
maxval=input_gain,
dtype=tf.float32,
),
'segment_start': tf.convert_to_tensor([17, 64], dtype=tf.int64),
'segment_end': tf.convert_to_tensor(
[17 + audio_length_samples, 64 + audio_length_samples],
dtype=tf.int64,
),
'label': tf.convert_to_tensor([1], dtype=tf.int64),
'label_str': tf.convert_to_tensor(['placeholder'], dtype=tf.string),
'bg_labels': tf.convert_to_tensor([2, 3], dtype=tf.int64),
'filename': tf.convert_to_tensor('placeholder', dtype=tf.string),
}
example = pipeline.OnlyJaxTypes()(example, self._builder.info)
example = pipeline.MultiHot()(example, self._builder.info)
# The bg_labels feature should be multi-hot encoded.
num_classes = self._builder.info.features['bg_labels'].feature.num_classes
np.testing.assert_equal(
example['bg_labels'].numpy(),
np.asarray([0, 0, 1, 1] + [0] * (num_classes - 4), dtype=np.int32),
)
example = pipeline.RandomSlice(window_size_s, names=('audio',))(
example, self._builder.info
)
example = pipeline.RandomNormalizeAudio(
min_gain, max_gain, names=('audio',)
)(example, self._builder.info)
# The audio feature should be trimmed to the requested length, and its
# maximum absolute value should be within [min_gain, max_gain].
audio = example['audio'].numpy()
self.assertEqual(audio.shape, (sample_rate_hz * window_size_s,))
# There is a constant value of 0.01 added to the denominator during
# normalization.
self.assertTrue(
input_gain / (input_gain + 0.01) * min_gain
<= np.abs(audio).max()
<= input_gain / (input_gain + 0.01) * max_gain
)
# The label feature should be one-hot encoded.
key = 'label'
np.testing.assert_equal(
example[key].numpy(),
np.asarray(
[0, 1, 0]
+ [0] * (self._builder.info.features[key].num_classes - 3),
dtype=np.int32,
),
)
# The label_str and filename features should be deleted.
for key in ('label_str', 'filename'):
self.assertNotIn(key, example)
def test_get_dataset(self):
test_pipeline = pipeline.Pipeline([
pipeline.OnlyJaxTypes(),
pipeline.MultiHot(),
pipeline.MixAudio(mixin_prob=0.25),
pipeline.Batch(8),
pipeline.RandomSlice(window_size=5),
pipeline.RandomNormalizeAudio(min_gain=0.15, max_gain=0.25),
])
for split in self._builder.info.splits.values():
dataset, _ = data_utils.get_dataset(
split.name,
dataset_directory=self._builder.data_dir,
pipeline=test_pipeline,
)
example = next(dataset.as_numpy_iterator())
self.assertLen(example['audio'].shape, 2)
self.assertLen(example['source_audio'].shape, 3)
self.assertSetEqual(
set(example.keys()),
{
'audio',
'source_audio',
'bg_labels',
'label',
'segment_start',
'segment_end',
'recording_id',
'segment_id',
},
)
    # Check that an error is raised when requesting the last dataset split
    # without a pipeline.
with self.assertRaises(ValueError):
data_utils.get_dataset(
split.name, dataset_directory=self._builder.data_dir
)
def test_convert_bird_taxonomy_labels(self):
db = namespace_db.load_db()
np.random.seed(42)
source_class_list = db.class_lists['caples']
# Create a shuffled version of the source class list.
source_classes = list(source_class_list.classes)
np.random.shuffle(source_classes)
source_class_list = namespace.ClassList('ebird2021', source_classes)
target_class_list = db.class_lists['xenocanto']
self.assertLen(source_class_list.classes, 79)
self.assertLen(target_class_list.classes, 10932)
    # Example labels include four 'good' labels and several out-of-range
    # labels.
# The following table lists their index in the source_class_list,
# label, genus, family, and order.
# 0 amedip cinclus cinclidae passeriformes
# 20 comnig chordeiles caprimulgidae caprimulgiformes
# 40 macwar geothlypis parulidae passeriformes
# 78 yerwar setophaga parulidae passeriformes
example = {
'label': tf.constant(
[
source_class_list.classes.index('amedip'),
source_class_list.classes.index('comnig'),
source_class_list.classes.index('macwar'),
source_class_list.classes.index('yerwar'),
79,
10655,
10932,
-1,
],
tf.int64,
),
'bg_labels': tf.constant([18, 1000], tf.int64),
}
converter = pipeline.ConvertBirdTaxonomyLabels(
target_class_list='xenocanto'
)
converted = converter.convert_features(example, source_class_list)
# Check species labels are correct.
for species in ('amedip', 'comnig', 'macwar', 'yerwar'):
target_idx = target_class_list.classes.index(species)
self.assertEqual(converted['label'][target_idx], 1)
for name, shape, num in (
('label', 10932, 4),
('bg_labels', 10932, 1),
('genus', 2333, 4),
('family', 249, 3),
('order', 41, 2),
):
print(name, shape, num, sum(converted[name].numpy()))
self.assertIn(name, converted)
self.assertLen(converted[name].shape, 1)
self.assertEqual(converted[name].shape[0], shape)
self.assertEqual(
converted[name].shape[0], converted[name + '_mask'].shape[0]
)
self.assertEqual(sum(converted[name].numpy()), num)
for image_name, image_size in (
('label_mask', 79),
('genus_mask', 62),
('family_mask', 30),
('order_mask', 11),
):
self.assertIn(image_name, converted)
self.assertLen(converted[image_name].shape, 1)
self.assertEqual(np.sum(converted[image_name].numpy()), image_size)
def test_labels_to_string(self):
examples = {
'segment_start': tf.convert_to_tensor([17, 64], dtype=tf.int64),
'label': tf.convert_to_tensor([[1], [2]], dtype=tf.int64),
'bg_labels': tf.convert_to_tensor([[2, 3], [4, 5]], dtype=tf.int64),
'filename': tf.convert_to_tensor(
['placeholder', 'placeholder'], dtype=tf.string
),
}
ds = tf.data.Dataset.from_tensor_slices(examples)
ds = pipeline.Pipeline(
[
pipeline.LabelsToString(),
]
)(
ds, self._builder.info
).batch(2)
class_names = self._builder.info.features['label'].feature.names
processed_example = next(ds.as_numpy_iterator())
np.testing.assert_equal(
processed_example['segment_start'], examples['segment_start']
)
np.testing.assert_equal(
processed_example['label'],
[class_names[1].encode('utf-8'), class_names[2].encode('utf-8')],
)
np.testing.assert_equal(
processed_example['bg_labels'],
[
f'{class_names[2]} {class_names[3]}'.encode('utf-8'),
f'{class_names[4]} {class_names[5]}'.encode('utf-8'),
],
)
np.testing.assert_equal(processed_example['filename'], examples['filename'])
def test_only_keep(self):
examples = {
'segment_start': tf.convert_to_tensor([17, 64], dtype=tf.int64),
'label': tf.convert_to_tensor([[1], [2]], dtype=tf.int64),
'bg_labels': tf.convert_to_tensor([[2, 3], [4, 5]], dtype=tf.int64),
'filename': tf.convert_to_tensor(
['placeholder', 'placeholder'], dtype=tf.string
),
}
ds = tf.data.Dataset.from_tensor_slices(examples)
ds = pipeline.Pipeline(
[
pipeline.OnlyKeep(names=['segment_start', 'bg_labels']),
]
)(ds, self._builder.info).batch(2)
processed_example = next(ds.as_numpy_iterator())
self.assertSameElements(
processed_example.keys(), ['segment_start', 'bg_labels']
)
np.testing.assert_equal(
processed_example['segment_start'], examples['segment_start']
)
np.testing.assert_equal(
processed_example['bg_labels'], examples['bg_labels']
)
@parameterized.parameters(
None,
frontend.LogScalingConfig(floor=1e-5, scalar=0.1),
)
def test_melspec(self, scaling_config):
batch_size = 3
sample_rate_hz = 22050
time_size = 5 * sample_rate_hz
audio = tf.math.sin(tf.linspace(0.0, 440 * jnp.pi, time_size))
noise = 0.01 * tf.random.normal((batch_size, time_size))
signal = audio + noise
model = frontend.MelSpectrogram(
features=160,
stride=sample_rate_hz // 100,
        kernel_size=512,  # ~0.023 * 22,050
sample_rate=sample_rate_hz,
freq_range=(60, 10_000),
scaling_config=scaling_config,
)
melspec = model.apply({}, jnp.array(signal))
melspec_tf = pipeline.MelSpectrogram(
features=160,
stride=sample_rate_hz // 100,
        kernel_size=512,  # ~0.023 * 22,050
sample_rate=sample_rate_hz,
freq_range=(60, 10_000),
scaling_config=scaling_config,
)({'audio': signal}, dataset_info=None)['audio']
np.testing.assert_allclose(melspec, melspec_tf.numpy(), atol=1e-5)
def test_resample_audio(self):
original_dataset = self._builder.as_dataset('train')
original_examples = next(
original_dataset.batch(len(original_dataset)).as_numpy_iterator()
)
# Six seconds at 32kHz, gives 192000 samples.
original_length = original_examples['audio'].shape[1]
original_sample_rate = self._builder.info.features['audio'].sample_rate
resampled_examples = pipeline.ResampleAudio(target_sample_rate=16000)(
original_examples, self._builder.info
)
expected_length = int(16000 * original_length / original_sample_rate)
self.assertEqual(
resampled_examples['audio'].shape[0],
original_examples['audio'].shape[0],
)
self.assertEqual(resampled_examples['audio'].shape[1], expected_length)
self.assertLen(resampled_examples['audio'].shape, 2)
@parameterized.named_parameters(('pad_end', True), ('no_pad_end', False))
def test_extract_strided_slices(self, pad_end):
sample_rate = self._builder.info.features['audio'].sample_rate
length_sec = 5
stride_sec = 2.5
length = int(length_sec * sample_rate)
stride = int(stride_sec * sample_rate)
original_dataset = self._builder.as_dataset('train')
original_examples = next(
original_dataset.batch(len(original_dataset)).as_numpy_iterator()
)
dataset = pipeline.ExtractStridedWindows(
window_length_sec=length_sec,
window_stride_sec=stride_sec,
pad_end=pad_end,
)(original_dataset, self._builder.info)
examples = next(dataset.batch(len(dataset)).as_numpy_iterator())
    # The fake_dataset builder creates 6s recordings. With a 5s window and a
    # 2.5s stride this yields one full slice and two zero-padded slices. We
    # therefore expect three slices per example when pad_end is True and a
    # single slice per example otherwise.
self.assertLen(
dataset, len(original_dataset) * 3 if pad_end else len(original_dataset)
)
# Verify slices have the expected length.
self.assertEqual(examples['audio'].shape[1], length)
# The segment start and end indices should reflect the window sliding over
# the audio.
np.testing.assert_equal(
examples['segment_start'],
[0, stride, 2 * stride] * len(original_dataset) if pad_end else 0,
)
np.testing.assert_equal(
examples['segment_end'],
[length, length + stride, length + 2 * stride] * len(original_dataset)
if pad_end
else length,
)
# The segment IDs should reflect the sliding window's position.
np.testing.assert_equal(
examples['segment_id'],
[0, 1, 2] * len(original_dataset) if pad_end else 0,
)
# The other features should be replicated across slices.
other_feature_names = [
k
for k in original_examples
if k not in ('audio', 'segment_start', 'segment_end', 'segment_id')
]
for key in other_feature_names:
np.testing.assert_equal(
examples[key],
np.repeat(original_examples[key], 3, axis=0)
if pad_end
else original_examples[key],
)
    # With a recording length of 6s, a window size of 5s, a window stride of
    # 2.5s, and end-padding, we expect the slices to cycle between a full
    # slice, a slice with 1.5s of zero padding, and a slice with 4s of zero
    # padding.
if pad_end:
np.testing.assert_equal(
examples['audio'][1::3, -int(1.5 * sample_rate) :], 0
)
np.testing.assert_equal(
examples['audio'][2::3, -int(4.0 * sample_rate) :], 0
)
def test_densely_annotate_windows_no_overlap_threshold(self):
# Sampling rate is 10, so divide the timestamps by 10 for seconds.
original_example = {
'segment_start': np.array(10, dtype=np.int64),
'segment_end': np.array(50, dtype=np.int64),
'annotation_start': np.array([10, 30, 45], dtype=np.int64),
'annotation_end': np.array([20, 60, 90], dtype=np.int64),
'label': np.array([0, 1, 2], dtype=np.int64),
}
fake_dataset_info = mock.MagicMock(
features={
'audio': mock.MagicMock(sample_rate=10),
'label': mock.MagicMock(names=('dowwoo', 'daejun', 'pilwoo')),
}
)
original_dataset = tf.data.Dataset.from_tensors(original_example)
annotated_dataset = pipeline.DenselyAnnotateWindows(
overlap_threshold_sec=0
)(original_dataset, fake_dataset_info)
annotated_dataset = next(annotated_dataset.as_numpy_iterator())
expected_dataset = {
'segment_start': np.array(10, dtype=np.int64),
'segment_end': np.array(50, dtype=np.int64),
'annotation_start': np.array([10, 30, 45], dtype=np.int64),
'annotation_end': np.array([20, 60, 90], dtype=np.int64),
'label': np.array([0, 1, 2], dtype=np.int64),
}
for key, expected_value in expected_dataset.items():
np.testing.assert_equal(expected_value, annotated_dataset[key])
def test_densely_annotate_windows_overlap_1sec(self):
# Sampling rate is 10, so divide the timestamps by 10 for seconds.
original_example = {
'segment_start': np.array(10, dtype=np.uint64),
'segment_end': np.array(50, dtype=np.uint64),
'annotation_start': np.array([10, 30, 45], dtype=np.uint64),
'annotation_end': np.array([20, 60, 90], dtype=np.uint64),
'label': np.array([0, 1, 2], dtype=np.int64),
}
fake_dataset_info = mock.MagicMock(
features={
'audio': mock.MagicMock(sample_rate=10),
'label': mock.MagicMock(names=('dowwoo', 'daejun', 'pilwoo')),
}
)
original_dataset = tf.data.Dataset.from_tensors(original_example)
annotated_dataset = pipeline.DenselyAnnotateWindows(
overlap_threshold_sec=1
)(original_dataset, fake_dataset_info)
annotated_dataset = next(annotated_dataset.as_numpy_iterator())
expected_dataset = {
'segment_start': np.array(10, dtype=np.uint64),
'segment_end': np.array(50, dtype=np.uint64),
# The annotations for labels 0 and 1 are longer than 1s, so are kept.
# The annotation metadata for label 2 is all zeros.
'annotation_start': np.array([10, 30, 0], dtype=np.uint64),
'annotation_end': np.array([20, 60, 0], dtype=np.uint64),
'intersection_size': np.array([10, 20, 0], dtype=np.uint64),
'annotation_length': np.array([10, 30, 0], dtype=np.uint64),
'label': np.array([0, 1], dtype=np.int64),
}
for key, expected_value in expected_dataset.items():
print(key, expected_value, annotated_dataset[key])
np.testing.assert_equal(expected_value, annotated_dataset[key])
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TaxonomyModel."""
from chirp.models import efficientnet
from chirp.models import frontend
from chirp.models import taxonomy_model
import flax
import jax
from jax import numpy as jnp
from jax import random
from absl.testing import absltest
class TaxonomyModelTest(absltest.TestCase):
def setUp(self):
super().setUp()
sample_rate_hz = 11025
self.input_size = sample_rate_hz
self.model = taxonomy_model.TaxonomyModel(
num_classes={"label": 10},
encoder=efficientnet.EfficientNet(
model=efficientnet.EfficientNetModel.B0
),
frontend=frontend.MorletWaveletTransform(
features=160,
stride=sample_rate_hz // 100,
kernel_size=2_048,
sample_rate=sample_rate_hz,
freq_range=(60, 10_000),
scaling_config=frontend.PCENScalingConfig(),
),
taxonomy_loss_weight=0.0,
)
self.key = random.PRNGKey(0)
self.variables = self.model.init(
self.key, jnp.zeros((1, self.input_size)), train=False
)
def test_dropout(self):
"""Ensure that two passes with train=True provide different outputs."""
fake_audio = 10 * random.normal(self.key, (1, 11025))
rng, key = random.split(self.key)
output1 = self.model.apply(
self.variables,
fake_audio,
train=True,
use_running_average=True,
rngs={"dropout": rng},
)
key, rng = random.split(key)
output2 = self.model.apply(
self.variables,
fake_audio,
train=True,
use_running_average=True,
rngs={"dropout": rng},
)
self.assertNotEqual(
jnp.squeeze(output1.label).tolist(), jnp.squeeze(output2.label).tolist()
)
def test_batch_norm(self):
"""Ensure that the state is updated by BN layers."""
fake_audio = 10 * random.normal(self.key, (2, 11025))
rng, _ = random.split(self.key)
model_state, _ = flax.core.pop(self.variables, "params")
_, updated_state = self.model.apply(
self.variables,
fake_audio,
train=False,
use_running_average=False,
mutable=list(model_state.keys()),
rngs={"dropout": rng},
)
for x, y in zip(
jax.tree_util.tree_leaves(model_state["batch_stats"]),
jax.tree_util.tree_leaves(updated_state["batch_stats"]),
):
self.assertNotEqual(x.tolist(), y.tolist())
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for quantizers."""
from chirp.models import quantizers
import flax
import jax
from jax import numpy as jnp
import numpy as np
from absl.testing import absltest
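# A minimal illustrative sketch (an assumption for exposition, not the actual
# implementation inside `quantizers`) of the EMA recurrence that the expected
# values in test_vector_quantizer below are computed with:
#   state <- decay * state + (1 - decay) * observation
def _reference_ema(state, observation, decay=0.99):
  # One EMA step; iterating this five times reproduces the `expected` loop
  # in test_vector_quantizer.
  return decay * state + (1 - decay) * observation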
class QuantizersTest(absltest.TestCase):
def test_vector_quantizer(self):
num_centroids = 2
embedding_dim = 8
vq = quantizers.VectorQuantizer(
num_centroids=num_centroids,
commitment_loss=0.0,
ema_decay=0.99,
demean=True,
rescale=True,
)
key = jax.random.PRNGKey(17)
rngs = {}
rngs['params'], key = jax.random.split(key)
inputs = jnp.ones([2, 4, embedding_dim])
params = vq.init(rngs, inputs, train=False, mutable=True)
# Check that the cluster assignment counts are all 1's at init time.
np.testing.assert_allclose(
params['quantizer']['cluster_counts'], jnp.ones([num_centroids])
)
# Update a few times.
for _ in range(5):
_, params = vq.apply(params, inputs, train=True, mutable=True)
    # We have quantized the same vector over five batches, eight times in each
    # batch (after demeaning, the all-ones inputs become zero vectors). Check
    # that we have correct EMA estimates of the assignment counts and feature
    # mean.
expected = jnp.array([1.0, 1.0])
expected_means = jnp.zeros([embedding_dim])
for _ in range(5):
expected = 0.99 * expected + 0.01 * jnp.array([8.0, 0.0])
expected_means = 0.99 * expected_means + 0.01 * jnp.ones([embedding_dim])
np.testing.assert_allclose(params['quantizer']['cluster_counts'], expected)
np.testing.assert_allclose(
params['quantizer']['feature_means'], expected_means
)
np.testing.assert_allclose(
params['quantizer']['feature_stdev'], 0.0, atol=1e-6
)
def test_refresh_codebooks(self):
num_centroids = 2
embedding_dim = 8
vq = quantizers.VectorQuantizer(
num_centroids=num_centroids,
commitment_loss=0.0,
ema_decay=0.99,
demean=True,
)
key = jax.random.PRNGKey(17)
rngs = {}
rngs['params'], key = jax.random.split(key)
inputs = jnp.ones([2, 4, embedding_dim])
params = vq.init(rngs, inputs, train=False, mutable=True)
model_state, model_params = flax.core.pop(params, 'params')
# Refresh with threshold 0.0, which should leave the params unchanged.
updated_params, updated_state = quantizers.refresh_codebooks(
model_params, model_state, key, 0.0
)
flat_params = flax.traverse_util.flatten_dict(model_params)
flat_updated_params = flax.traverse_util.flatten_dict(updated_params)
flat_state = flax.traverse_util.flatten_dict(model_state)
flat_updated_state = flax.traverse_util.flatten_dict(updated_state)
for k in flat_params:
np.testing.assert_allclose(flat_params[k], flat_updated_params[k])
for k in flat_state:
np.testing.assert_allclose(flat_state[k], flat_updated_state[k])
# Update the VQs to change the usage counts.
_, params = vq.apply(params, inputs, train=True, mutable=True)
# Refresh the codebooks with threshold 2.0, which should cause codebooks
# to update.
model_state, model_params = flax.core.pop(params, 'params')
updated_params, updated_state = quantizers.refresh_codebooks(
model_params, model_state, key, 2.0
)
flat_params = flax.traverse_util.flatten_dict(model_params)
flat_updated_params = flax.traverse_util.flatten_dict(updated_params)
flat_state = flax.traverse_util.flatten_dict(model_state)
flat_updated_state = flax.traverse_util.flatten_dict(updated_state)
for k in flat_params:
diff = np.sum(np.abs(flat_params[k] - flat_updated_params[k]))
self.assertGreater(diff, 0.1)
def test_product_quantizer(self):
num_centroids = 2
embedding_dim = 16
num_sections = 4
base_quantizers = [
quantizers.VectorQuantizer(
num_centroids=num_centroids,
commitment_loss=0.0,
ema_decay=0.99,
demean=True,
)
for _ in range(num_sections)
]
pvq = quantizers.ProductQuantizer(base_quantizers, pca_dim=8)
key = jax.random.PRNGKey(17)
rngs = {}
rngs['params'], key = jax.random.split(key)
inputs = jnp.ones([2, 4, embedding_dim])
params = pvq.init(rngs, inputs, train=False, mutable=True)
# Just check that it runs for now.
quantizer_outputs, _ = pvq.apply(params, inputs, train=True, mutable=True)
    # hubert_train.py expects the quantization loss to have 3 dimensions,
    # i.e. [batch size, num frames, num clusters].
self.assertLen(quantizer_outputs.quantization_loss.shape, 3)
self.assertSequenceEqual(quantizer_outputs.quantized.shape, inputs.shape)
self.assertSequenceEqual(
quantizer_outputs.nn_idx.shape, [num_sections, 2, 4]
)
def test_residual_quantizer(self):
num_centroids = 2
embedding_dim = 8
num_sections = 4
base_quantizers = [
quantizers.VectorQuantizer(
num_centroids=num_centroids,
commitment_loss=0.0,
ema_decay=0.99,
demean=True,
)
for _ in range(num_sections)
]
rvq = quantizers.ResidualQuantizer(base_quantizers)
key = jax.random.PRNGKey(17)
rngs = {}
rngs['params'], key = jax.random.split(key)
inputs = jnp.ones([2, 4, embedding_dim])
params = rvq.init(rngs, inputs, train=False, mutable=True)
# Just check that it runs for now.
quantizer_outputs, _ = rvq.apply(params, inputs, train=True, mutable=True)
    # hubert_train.py expects the quantization loss to have 3 dimensions,
    # i.e. [batch size, num frames, num clusters].
self.assertLen(quantizer_outputs.quantization_loss.shape, 3)
self.assertSequenceEqual(quantizer_outputs.quantized.shape, inputs.shape)
self.assertSequenceEqual(
quantizer_outputs.nn_idx.shape, [num_sections, 2, 4]
)
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for inference library."""
import os
import tempfile
import apache_beam as beam
from apache_beam.testing import test_pipeline
from chirp import path_utils
from chirp.inference import embed_lib
from chirp.inference import interface
from chirp.inference import models
from chirp.inference import tf_examples
from chirp.taxonomy import namespace
from chirp.taxonomy import namespace_db
from etils import epath
from ml_collections import config_dict
import numpy as np
import tensorflow as tf
from absl.testing import absltest
from absl.testing import parameterized
def _make_output_head_model(model_path: str, embedding_dim: int = 1280):
classes = ('speech', 'birdsong', 'unknown')
model = tf.keras.Sequential([
tf.keras.Input(shape=[embedding_dim]),
tf.keras.layers.Dense(len(classes)),
])
class_list = namespace.ClassList('custom', classes)
return interface.LogitsOutputHead(
model_path, 'other_label', model, class_list
)
class InferenceTest(parameterized.TestCase):
@parameterized.product(
make_embeddings=(True, False),
make_logits=(True, False),
make_separated_audio=(True, False),
write_embeddings=(True, False),
write_logits=(True, False),
write_separated_audio=(True, False),
write_raw_audio=(True, False),
)
def test_embed_fn(
self,
make_embeddings,
make_logits,
make_separated_audio,
write_embeddings,
write_logits,
write_raw_audio,
write_separated_audio,
):
model_kwargs = {
'sample_rate': 16000,
'embedding_size': 128,
'make_embeddings': make_embeddings,
'make_logits': make_logits,
'make_separated_audio': make_separated_audio,
}
embed_fn = embed_lib.EmbedFn(
write_embeddings=write_embeddings,
write_logits=write_logits,
write_separated_audio=write_separated_audio,
write_raw_audio=write_raw_audio,
model_key='placeholder_model',
model_config=model_kwargs,
file_id_depth=0,
)
embed_fn.setup()
self.assertIsNotNone(embed_fn.embedding_model)
test_wav_path = os.fspath(
path_utils.get_absolute_path(
'tests/testdata/tfds_builder_wav_directory_test/clap.wav'
)
)
source_info = embed_lib.SourceInfo(test_wav_path, 0, 10)
example = embed_fn.process(source_info, crop_s=10.0)[0]
serialized = example.SerializeToString()
parser = tf_examples.get_example_parser(
logit_names=['label', 'other_label']
)
got_example = parser(serialized)
self.assertIsNotNone(got_example)
self.assertEqual(got_example[tf_examples.FILE_NAME], 'clap.wav')
if make_embeddings and write_embeddings:
embedding = got_example[tf_examples.EMBEDDING]
self.assertSequenceEqual(
embedding.shape, got_example[tf_examples.EMBEDDING_SHAPE]
)
else:
self.assertEqual(got_example[tf_examples.EMBEDDING].shape, (0,))
if make_logits and write_logits:
self.assertSequenceEqual(
got_example['label'].shape, got_example['label_shape']
)
self.assertSequenceEqual(
got_example['other_label'].shape, got_example['other_label_shape']
)
else:
self.assertEqual(got_example['label'].shape, (0,))
if make_separated_audio and write_separated_audio:
separated_audio = got_example[tf_examples.SEPARATED_AUDIO]
self.assertSequenceEqual(
separated_audio.shape, got_example[tf_examples.SEPARATED_AUDIO_SHAPE]
)
else:
self.assertEqual(got_example[tf_examples.SEPARATED_AUDIO].shape, (0,))
if write_raw_audio:
raw_audio = got_example[tf_examples.RAW_AUDIO]
self.assertSequenceEqual(
raw_audio.shape, got_example[tf_examples.RAW_AUDIO_SHAPE]
)
else:
self.assertEqual(got_example[tf_examples.RAW_AUDIO].shape, (0,))
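  # In summary: each output field is populated only when the model produces it
  # (make_*) *and* the writer is asked to serialize it (write_*); otherwise
  # the parser returns an empty, zero-length tensor for that field.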
def test_keyed_write_logits(self):
"""Test that EmbedFn writes only the desired logits if specified."""
write_logits = ('other_label',)
model_kwargs = {
'sample_rate': 16000,
'embedding_size': 128,
'make_embeddings': True,
'make_logits': ('label', 'other_label'),
'make_separated_audio': False,
}
embed_fn = embed_lib.EmbedFn(
write_embeddings=True,
write_logits=write_logits,
write_separated_audio=False,
write_raw_audio=False,
model_key='placeholder_model',
model_config=model_kwargs,
file_id_depth=0,
)
embed_fn.setup()
self.assertIsNotNone(embed_fn.embedding_model)
test_wav_path = os.fspath(
path_utils.get_absolute_path(
'tests/testdata/tfds_builder_wav_directory_test/clap.wav'
)
)
source_info = embed_lib.SourceInfo(test_wav_path, 0, 10)
example = embed_fn.process(source_info, crop_s=10.0)[0]
serialized = example.SerializeToString()
parser = tf_examples.get_example_parser(
logit_names=['label', 'other_label']
)
got_example = parser(serialized)
self.assertIsNotNone(got_example)
self.assertEqual(got_example[tf_examples.FILE_NAME], 'clap.wav')
self.assertSequenceEqual(
got_example['other_label'].shape, got_example['other_label_shape']
)
self.assertEqual(got_example['label'].shape, (0,))
def test_logits_output_head(self):
base_model = models.PlaceholderModel(
sample_rate=22050,
make_embeddings=True,
make_logits=False,
make_separated_audio=True,
)
logits_model = _make_output_head_model(
'/tmp/logits_model', embedding_dim=128
)
base_outputs = base_model.embed(np.zeros(5 * 22050))
updated_outputs = logits_model.add_logits(base_outputs)
self.assertSequenceEqual(
updated_outputs.logits['other_label'].shape,
(5, 3),
)
# Check that we /only/ have the new logits, since make_logits=False
self.assertNotIn('label', updated_outputs.logits)
# Save and restore the model.
with tempfile.TemporaryDirectory() as logits_model_dir:
logits_model.save_model(logits_model_dir, '')
restore_config = config_dict.ConfigDict({
'model_path': logits_model_dir,
'logits_key': 'other_label',
})
restored_model = interface.LogitsOutputHead.from_config(restore_config)
reupdated_outputs = restored_model.add_logits(base_outputs)
error = np.mean(
np.abs(
reupdated_outputs.logits['other_label']
- updated_outputs.logits['other_label']
)
)
self.assertLess(error, 1e-5)
def test_embed_short_audio(self):
"""Test that EmbedFn handles audio shorter than the model window_size_s."""
model_kwargs = {
'sample_rate': 16000,
'embedding_size': 128,
'make_embeddings': True,
'make_logits': False,
'make_separated_audio': False,
'window_size_s': 5.0,
}
embed_fn = embed_lib.EmbedFn(
write_embeddings=True,
write_logits=False,
write_separated_audio=False,
write_raw_audio=False,
model_key='placeholder_model',
model_config=model_kwargs,
min_audio_s=1.0,
file_id_depth=0,
)
embed_fn.setup()
self.assertIsNotNone(embed_fn.embedding_model)
test_wav_path = os.fspath(
path_utils.get_absolute_path(
'tests/testdata/tfds_builder_wav_directory_test/clap.wav'
)
)
source_info = embed_lib.SourceInfo(test_wav_path, 0, 10)
# Crop to 3.0s to ensure we can handle short audio examples.
example = embed_fn.process(source_info, crop_s=3.0)[0]
serialized = example.SerializeToString()
parser = tf_examples.get_example_parser(logit_names=['label'])
got_example = parser(serialized)
self.assertIsNotNone(got_example)
embedding = got_example[tf_examples.EMBEDDING]
self.assertSequenceEqual(
embedding.shape, got_example[tf_examples.EMBEDDING_SHAPE]
)
def test_tfrecord_multiwriter(self):
output_dir = epath.Path(tempfile.TemporaryDirectory().name)
output_dir.mkdir(parents=True, exist_ok=True)
fake_examples = []
for idx in range(20):
outputs = interface.InferenceOutputs(
embeddings=np.zeros([10, 2, 8], dtype=np.float32), batched=False
)
fake_examples.append(
tf_examples.model_outputs_to_tf_example(
model_outputs=outputs,
file_id=f'fake_audio_{idx:02d}',
audio=np.zeros([100]),
timestamp_offset_s=0.0,
write_embeddings=True,
write_logits=False,
write_separated_audio=False,
write_raw_audio=False,
)
)
with tf_examples.EmbeddingsTFRecordMultiWriter(
output_dir.as_posix()
) as writer:
for ex in fake_examples:
serialized = ex.SerializeToString()
writer.write(serialized)
fns = [fn for fn in output_dir.glob('embeddings-*')]
ds = tf.data.TFRecordDataset(fns)
parser = tf_examples.get_example_parser()
ds = ds.map(parser)
got_examples = [ex for ex in ds.as_numpy_iterator()]
self.assertEqual(len(got_examples), len(fake_examples))
want_ids = [f'fake_audio_{idx:02d}' for idx in range(20)]
got_ids = sorted([ex['filename'].decode('utf-8') for ex in got_examples])
self.assertSequenceEqual(want_ids, got_ids)
@parameterized.product(
config_name=(
'raw_soundscapes',
'separate_soundscapes',
'birdnet_soundscapes',
),
)
def test_load_configs(self, config_name):
config = embed_lib.get_config(config_name)
self.assertIsNotNone(config)
def test_handcrafted_features(self):
model = models.HandcraftedFeaturesModel.beans_baseline()
audio = np.zeros([5 * 32000], dtype=np.float32)
outputs = model.embed(audio)
    # Five frames because we have 5s of audio with window 1.0 and hop 1.0.
    # The Beans baseline aggregation with MFCCs creates 20 MFCC channels, then
    # computes four summary statistics for each, giving 20 * 4 = 80 output
    # channels.
self.assertSequenceEqual([5, 1, 80], outputs.embeddings.shape)
def test_sep_embed_wrapper(self):
"""Check that the joint-model wrapper works as intended."""
separator = models.PlaceholderModel(
sample_rate=22050,
make_embeddings=False,
make_logits=False,
make_separated_audio=True,
)
db = namespace_db.load_db()
target_class_list = db.class_lists['high_sierras']
embeddor = models.PlaceholderModel(
sample_rate=22050,
make_embeddings=True,
make_logits=True,
make_separated_audio=False,
target_class_list=target_class_list,
)
fake_config = config_dict.ConfigDict()
sep_embed = models.SeparateEmbedModel(
sample_rate=22050,
taxonomy_model_tf_config=fake_config,
separator_model_tf_config=fake_config,
separation_model=separator,
embedding_model=embeddor,
)
audio = np.zeros(5 * 22050, np.float32)
outputs = sep_embed.embed(audio)
# The PlaceholderModel produces one embedding per second, and we have
# five seconds of audio, with two separated channels, plus the channel
# for the raw audio.
# Note that this checks that the sample-rate conversion between the
# separation model and embedding model has worked correctly.
self.assertSequenceEqual(
outputs.embeddings.shape, [5, 3, embeddor.embedding_size]
)
# The Sep+Embed model takes the max logits over the channel dimension.
self.assertSequenceEqual(
outputs.logits['label'].shape, [5, len(target_class_list.classes)]
)
def test_pooled_embeddings(self):
outputs = interface.InferenceOutputs(
embeddings=np.zeros([10, 2, 8]), batched=False
)
batched_outputs = interface.InferenceOutputs(
embeddings=np.zeros([3, 10, 2, 8]), batched=True
)
# Check that no-op is no-op.
non_pooled = outputs.pooled_embeddings('', '')
self.assertSequenceEqual(non_pooled.shape, outputs.embeddings.shape)
batched_non_pooled = batched_outputs.pooled_embeddings('', '')
self.assertSequenceEqual(
batched_non_pooled.shape, batched_outputs.embeddings.shape
)
for pooling_method in interface.POOLING_METHODS:
if pooling_method == 'squeeze':
# The 'squeeze' pooling method throws an exception if axis size is > 1.
with self.assertRaises(ValueError):
outputs.pooled_embeddings(pooling_method, '')
continue
elif pooling_method == 'flatten':
        # Concatenates over the target axis: [10, 2, 8] -> [2, 80], since ten
        # frames of size eight flatten into one 80-dim vector.
time_pooled = outputs.pooled_embeddings(pooling_method, '')
self.assertSequenceEqual(time_pooled.shape, [2, 80])
continue
time_pooled = outputs.pooled_embeddings(pooling_method, '')
self.assertSequenceEqual(time_pooled.shape, [2, 8])
batched_time_pooled = batched_outputs.pooled_embeddings(
pooling_method, ''
)
self.assertSequenceEqual(batched_time_pooled.shape, [3, 2, 8])
channel_pooled = outputs.pooled_embeddings('', pooling_method)
self.assertSequenceEqual(channel_pooled.shape, [10, 8])
batched_channel_pooled = batched_outputs.pooled_embeddings(
'', pooling_method
)
self.assertSequenceEqual(batched_channel_pooled.shape, [3, 10, 8])
both_pooled = outputs.pooled_embeddings(pooling_method, pooling_method)
self.assertSequenceEqual(both_pooled.shape, [8])
batched_both_pooled = batched_outputs.pooled_embeddings(
pooling_method, pooling_method
)
self.assertSequenceEqual(batched_both_pooled.shape, [3, 8])
def test_beam_pipeline(self):
"""Check that we can write embeddings to TFRecord file."""
test_wav_path = os.fspath(
path_utils.get_absolute_path(
'tests/testdata/tfds_builder_wav_directory_test/clap.wav'
)
)
source_infos = [embed_lib.SourceInfo(test_wav_path, 0, 10)]
base_pipeline = test_pipeline.TestPipeline()
tempdir = tempfile.gettempdir()
output_dir = os.path.join(tempdir, 'testBeamStuff_output')
model_kwargs = {
'sample_rate': 16000,
'embedding_size': 128,
'make_embeddings': True,
'make_logits': False,
'make_separated_audio': False,
}
embed_fn = embed_lib.EmbedFn(
write_embeddings=False,
write_logits=False,
write_separated_audio=False,
write_raw_audio=False,
model_key='placeholder_model',
model_config=model_kwargs,
)
metrics = embed_lib.build_run_pipeline(
base_pipeline, output_dir, source_infos, embed_fn
)
counter = metrics.query(
beam.metrics.MetricsFilter().with_name('examples_processed')
)['counters']
self.assertEqual(counter[0].result, 1)
print(metrics)
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for train."""
import os
import tempfile
from chirp import config_utils
from chirp.configs import baseline
from chirp.configs import baseline_attention
from chirp.configs import baseline_mel_conformer
from chirp.configs import config_globals
from chirp.data import utils as data_utils
from chirp.models import efficientnet
from chirp.models import frontend
from chirp.preprocessing import pipeline
from chirp.taxonomy import namespace
from chirp.tests import fake_dataset
from chirp.train import classifier
from clu import checkpoint
from flax import linen as nn
import jax
from jax import numpy as jnp
from ml_collections import config_dict
import tensorflow as tf
from absl.testing import absltest
from absl.testing import parameterized
TEST_WINDOW_S = 1
class ConstantEncoder(nn.Module):
"""A no-op encoder for quickly testing train+test loops."""
output_dim: int = 32
@nn.compact
def __call__(
self,
inputs: jnp.ndarray,
train: bool, # pylint: disable=redefined-outer-name
use_running_average: bool,
) -> jnp.ndarray:
return jnp.zeros([inputs.shape[0], self.output_dim])
class TrainTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.train_dir = tempfile.TemporaryDirectory("train_dir").name
self.data_dir = tempfile.TemporaryDirectory("data_dir").name
fake_builder = fake_dataset.FakeDataset(data_dir=self.data_dir)
fake_builder.download_and_prepare()
self.builder = fake_builder
def _get_test_dataset(self, config):
ds, dataset_info = data_utils.get_dataset(
"train",
dataset_directory=self.builder.data_dir,
pipeline=config.train_dataset_config.pipeline,
)
return ds, dataset_info
def _get_test_config(self, config_module=baseline) -> config_dict.ConfigDict:
"""Reduces test config sizes to avoid memory blowouts."""
config = config_module.get_config()
config.sample_rate_hz = 11_025
config.num_train_steps = 1
config.train_window_size_s = TEST_WINDOW_S
config.eval_window_size_s = TEST_WINDOW_S
config.train_config.log_every_steps = 1
config.train_config.checkpoint_every_steps = 1
config.eval_config.eval_steps_per_checkpoint = 1
config = config_utils.parse_config(config, config_globals.get_globals())
config.train_dataset_config.pipeline = pipeline.Pipeline(
ops=[
pipeline.OnlyJaxTypes(),
pipeline.ConvertBirdTaxonomyLabels(
source_namespace="ebird2021",
target_class_list="xenocanto",
add_taxonomic_labels=True,
),
pipeline.MixAudio(mixin_prob=0.0),
pipeline.Batch(batch_size=1, split_across_devices=True),
pipeline.RandomSlice(window_size=TEST_WINDOW_S),
pipeline.RandomNormalizeAudio(min_gain=0.15, max_gain=0.25),
]
)
config.eval_dataset_config.pipeline = pipeline.Pipeline(
ops=[
pipeline.OnlyJaxTypes(),
pipeline.MultiHot(),
pipeline.Batch(batch_size=1, split_across_devices=True),
pipeline.Slice(
window_size=TEST_WINDOW_S, start=0.5, names=("audio",)
),
pipeline.NormalizeAudio(target_gain=0.2, names=("audio",)),
]
)
return config
def _add_const_model_config(self, config):
config.init_config.model_config.encoder = ConstantEncoder(output_dim=32)
return config
def _add_b0_model_config(self, config):
config.init_config.model_config.encoder = efficientnet.EfficientNet(
efficientnet.EfficientNetModel.B0
)
return config
def _add_pcen_melspec_frontend(self, config):
config.init_config.model_config.frontend = frontend.MelSpectrogram(
features=32,
stride=32_000 // 25,
kernel_size=2_560,
sample_rate=32_000,
freq_range=(60, 10_000),
scaling_config=frontend.PCENScalingConfig(conv_width=256),
)
return config
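  # Note: conv_width=256 selects the convolutional PCEN smoother. As noted in
  # test_export_model below, the lax.scan-based (non-convolutional) PCEN
  # currently fails under jax2tf export, so the tests use the convolutional
  # variant.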
def test_config_structure(self):
# Check that the test config and model config have similar structure.
# This helps ensure that the test configs don't drift too far from the
# actual configs we use for training.
raw_config = baseline.get_config()
parsed_config = config_utils.parse_config(
raw_config, config_globals.get_globals()
)
test_config = self._get_test_config()
test_config = self._add_pcen_melspec_frontend(test_config)
test_config = self._add_b0_model_config(test_config)
print(jax.tree_util.tree_structure(parsed_config.to_dict()))
print(jax.tree_util.tree_structure(test_config.to_dict()))
self.assertEqual(
jax.tree_util.tree_structure(parsed_config.to_dict()),
jax.tree_util.tree_structure(test_config.to_dict()),
)
def test_export_model(self):
# NOTE: This test might fail when run on a machine that has a GPU but when
# CUDA is not linked (JAX will detect the GPU so jax2tf will try to create
# a TF graph on the GPU and fail)
config = self._get_test_config()
config = self._add_const_model_config(config)
config = self._add_pcen_melspec_frontend(config)
model_bundle, train_state = classifier.initialize_model(
workdir=self.train_dir, **config.init_config
)
train_state = model_bundle.ckpt.restore_or_initialize(train_state)
classifier.export_tf_model(
model_bundle,
train_state,
self.train_dir,
config.init_config.input_shape,
num_train_steps=0,
eval_sleep_s=0,
)
self.assertTrue(
tf.io.gfile.exists(os.path.join(self.train_dir, "model.tflite"))
)
self.assertTrue(
tf.io.gfile.exists(os.path.join(self.train_dir, "label.csv"))
)
with open(os.path.join(self.train_dir, "label.csv")) as f:
got_class_list = namespace.ClassList.from_csv(f.readlines())
# Check equality of the ClassList with the Model Bundle.
self.assertEqual(model_bundle.class_lists["label"], got_class_list)
self.assertTrue(
tf.io.gfile.exists(
os.path.join(self.train_dir, "savedmodel/saved_model.pb")
)
)
# Check that saved_model inference doesn't crash.
# Currently lax.scan (used in the non-convolutional PCEN) fails.
# See: https://github.com/google/jax/issues/12504
# The convolutional EMA (using conv_width != 0) works, though.
reloaded = tf.saved_model.load(os.path.join(self.train_dir, "savedmodel"))
audio = jnp.zeros([1, config.sample_rate_hz])
reloaded.infer_tf(audio)
@parameterized.parameters(
baseline,
baseline_attention,
baseline_mel_conformer,
)
def test_init(self, config_module):
    # Ensure that we can initialize the model with each config.
config = self._get_test_config(config_module)
    # Check that the field reference for num_train_steps propagated
    # appropriately.
self.assertEqual(
config.train_config.num_train_steps, config.eval_config.num_train_steps
)
model_bundle, train_state = classifier.initialize_model(
workdir=self.train_dir, **config.init_config
)
self.assertIsNotNone(model_bundle)
self.assertIsNotNone(train_state)
def test_train_one_step(self):
config = self._get_test_config()
config = self._add_const_model_config(config)
config = self._add_pcen_melspec_frontend(config)
ds, _ = self._get_test_dataset(config)
model_bundle, train_state = classifier.initialize_model(
workdir=self.train_dir, **config.init_config
)
classifier.train(
model_bundle=model_bundle,
train_state=train_state,
train_dataset=ds,
logdir=self.train_dir,
**config.train_config,
)
ckpt = checkpoint.MultihostCheckpoint(self.train_dir)
self.assertIsNotNone(ckpt.latest_checkpoint)
def test_eval_one_step(self):
config = self._get_test_config()
config = self._add_const_model_config(config)
config = self._add_pcen_melspec_frontend(config)
ds, _ = self._get_test_dataset(config)
model_bundle, train_state = classifier.initialize_model(
workdir=self.train_dir, **config.init_config
)
# Write a checkpoint, or else the eval will hang.
model_bundle.ckpt.save(train_state)
config.eval_config.num_train_steps = 0
classifier.evaluate(
model_bundle=model_bundle,
train_state=train_state,
valid_dataset=ds,
workdir=self.train_dir,
eval_sleep_s=0,
**config.eval_config,
)
ckpt = checkpoint.MultihostCheckpoint(self.train_dir)
self.assertIsNotNone(ckpt.latest_checkpoint)
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for train."""
import os
import tempfile
from absl import logging
from chirp import audio_utils
from chirp import config_utils
from chirp.configs import config_globals
from chirp.configs import separator as separator_config
from chirp.data import utils as data_utils
from chirp.data.bird_taxonomy import bird_taxonomy
from chirp.tests import fake_dataset
from chirp.train import separator
from clu import checkpoint
import jax
from ml_collections import config_dict
import numpy as np
import tensorflow as tf
from absl.testing import absltest
_c = config_utils.callable_config
class TrainSeparationTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.train_dir = tempfile.TemporaryDirectory('train_dir').name
self.data_dir = tempfile.TemporaryDirectory('data_dir').name
    # The following config should be practically equivalent to what was used
    # before: the audio feature shape will be [sample_rate].
config = bird_taxonomy.BirdTaxonomyConfig(
name='sep_train_test_config',
sample_rate_hz=32_000,
localization_fn=audio_utils.slice_peaked_audio,
interval_length_s=1.0,
)
fake_builder = fake_dataset.FakeDataset(
config=config, data_dir=self.data_dir
)
fake_builder.download_and_prepare()
self.builder = fake_builder
def _get_test_dataset(self, split, config):
config.dataset_directory = self.builder.data_dir
config.tfds_data_dir = ''
if 'train' in split:
pipeline_ = config.train_dataset_config.pipeline
else:
pipeline_ = config.eval_dataset_config.pipeline
ds, dataset_info = data_utils.get_dataset(
split,
is_train=False, # Avoid shuffle in tests.
dataset_directory=config.dataset_directory,
tfds_data_dir=config.tfds_data_dir,
pipeline=pipeline_,
)
if 'train' in split:
ds = ds.repeat()
return ds, dataset_info
def _get_test_config(self, use_small_encoder=True) -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = separator_config.get_config()
config.init_config.target_class_list = 'tiny_species'
window_size_s = config_dict.FieldReference(1)
config.train_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=[
_c('pipeline.OnlyJaxTypes'),
_c(
'pipeline.ConvertBirdTaxonomyLabels',
source_namespace='ebird2021',
target_class_list='tiny_species',
add_taxonomic_labels=True,
),
_c('pipeline.MixAudio', mixin_prob=1.0),
_c('pipeline.Batch', batch_size=2, split_across_devices=True),
_c('pipeline.RandomSlice', window_size=window_size_s),
],
)
config.eval_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=[
_c('pipeline.OnlyJaxTypes'),
_c(
'pipeline.ConvertBirdTaxonomyLabels',
source_namespace='ebird2021',
target_class_list='tiny_species',
add_taxonomic_labels=True,
),
_c('pipeline.MixAudio', mixin_prob=1.0),
_c('pipeline.Batch', batch_size=2, split_across_devices=True),
_c(
'pipeline.Slice',
window_size=window_size_s,
start=0,
names=('audio',),
),
],
)
config.train_config.num_train_steps = 1
config.train_config.checkpoint_every_steps = 1
config.train_config.log_every_steps = 1
config.eval_config.eval_steps_per_checkpoint = 1
if use_small_encoder:
soundstream_config = config_dict.ConfigDict()
soundstream_config.base_filters = 2
soundstream_config.bottleneck_filters = 4
      soundstream_config.num_residual_layers = 2
      soundstream_config.output_filters = 16
soundstream_config.strides = (2, 2)
soundstream_config.feature_mults = (2, 2)
soundstream_config.groups = (1, 2)
config.init_config.model_config.num_mask_channels = 2
config.init_config.model_config.mask_kernel_size = 2
config.init_config.model_config.classify_features = 4
config.init_config.model_config.mask_generator = _c(
'soundstream_unet.SoundstreamUNet', soundstream_config
)
config = config_utils.parse_config(config, config_globals.get_globals())
return config
def test_config_structure(self):
# Check that the test config and model config have similar structure.
raw_config = separator_config.get_config()
parsed_config = config_utils.parse_config(
raw_config, config_globals.get_globals()
)
test_config = self._get_test_config()
self.assertEqual(
jax.tree_util.tree_structure(parsed_config.to_dict()),
jax.tree_util.tree_structure(test_config.to_dict()),
)
def test_init_baseline(self):
# Ensure that we can initialize the model with the baseline config.
config = separator_config.get_config()
config = config_utils.parse_config(config, config_globals.get_globals())
model_bundle, train_state = separator.initialize_model(
workdir=self.train_dir, **config.init_config
)
self.assertIsNotNone(model_bundle)
self.assertIsNotNone(train_state)
def test_train_one_step(self):
config = self._get_test_config(use_small_encoder=True)
ds, _ = self._get_test_dataset(
'train',
config,
)
model = separator.initialize_model(
workdir=self.train_dir, **config.init_config
)
separator.train(
*model, train_dataset=ds, logdir=self.train_dir, **config.train_config
)
ckpt = checkpoint.MultihostCheckpoint(self.train_dir)
self.assertIsNotNone(ckpt.latest_checkpoint)
def test_eval_one_step(self):
config = self._get_test_config(use_small_encoder=True)
config.init_config.model_config.mask_generator.groups = (1, 1)
config.eval_config.num_train_steps = 0
ds, _ = self._get_test_dataset('test', config)
model_bundle, train_state = separator.initialize_model(
workdir=self.train_dir, **config.init_config
)
    # Write a checkpoint, or else the eval will hang.
model_bundle.ckpt.save(train_state)
separator.evaluate(
model_bundle=model_bundle,
train_state=train_state,
valid_dataset=ds,
workdir=self.train_dir,
eval_sleep_s=0,
**config.eval_config,
)
ckpt = checkpoint.MultihostCheckpoint(self.train_dir)
self.assertIsNotNone(ckpt.latest_checkpoint)
def test_export_model(self):
logging.info('Export Test: Initializing JAX model.')
config = self._get_test_config(use_small_encoder=True)
config.init_config.model_config.mask_generator.groups = (1, 1)
config.export_config.num_train_steps = 0
model_bundle, train_state = separator.initialize_model(
workdir=self.train_dir, **config.init_config
)
logging.info('Export Test: Exporting model.')
print('export_config : ', config.export_config)
frame_size = 32 * 2 * 2 * 250
separator.export_tf_model(
model_bundle,
train_state,
self.train_dir,
eval_sleep_s=0,
**config.export_config,
)
self.assertTrue(
tf.io.gfile.exists(os.path.join(self.train_dir, 'model.tflite'))
)
self.assertTrue(
tf.io.gfile.exists(
os.path.join(self.train_dir, 'savedmodel/saved_model.pb')
)
)
self.assertTrue(
tf.io.gfile.exists(os.path.join(self.train_dir, 'label.csv'))
)
logging.info('Export Test: Loading SavedModel.')
# Check that we can execute the saved model.
reloaded_model = tf.saved_model.load(
os.path.join(self.train_dir, 'savedmodel')
)
num_seconds = 3
framed_inputs = np.zeros([1, num_seconds, frame_size])
logging.info('Export Test: Executing SavedModel.')
sep_audio, logits, embeddings = reloaded_model.infer_tf(framed_inputs)
self.assertSequenceEqual(
sep_audio.shape,
[
1,
config.init_config.model_config.num_mask_channels,
num_seconds * frame_size,
],
)
self.assertSequenceEqual(
logits.shape, [1, 15, len(model_bundle.class_lists['label'].classes)]
)
self.assertSequenceEqual(
embeddings.shape,
[1, 15, config.init_config.model_config.classify_features],
)
logging.info('Export Test: Complete.')
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""soundevents dataset tests."""
import shutil
import tempfile
from unittest import mock
from chirp.data import soundevents
from etils import epath
import pandas as pd
import tensorflow_datasets as tfds
from absl.testing import absltest
def mock_localization_fn(audio, sr, interval_length_s, max_intervals):
del audio
del max_intervals
target_length = sr * interval_length_s
return [(0, target_length)]
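# Note: the mock above pins localization to a single interval
# [0, sr * interval_length_s), so the builder's slicing is deterministic in
# tests.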
class SoundeventsTest(tfds.testing.DatasetBuilderTestCase):
"""Tests for the soundevents dataset."""
DATASET_CLASS = soundevents.Soundevents
BUILDER_CONFIG_NAMES_TO_TEST = [
config.name
for config in DATASET_CLASS.BUILDER_CONFIGS
if config.name in ['fsd50k_full_length']
]
DL_EXTRACT_RESULT = {}
DL_SAMPLE_FILES = {
'dev_samples': (
DATASET_CLASS.code_path.parent
/ 'placeholder_data'
/ 'dev_samples.json'
),
'eval_samples': (
DATASET_CLASS.code_path.parent
/ 'placeholder_data'
/ 'eval_samples.json'
),
}
SKIP_CHECKSUMS = True
@classmethod
def setUpClass(cls):
""""""
super().setUpClass()
# `self.create_tempdir()` raises an UnparsedFlagAccessError, which is why
# we use `tempdir` directly.
cls.tempdir = tempfile.mkdtemp()
_ = tfds.core.lazy_imports.librosa
    # Soundevents uses FSD50K_DATASET_INFO to load the train and test splits.
    # For the test TFDS, dev and eval sets are created from placeholder-data
    # samples.
    # Create audio files for the dev-set placeholder data samples.
df_dev_samples = pd.read_json(cls.DL_SAMPLE_FILES['dev_samples'])
df_dev_samples.columns = ['fname', 'labels', 'mids', 'split']
subdir = epath.Path(cls.tempdir) / 'dev_audio'
subdir.mkdir(parents=True)
for _, row in df_dev_samples.iterrows():
tfds.core.lazy_imports.pydub.AudioSegment.silent(duration=10000).export(
subdir / f'{row["fname"]}.wav', format='wav'
)
    # Create audio files for the eval set from the placeholder-data samples.
df_eval_samples = pd.read_json(cls.DL_SAMPLE_FILES['eval_samples'])
df_eval_samples.columns = ['fname', 'labels', 'mids']
subdir = epath.Path(cls.tempdir) / 'eval_audio'
subdir.mkdir(parents=True)
print(subdir)
for _, row in df_eval_samples.iterrows():
tfds.core.lazy_imports.pydub.AudioSegment.silent(duration=10000).export(
subdir / f'{row["fname"]}.wav', format='wav'
)
subdir = epath.Path(cls.tempdir) / 'FSD50K.ground_truth'
subdir.mkdir(parents=True)
df_dev_samples.to_csv(subdir / 'dev.csv', index=False)
df_eval_samples.to_csv(subdir / 'eval.csv', index=False)
cls.DL_EXTRACT_RESULT['dataset_info_dev'] = subdir / 'dev.csv'
cls.DL_EXTRACT_RESULT['dataset_info_eval'] = subdir / 'eval.csv'
cls.SPLITS = {'train': len(df_dev_samples), 'test': len(df_eval_samples)}
cls.EXAMPLE_DIR = epath.Path(cls.tempdir)
cls.url_patcher = mock.patch.object(
cls.DATASET_CLASS, 'GCS_URL', epath.Path(cls.tempdir)
)
cls.url_patcher.start()
mock_gcs_url = epath.Path(cls.tempdir)
mock_dataset_config = {
'dev': {
'ground_truth_file': mock_gcs_url / 'FSD50K.ground_truth/dev.csv',
'audio_dir': mock_gcs_url / 'dev_audio',
},
'eval': {
'ground_truth_file': mock_gcs_url / 'FSD50K.ground_truth/eval.csv',
'audio_dir': mock_gcs_url / 'eval_audio',
},
}
cls.config_patcher = mock.patch.object(
cls.DATASET_CLASS, 'DATASET_CONFIG', mock_dataset_config
)
cls.config_patcher.start()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls.url_patcher.stop()
cls.config_patcher.stop()
shutil.rmtree(cls.tempdir)
@absltest.skip
def test_tags_are_valid(self):
pass
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for eval_lib."""
import functools
import os
import shutil
import tempfile
from typing import Any, Sequence
from chirp import config_utils
from chirp.configs import baseline_mel_conformer
from chirp.configs import config_globals
from chirp.data.bird_taxonomy import bird_taxonomy
from chirp.eval import callbacks
from chirp.eval import eval_lib
from chirp.taxonomy import namespace_db
from chirp.tests import fake_dataset
from chirp.train import classifier
import ml_collections
import numpy as np
import pandas as pd
import tensorflow as tf
from absl.testing import absltest
_c = config_utils.callable_config
def _stub_localization_fn(
audio: Any,
sample_rate_hz: int,
interval_length_s: float = 6.0,
max_intervals: int = 5,
) -> Sequence[tuple[int, int]]:
# The only purpose of this stub function is to avoid a default
# `localization_fn` value of None in `BirdTaxonomyConfig` so that the audio
# feature shape gets computed properly.
del audio, sample_rate_hz, interval_length_s, max_intervals
return []
class FakeBirdTaxonomy(fake_dataset.FakeDataset):
BUILDER_CONFIGS = [
bird_taxonomy.BirdTaxonomyConfig(
name='fake_variant_1',
localization_fn=_stub_localization_fn,
interval_length_s=6.0,
),
bird_taxonomy.BirdTaxonomyConfig(
name='fake_variant_2',
localization_fn=_stub_localization_fn,
interval_length_s=6.0,
),
]
class LoadEvalDatasetsTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.data_dir = tempfile.TemporaryDirectory('data_dir').name
FakeBirdTaxonomy(
data_dir=self.data_dir, config='fake_variant_1'
).download_and_prepare()
FakeBirdTaxonomy(
data_dir=self.data_dir, config='fake_variant_2'
).download_and_prepare()
def test_return_value_structure(self):
fake_config = ml_collections.ConfigDict()
fake_config.dataset_configs = {
'fake_dataset_1': {
'tfds_name': 'fake_bird_taxonomy/fake_variant_1',
'tfds_data_dir': self.data_dir,
'pipeline': _c(
'pipeline.Pipeline', ops=[_c('pipeline.OnlyJaxTypes')]
),
'split': 'train',
},
'fake_dataset_2': {
'tfds_name': 'fake_bird_taxonomy/fake_variant_2',
'tfds_data_dir': self.data_dir,
'pipeline': _c(
'pipeline.Pipeline', ops=[_c('pipeline.OnlyJaxTypes')]
),
'split': 'train',
},
}
fake_config = config_utils.parse_config(
fake_config, config_globals.get_globals()
)
eval_datasets = eval_lib.load_eval_datasets(fake_config)
self.assertSameElements(
['fake_dataset_1', 'fake_dataset_2'], eval_datasets.keys()
)
for dataset in eval_datasets.values():
self.assertIsInstance(dataset, tf.data.Dataset)
self.assertContainsSubset(
['audio', 'label', 'bg_labels'], dataset.element_spec.keys()
)
def tearDown(self):
super().tearDown()
shutil.rmtree(self.data_dir)
class GetEmbeddingsTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.data_dir = tempfile.TemporaryDirectory('data_dir').name
FakeBirdTaxonomy(
data_dir=self.data_dir, config='fake_variant_1'
).download_and_prepare()
def test_get_embeddings(self):
fake_config = ml_collections.ConfigDict()
fake_config.dataset_configs = {
'fake_dataset_1': {
'tfds_name': 'fake_bird_taxonomy/fake_variant_1',
'tfds_data_dir': self.data_dir,
'pipeline': _c(
'pipeline.Pipeline', ops=[_c('pipeline.OnlyJaxTypes')]
),
'split': 'train',
},
}
fake_config.model_callback = lambda x: x + 1
fake_config = config_utils.parse_config(
fake_config, config_globals.get_globals()
)
dataset = eval_lib.load_eval_datasets(fake_config)
(dataset_name,) = dataset.keys()
dataset = dataset[dataset_name]
embedded_dataset = eval_lib.get_embeddings(
dataset, fake_config.model_callback, batch_size=1
)
self.assertContainsSubset(
['embedding'], embedded_dataset.element_spec.keys()
)
embedding = next(embedded_dataset.as_numpy_iterator())['embedding']
self.assertTrue(((0 <= embedding) & (embedding <= 2)).all())
def test_embedding_model_callback(self):
placeholder_callback = callbacks.EmbeddingModelCallback(
'placeholder_model', ml_collections.ConfigDict({'sample_rate': 32000})
)
fake_config = ml_collections.ConfigDict()
fake_config.dataset_configs = {
'fake_dataset_1': {
'tfds_name': 'fake_bird_taxonomy/fake_variant_1',
'tfds_data_dir': self.data_dir,
'pipeline': _c(
'pipeline.Pipeline', ops=[_c('pipeline.OnlyJaxTypes')]
),
'split': 'train',
},
}
fake_config.model_callback = lambda x: x + 1
fake_config = config_utils.parse_config(
fake_config, config_globals.get_globals()
)
dataset = eval_lib.load_eval_datasets(fake_config)
(dataset_name,) = dataset.keys()
dataset = dataset[dataset_name]
embedded_dataset = eval_lib.get_embeddings(
dataset, placeholder_callback.model_callback, batch_size=1
)
self.assertContainsSubset(
['embedding'], embedded_dataset.element_spec.keys()
)
def tearDown(self):
super().tearDown()
shutil.rmtree(self.data_dir)
class EvalSetTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.class_names = ['a', 'b', 'c']
self.fake_embeddings_df = pd.DataFrame({
'label': (['a'] * 4 + ['b'] * 4 + ['c'] * 4) * 2,
'embedding': [[0.0]] * 24,
'bg_labels': [
'',
'b',
'c',
'b c',
'',
'a',
'c',
'a c',
'',
'a',
'b',
'a b',
] * 2,
'dataset_name': ['dataset_1'] * 12 + ['dataset_2'] * 12,
})
self.embedded_datasets = {
'dataset_1': tf.data.Dataset.from_tensor_slices(
self.fake_embeddings_df.groupby('dataset_name')
.get_group('dataset_1')
.to_dict('list')
),
'dataset_2': tf.data.Dataset.from_tensor_slices(
self.fake_embeddings_df.groupby('dataset_name')
.get_group('dataset_2')
.to_dict('list')
),
}
def test_prepare_eval_sets(self):
partial_specification = functools.partial(
eval_lib.EvalSetSpecification,
class_names=self.class_names,
search_corpus_classwise_mask_fn=(lambda _: 'label.str.contains("")'),
class_representative_global_mask_expr='label.str.contains("")',
class_representative_classwise_mask_fn=(
lambda class_name: f'label.str.contains("{class_name}")'
),
num_representatives_per_class=0,
)
fake_config = ml_collections.ConfigDict()
fake_config.rng_seed = 1234
fake_config.model_callback = {'learned_representations': {'a': [0.0]}}
fake_config.debug = {'embedded_dataset_cache_path': ''}
fake_config.eval_set_specifications = {
'fake_specification_1': partial_specification(
search_corpus_global_mask_expr='dataset_name == "dataset_1"'
),
'fake_specification_2': partial_specification(
search_corpus_global_mask_expr='dataset_name == "dataset_2"'
),
}
eval_sets = list(
eval_lib.prepare_eval_sets(fake_config, self.embedded_datasets)
)
# There should be two eval sets.
self.assertEqual(
[eval_set.name for eval_set in eval_sets],
['fake_specification_1', 'fake_specification_2'],
)
# There should be one classwise eval set per class.
for eval_set in eval_sets:
class_names = [
classwise_eval_set.class_name
for classwise_eval_set in eval_set.classwise_eval_sets
]
self.assertEqual(class_names, ['a', 'b', 'c'])
def test_eval_set_generator(self):
num_representatives_per_class = 2
fake_config = ml_collections.ConfigDict()
fake_config.rng_seed = 1234
fake_config.model_callback = {'learned_representations': {'a': [0.0]}}
fake_config.debug = {'embedded_dataset_cache_path': ''}
fake_config.eval_set_specifications = {
'fake_specification': eval_lib.EvalSetSpecification(
search_corpus_global_mask_expr='dataset_name == "dataset_1"',
class_names=self.class_names,
search_corpus_classwise_mask_fn=(
lambda n: f'not bg_labels.str.contains("{n}")'
),
class_representative_global_mask_expr='dataset_name == "dataset_1"',
class_representative_classwise_mask_fn=(
lambda n: f'label.str.contains("{n}")'
),
num_representatives_per_class=num_representatives_per_class,
)
}
(eval_set,) = eval_lib.prepare_eval_sets(
fake_config, self.embedded_datasets
)
for classwise_eval_set in eval_set.classwise_eval_sets:
class_name = classwise_eval_set.class_name
class_representatives_df = classwise_eval_set.class_representatives_df
search_corpus_df = eval_set.search_corpus_df[
classwise_eval_set.search_corpus_mask
]
# We should get the number of class representatives we requested.
self.assertLen(class_representatives_df, num_representatives_per_class)
# All class representatives should have the label `class_name`.
self.assertTrue((class_representatives_df['label'] == class_name).all())
# According to our `search_corpus_classwise_mask_fn`, `class_name` should
# not appear in any background label.
self.assertTrue(
(~search_corpus_df['bg_labels'].str.contains(class_name)).all()
)
# Class representatives should not be included in the search corpus.
self.assertTrue(
(~search_corpus_df.index.isin(class_representatives_df.index)).all()
)
# Embeddings from 'dataset_2' should not be found anywhere.
self.assertTrue(
(class_representatives_df['dataset_name'] != 'dataset_2').all()
)
self.assertTrue((search_corpus_df['dataset_name'] != 'dataset_2').all())
      # By construction of `self.fake_embeddings_df`, we know that the above
      # three conditions exclude 4 + 2 + 12 = 18 of the rows.
self.assertLen(search_corpus_df, 6)
class SearchProcedureTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.query = [1, 0]
self.species_id = 'C'
self.search_corpus = pd.DataFrame({
'embedding': [[0, 1], [1, 0], [1, 1]],
'label': ['B', 'A', 'C'],
'bg_labels': ['B C', ' ', 'A'],
})
def test_query_search(self):
score = pd.Series([0.0, 1.0, 0.7071])
search_corpus_mask = pd.Series([False, True, True])
actual_query_result = eval_lib._make_species_scores_df(
score=score,
species_id=self.species_id,
search_corpus=self.search_corpus,
search_corpus_mask=search_corpus_mask,
)
expected_query_result = pd.DataFrame({
'score': score.tolist(),
'species_match': [1, 0, 1],
'label_mask': search_corpus_mask.tolist(),
})
actual_query_scores = actual_query_result['score'].round(4)
expected_query_scores = expected_query_result['score']
self.assertTrue((actual_query_scores == expected_query_scores).all())
actual_query_matches = actual_query_result['species_match']
expected_query_matches = expected_query_result['species_match']
self.assertTrue((actual_query_matches == expected_query_matches).all())
actual_label_mask = actual_query_result['label_mask']
expected_label_mask = expected_query_result['label_mask']
self.assertTrue((actual_label_mask == expected_label_mask).all())
class DefaultFunctionsTest(absltest.TestCase):
def test_create_averaged_query(self):
embedding1 = np.arange(0, 5)
embedding2 = np.arange(1, 6)
embeddings = [embedding1, embedding2]
actual_avg_query = eval_lib.create_averaged_query(embeddings)
expected_avg_query = np.array([0.5, 1.5, 2.5, 3.5, 4.5])
    self.assertEqual(actual_avg_query.tolist(), expected_avg_query.tolist())
def test_cosine_similarity(self):
embedding = np.array([[0.0, 1.0, 2.0, 3.0, 4.0]])
actual_similarity = eval_lib.cosine_similarity(embedding, embedding)
expected_similarity = 1.0
np.testing.assert_allclose(actual_similarity, expected_similarity)
orthog_embedding0 = np.array([[-0.5, 0.0, -0.5, 0.0, -0.5]])
orthog_embedding1 = np.array([[0.0, 0.5, 0.0, 0.5, 0.0]])
actual_similarity = eval_lib.cosine_similarity(
orthog_embedding0, orthog_embedding1
)
expected_similarity = 0.0
np.testing.assert_allclose(actual_similarity, expected_similarity)
opposite_embedding0 = np.array([[-1.0, -1.0, -1.0, -1.0, -1.0]])
opposite_embedding1 = np.array([[1.0, 1.0, 1.0, 1.0, 1.0]])
actual_similarity = eval_lib.cosine_similarity(
opposite_embedding0, opposite_embedding1
)
expected_similarity = -1.0
np.testing.assert_allclose(actual_similarity, expected_similarity)
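    # The three cases above cover the extremes of cosine similarity,
    # dot(u, v) / (|u| * |v|): aligned (+1), orthogonal (0), and opposite (-1).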
class TaxonomyModelCallbackTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.workdir = tempfile.TemporaryDirectory('workdir').name
def test_learned_representation_blocklist(self):
workdir = os.path.join(self.workdir, '1')
init_config = config_utils.parse_config(
baseline_mel_conformer.get_config(), config_globals.get_globals()
).init_config
model_bundle, train_state = classifier.initialize_model(
workdir=workdir, **init_config
)
_ = model_bundle.ckpt.restore_or_initialize(train_state)
db = namespace_db.load_db()
all_species = db.class_lists['xenocanto'].classes
downstream_species = db.class_lists['downstream_species_v2'].classes
    # The model callback should load all available learned representations
    # when use_learned_representations is True (it defaults to False).
self.assertLen(
callbacks.TaxonomyModelCallback(
init_config=init_config,
workdir=workdir,
use_learned_representations=True,
).learned_representations,
len(all_species),
)
# When learned_representation_blocklist is passed, the model callback
# should *not* load any learned representation for species in the blocklist.
self.assertNoCommonElements(
callbacks.TaxonomyModelCallback(
init_config=init_config,
workdir=workdir,
use_learned_representations=True,
learned_representation_blocklist=downstream_species,
).learned_representations.keys(),
downstream_species,
)
def tearDown(self):
super().tearDown()
shutil.rmtree(self.workdir)
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for EfficientNet."""
import operator
from chirp.models import soundstream_unet
from jax import numpy as jnp
from jax import random
from jax import tree_util
from absl.testing import absltest
class SoundstreamUNetTest(absltest.TestCase):
def test_soundstream_unet(self):
batch_size = 2
input_time_steps = 16
input_width = 8
model = soundstream_unet.SoundstreamUNet(
base_filters=2,
bottleneck_filters=4,
output_filters=8,
strides=(2, 2),
feature_mults=(2, 2),
groups=(1, 2),
)
inp_audio = jnp.zeros([batch_size, input_time_steps, input_width])
(out, embedding), variables = model.init_with_output(
{"params": random.PRNGKey(0)}, inp_audio, train=True
)
self.assertEqual(out.shape, inp_audio.shape)
# Embedding shape: (batch, input_time / prod(strides), bottleneck_filters).
self.assertEqual(embedding.shape, (2, 4, 4))
num_parameters = tree_util.tree_reduce(
operator.add, tree_util.tree_map(jnp.size, variables["params"])
)
self.assertEqual(num_parameters, 864)
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics."""
import functools
import os
from chirp.models import cwt
from chirp.models import metrics
from clu import metrics as clu_metrics
import flax
import jax
from jax import numpy as jnp
from absl.testing import absltest
@flax.struct.dataclass
class ValidationMetrics(clu_metrics.Collection):
valid_map: clu_metrics.Average.from_fun(metrics.map_)
class MetricsTest(absltest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# Test with two CPU devices.
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=2"
  def test_parallel_metric_agreement(self):
@jax.jit
def update_metrics(valid_metrics, labels, logits):
return valid_metrics.merge(
ValidationMetrics.single_from_model_output(
logits=logits, labels=labels
)
)
@functools.partial(jax.pmap, axis_name="batch")
def p_update_metrics(valid_metrics, labels, logits):
return valid_metrics.merge(
ValidationMetrics.gather_from_model_output(
logits=logits, labels=labels, axis_name="batch"
)
)
batch_size = 4
num_classes = 5
key = jax.random.PRNGKey(2)
logits = jax.random.normal(key, [batch_size, num_classes])
labels = jax.numpy.float32(logits < 0)
valid_metrics = ValidationMetrics.empty()
valid_metrics = update_metrics(valid_metrics, labels, logits)
serial_metrics = valid_metrics.compute()
# Compute replicated metrics.
valid_metrics = flax.jax_utils.replicate(ValidationMetrics.empty())
logits_repl = flax.jax_utils.replicate(logits)
labels_repl = flax.jax_utils.replicate(labels)
valid_metrics = p_update_metrics(valid_metrics, labels_repl, logits_repl)
repl_metrics = flax.jax_utils.unreplicate(valid_metrics).compute()
for k in ["valid_map"]:
self.assertEqual(serial_metrics[k], repl_metrics[k])
self.assertGreaterEqual(serial_metrics[k], 0.0)
self.assertLessEqual(serial_metrics[k], 1.0)
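  # The agreement holds because flax.jax_utils.replicate gives every device an
  # identical copy of the batch, and averaging duplicated batches leaves the
  # mean unchanged relative to the single-device computation.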
def test_average_precision_no_labels(self):
batch_size = 4
num_classes = 5
key = jax.random.PRNGKey(2)
logits = jax.random.normal(key, [batch_size, num_classes])
labels = jax.numpy.zeros_like(logits)
av_prec = jax.numpy.mean(metrics.average_precision(logits, labels))
self.assertEqual(av_prec, 0.0)
def test_least_squares_mixit(self):
# Create some genuinely interesting source signals...
xs = jnp.linspace(-jnp.pi, jnp.pi, 256)
f3 = cwt.gabor_filter(3, cwt.Domain.TIME, cwt.Normalization.L2)(xs).real
f9 = cwt.gabor_filter(9, cwt.Domain.TIME, cwt.Normalization.L2)(xs).real
f5 = cwt.gabor_filter(5, cwt.Domain.TIME, cwt.Normalization.L2)(xs).real
f25 = cwt.gabor_filter(25, cwt.Domain.TIME, cwt.Normalization.L2)(xs).real
mix1 = f3 + f9
mix2 = f5 + f25
reference = jnp.concatenate(
[mix1[jnp.newaxis, jnp.newaxis, :], mix2[jnp.newaxis, jnp.newaxis, :]],
1,
)
estimate = jnp.concatenate(
[
f3[jnp.newaxis, jnp.newaxis, :],
f5[jnp.newaxis, jnp.newaxis, :],
f9[jnp.newaxis, jnp.newaxis, :],
f25[jnp.newaxis, jnp.newaxis, :],
],
1,
)
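# least_squares_mixit searches over binary mixing matrices that assign each
# estimated source to a reference mixture, and picks the assignment with the
# smallest least-squares reconstruction error.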
best_mix, mix_matrix = metrics.least_squares_mixit(reference, estimate)
l1_err = lambda x, y: jnp.sum(jnp.abs(x - y))
# The mix matrix corresponding to the definition of mix1 and mix2.
expected_mix = jnp.array([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]])
self.assertEqual(l1_err(mix_matrix, expected_mix), 0.0)
# The best_mix should recover the mixture channels exactly.
self.assertEqual(l1_err(best_mix[0, 0], mix1), 0.0)
self.assertEqual(l1_err(best_mix[0, 1], mix2), 0.0)
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for conformer."""
import operator
from chirp.models import conformer
from jax import numpy as jnp
from jax import random
from jax import tree_util
from absl.testing import absltest
class ConformerTest(absltest.TestCase):
def test_conv_subsample(self):
batch_size = 2
time = 500
freqs = 80
features = 144
channels = 1
inputs = jnp.ones((batch_size, time, freqs, channels))
subsample = conformer.ConvolutionalSubsampling(features=features)
key = random.PRNGKey(0)
outputs, variables = subsample.init_with_output(key, inputs, train=False)
self.assertEqual(outputs.shape, (batch_size, time // 4, features))
num_parameters = tree_util.tree_reduce(
operator.add, tree_util.tree_map(jnp.size, variables['params'])
)
expected_num_parameters = (
3 * 3 * 144 + 144  # First conv layer (3x3 kernel, 1 -> 144 channels).
+ 3 * 3 * 144 * 144 + 144  # Second conv layer (3x3 kernel, 144 -> 144).
+ freqs // 4 * 144 * 144 + 144  # Output projection layer.
)
self.assertEqual(num_parameters, expected_num_parameters)
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Soundscapes library and specific dataset functionality."""
import os
import tempfile
from absl import logging
from chirp import path_utils
from chirp.data.soundscapes import dataset_fns
from chirp.data.soundscapes import soundscapes
from chirp.data.soundscapes import soundscapes_lib
from chirp.taxonomy import namespace_db
from etils import epath
import librosa
import tensorflow_datasets as tfds
from absl.testing import absltest
from absl.testing import parameterized
BUILDER_CONFIGS = soundscapes.Soundscapes.BUILDER_CONFIGS
SUPERVISED_CONFIGS = [b for b in BUILDER_CONFIGS if b.supervised]
UNSUPERVISED_CONFIGS = [b for b in BUILDER_CONFIGS if not b.supervised]
class SoundscapesLibTest(parameterized.TestCase):
def _make_audio(self, audio_path, filename, extension, all_audio_filepaths):
"""Creates a test audio file."""
if not filename.endswith(extension):
filename = '.'.join([filename, extension])
audio_filepath = audio_path / filename
if not audio_filepath.parent.exists():
audio_filepath.parent.mkdir(parents=True)
if audio_filepath in all_audio_filepaths:
return
tfds.core.lazy_imports.pydub.AudioSegment.silent(duration=100000).export(
audio_filepath, format=extension
)
logging.info('created audio file : %s', audio_filepath)
all_audio_filepaths.append(audio_filepath)
return audio_filepath
def setUp(self):
super(SoundscapesLibTest, self).setUp()
self.data_dir = tempfile.TemporaryDirectory('data_dir').name
os.mkdir(self.data_dir)
# We use the 'caples.csv' only to get the parent directory's path.
self.testdata_dir = path_utils.get_absolute_path(
'tests/testdata/caples.csv'
).parent
def test_load_caples_annotations(self):
annos_csv_path = path_utils.get_absolute_path('tests/testdata/caples.csv')
annos = dataset_fns.load_caples_annotations(annos_csv_path)
# There are six lines in the example file, but one contains a 'comros'
# annotation which should be dropped.
self.assertLen(annos, 5)
expected_labels = ['gockin', 'dusfly', 'dusfly', 'dusfly', 'yerwar']
for expected_label, (_, anno) in zip(expected_labels, annos.iterrows()):
# Caples annotations only contain the filename's stem.
self.assertLen(anno.filename.split('.'), 1)
self.assertEqual(anno.namespace, 'ebird2021')
self.assertEqual(anno.label, [expected_label])
@parameterized.named_parameters([
('_ssw', 'ssw.csv', ['cangoo', 'blujay', 'rewbla', 'cangoo']),
(
'_sierras_kahl',
'sierras_kahl.csv',
['amerob', 'amerob', 'herthr', 'herthr'],
),
(
'_peru',
'peru.csv',
['blfant1', 'grasal3', 'greant1', 'butwoo1', 'unknown'],
),
(
'_hawaii',
'hawaii.csv',
['hawama', 'hawama', 'ercfra', 'jabwar', 'jabwar'],
),
(
'_high_sierras',
'high_sierras.csv',
['gcrfin', 'gcrfin', 'gcrfin', 'whcspa', 'whcspa', 'amepip'],
),
(
'_coffee_farms',
'coffee_farms.csv',
['compot1', 'compot1', 'compot1', 'compot1'],
),
])
def test_load_cornell_annotations(self, csv_name, expected_labels):
annos_csv_path = path_utils.get_absolute_path('tests/testdata/' + csv_name)
annos = dataset_fns.load_cornell_annotations(annos_csv_path)
self.assertLen(annos, len(expected_labels))
for expected_label, (_, anno) in zip(expected_labels, annos.iterrows()):
self.assertTrue(anno.filename.endswith('.flac'))
self.assertLen(anno.filename.split('.'), 2)
self.assertEqual(anno.namespace, 'ebird2021')
self.assertEqual(anno.label, [expected_label])
def test_load_powdermill_annotations(self):
# Combine the Powdermill 'raw' annotations into a single csv.
combined_csv_path = epath.Path(self.data_dir) / 'powdermill.csv'
dataset_fns.combine_powdermill_annotations(
self.testdata_dir / 'powdermill', combined_csv_path
)
annos_csv_path = path_utils.get_absolute_path(
'tests/testdata/powdermill.csv'
)
for csv_path in [combined_csv_path, annos_csv_path]:
annos = dataset_fns.load_powdermill_annotations(csv_path)
self.assertLen(annos, 5)
expected_labels = [
'norcar',
'woothr',
'eastow',
'eastow',
'eastow',
]
for expected_label, (_, anno) in zip(expected_labels, annos.iterrows()):
self.assertTrue(anno.filename.endswith('.wav'))
self.assertLen(anno.filename.split('.'), 2)
# Check that we got the nested filepath.
self.assertEqual(
anno.filename, 'Recording_1/Recording_1_Segment_05.wav'
)
self.assertEqual(anno.namespace, 'ebird2021')
self.assertEqual(anno.label, [expected_label])
@parameterized.named_parameters(
dict(testcase_name='_' + bc.name, builder_config=bc)
for bc in SUPERVISED_CONFIGS
)
def test_create_annotated_segments_df(self, builder_config):
if builder_config.annotation_filename == 'annotations.csv':
filename = f'{builder_config.name}.csv'.replace('_full_length', '')
elif builder_config.annotation_filename:
filename = builder_config.annotation_filename
else:
filename = f'{builder_config.name}.csv'
annotations_path = self.testdata_dir / filename
annos = builder_config.annotation_load_fn(annotations_path)
if not builder_config.supervised:
raise ValueError('Running a supervised test on an unsupervised config.')
# Create some audio files.
audio_path = epath.Path(self.data_dir) / builder_config.name / 'audio'
all_audio_filepaths = []
for _, anno in annos.iterrows():
if anno.filename.endswith('.wav'):
self._make_audio(audio_path, anno.filename, 'wav', all_audio_filepaths)
elif anno.filename.endswith('.flac'):
self._make_audio(audio_path, anno.filename, 'flac', all_audio_filepaths)
else:
# Probably just a stem; make a wav file.
self._make_audio(audio_path, anno.filename, 'wav', all_audio_filepaths)
# Finally, check that segment creation runs end-to-end.
segments = soundscapes_lib.create_segments_df(
all_audio_filepaths=all_audio_filepaths,
annotations_df=annos,
supervised=builder_config.supervised,
metadata_load_fn=builder_config.metadata_load_fn,
metadata_dir=self.testdata_dir,
metadata_fields=builder_config.metadata_fields,
)
self.assertLen(segments, len(annos))
def test_get_full_length_annotations(self):
builder_config = [
c for c in BUILDER_CONFIGS if c.name == 'caples_full_length'
][0]
filename = (
builder_config.annotation_filename or f'{builder_config.name}.csv'
)
annotations_path = self.testdata_dir / filename
annos = builder_config.annotation_load_fn(annotations_path)
audio_path = epath.Path(self.data_dir) / builder_config.name / 'audio'
all_audio_filepaths = []
for _, anno in annos.iterrows():
self._make_audio(audio_path, anno.filename, 'wav', all_audio_filepaths)
segments = soundscapes_lib.create_segments_df(
all_audio_filepaths=all_audio_filepaths,
annotations_df=annos,
supervised=builder_config.supervised,
metadata_load_fn=builder_config.metadata_load_fn,
metadata_dir=self.testdata_dir,
metadata_fields=builder_config.metadata_fields,
)
# The Caples testdata contains only a single file, so no need to subselect.
self.assertLen(all_audio_filepaths, 1)
audio, _ = librosa.load(all_audio_filepaths[0], sr=32000)
db = namespace_db.load_db()
annotations = soundscapes_lib.get_full_length_annotations(
audio, segments, db.class_lists['caples'], 32000, unknown_guard=True
)
# Check that unknown guard annotations exist.
self.assertLen(annotations, 7)
self.assertEqual(annotations['label'][0], 'unknown')
self.assertEqual(annotations['label'][6], 'unknown')
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pooling."""
import functools
from chirp.models import pooling
from jax import numpy as jnp
from jax import random
import numpy as np
from absl.testing import absltest
class PoolingTest(absltest.TestCase):
def test_gaussian_pooling(self):
num_channels = 2
num_steps = 100
window_size = 50
inputs = jnp.reshape(
jnp.arange(num_channels * num_steps, dtype=jnp.float32),
(num_channels, num_steps),
)
inputs = inputs.T[jnp.newaxis]
window_pool = pooling.WindowPool(
window=pooling.gaussian,
window_size=window_size,
window_init=functools.partial(pooling.gaussian_init, std=5.0),
stride=window_size,
padding="VALID",
)
rng = random.PRNGKey(0)
variables = window_pool.init(rng, inputs)
outputs = window_pool.apply(variables, inputs)
self.assertEqual(outputs.shape, (1, num_steps // window_size, num_channels))
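# The input ramps linearly within each window, so a normalized symmetric
# Gaussian window should average the first window to its midpoint,
# (window_size - 1) / 2.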
np.testing.assert_allclose(outputs[0, 0, 0], (window_size - 1) / 2)
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for audio utilities."""
import os
from chirp import audio_utils
from jax import numpy as jnp
from jax import random
from jax import scipy as jsp
from librosa.core import spectrum
import numpy as np
import tensorflow as tf
from absl.testing import absltest
from absl.testing import parameterized
class AudioUtilsTest(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.length_s = 2
cls.sample_rate_hz = 11_025
cls.num_frames = cls.sample_rate_hz * cls.length_s
cls.batch_dims = (2, 3)
cls.signal = jnp.sin(jnp.linspace(0.0, 440 * jnp.pi, cls.num_frames))
cls.noise = 0.5 * random.normal(
random.PRNGKey(0), cls.batch_dims + (cls.num_frames,)
)
cls.audio = cls.signal + cls.noise
_, _, cls.spectrogram = jsp.signal.stft(cls.audio)
def test_load_audio(self):
wav_path = os.path.join(
os.path.normpath(os.path.dirname(__file__)),
'testdata',
'tfds_builder_wav_directory_test',
'clap.wav',
)
audio = audio_utils.load_audio(wav_path, 32000)
self.assertLen(audio, 678240)
def test_multi_load_audio(self):
wav_path = os.path.join(
os.path.normpath(os.path.dirname(__file__)),
'testdata',
'tfds_builder_wav_directory_test',
'clap.wav',
)
offsets = [0.0, 0.1, 0.2]
audios = list(
audio_utils.multi_load_audio_window([wav_path] * 3, offsets, 32000, -1)
)
# The first result should be the full wav file.
self.assertLen(audios, 3)
self.assertLen(audios[0], 678240)
# The second result has offset 0.1s.
# Note that because the audio is resampled to 32kHz, we don't have perfect
# numerical equality.
self.assertLen(audios[1], 678240 - int(0.1 * 32000))
np.testing.assert_array_almost_equal(
audios[0][int(0.1 * 32000) :], audios[1], 4
)
# The third result has offset 0.2s.
self.assertLen(audios[2], 678240 - int(0.2 * 32000))
np.testing.assert_array_almost_equal(
audios[0][int(0.2 * 32000) :], audios[2], 4
)
def test_pcen(self):
gain = 0.5
smoothing_coef = 0.1
bias = 2.0
root = 2.0
eps = 1e-6
spec = jnp.abs(self.spectrogram)
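# PCEN roughly computes
# (spec / (eps + ema(spec))**gain + bias)**(1/root) - bias**(1/root),
# where ema is a per-channel exponential moving average of the spectrogram.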
out = audio_utils.pcen(
spec,
gain=gain,
smoothing_coef=smoothing_coef,
bias=bias,
root=root,
eps=eps,
)[0]
librosa_out = spectrum.pcen(
spec,
b=smoothing_coef,
gain=gain,
bias=bias,
power=1 / root,
eps=eps,
# librosa starts with an initial state of (1 - s); we start with x[0].
zi=(1 - smoothing_coef) * spec[..., 0:1, :],
axis=-2,
)
np.testing.assert_allclose(out, librosa_out, rtol=5e-2)
@parameterized.product(
# NOTE: TF and JAX have different outputs when nperseg is odd.
nperseg=(256, 230),
noverlap=(0, 17),
# NOTE: FFT length must be factorizable into primes less than 127 (this
# is a cuFFT restriction).
nfft=(256, 301),
boundary=('zeros', None),
padded=(True, False),
)
def test_stft_tf(self, nperseg, noverlap, nfft, boundary, padded):
batch_size = 3
sample_rate_hz = 22050
window = 'hann'
# NOTE: We don't test the Hamming window, since TensorFlow and SciPy have
# different implementations, which leads to slightly different results.
# To be precise, the difference is that:
# sp.signal.get_window('hamming', N) == tf.signal.hamming_window(N + 1)[:-1]
time_size = 5 * sample_rate_hz
audio = jnp.sin(jnp.linspace(0.0, 440 * jnp.pi, time_size))
noise = 0.01 * random.normal(random.PRNGKey(0), (batch_size, time_size))
signal = audio + noise
_, _, stfts = jsp.signal.stft(
signal,
fs=1 / sample_rate_hz,
window=window,
nperseg=nperseg,
noverlap=noverlap,
nfft=nfft,
boundary=boundary,
padded=padded,
)
stfts_tf = audio_utils.stft_tf(
tf.constant(signal),
fs=1 / sample_rate_hz,
window=window,
nperseg=nperseg,
noverlap=noverlap,
nfft=nfft,
boundary=boundary,
padded=padded,
)
np.testing.assert_allclose(stfts, stfts_tf.numpy(), atol=1e-5)
def test_pad_to_length_if_shorter(self):
audio = jnp.asarray([-1, 0, 1, 0], dtype=jnp.float32)
np.testing.assert_allclose(
audio_utils.pad_to_length_if_shorter(audio, 4), audio
)
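# Padding to a longer length appears to wrap the signal periodically on both
# sides, consistent with the expected output below.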
np.testing.assert_allclose(
audio_utils.pad_to_length_if_shorter(audio, 6),
jnp.asarray([0, -1, 0, 1, 0, -1], dtype=jnp.float32),
)
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for chirp.signal."""
from chirp import signal
from jax import numpy as jnp
import numpy as np
import tensorflow as tf
from absl.testing import absltest
class SignalTest(absltest.TestCase):
def test_linear_to_mel_weight_matrix(self):
jax_val = signal.linear_to_mel_weight_matrix()
tf_val = tf.signal.linear_to_mel_weight_matrix()
np.testing.assert_allclose(jax_val, tf_val, rtol=1e-3)
def test_frame(self):
shape = (2, 7, 3)
signal_ = jnp.reshape(jnp.arange(2 * 7 * 3), shape)
frames = signal.frame(signal_, 5, 2, axis=1)
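# A length-7 axis framed with window 5 and hop 2 yields two frames,
# covering [0:5] and [2:7].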
self.assertEqual(frames.shape, (2, 2, 5, 3))
np.testing.assert_array_equal(frames[1, 1, :, 2], signal_[1, 2:7, 2])
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xeno_canto."""
import functools
import shutil
import tempfile
from chirp.data import filter_scrub_utils as fsu
from chirp.tests import fake_dataset
import pandas as pd
import tensorflow_datasets as tfds
from absl.testing import absltest
class DataProcessingTest(absltest.TestCase):
"""Used to factorize the common setup between data processing tests."""
def setUp(self):
super().setUp()
# We define a toy dataframe that is easy to check manually.
self.toy_df = pd.DataFrame({
'species_code': ['ostric2', 'ostric3', 'grerhe1'],
'Common name': ['Common Ostrich', 'Somali Ostrich', 'Greater Rhea'],
'bg_labels': [
['ostric3', 'grerhe1'],
['ostric2', 'grerhe1'],
['ostric2', 'ostric3'],
],
'Country': ['Colombia', 'Australia', 'France'],
})
# Additionally, we define a dataframe that mimics the actual dataset
# of interest. Tests will be carried out on both dataframes.
self.temp_dir = tempfile.mkdtemp()
# Note: self.create_tempdir() raises an UnparsedFlagAccessError when
# debugging locally, so we use tempfile.mkdtemp() instead and manually
# delete the folder in tearDown().
fake_builder = fake_dataset.FakeDataset(data_dir=self.temp_dir)
fake_builder.download_and_prepare()
self.fake_info = fake_builder.info
self.fake_df = tfds.as_dataframe(fake_builder.as_dataset()['train'])
def tearDown(self):
super().tearDown()
shutil.rmtree(self.temp_dir)
class NotInTests(DataProcessingTest):
def test_filtering_ideal(self):
"""Ensure filtering produces expected results in nominal case."""
# 1) Tests on the toy dataFrame.
test_df = self.toy_df.copy()
fn_call = functools.partial(
fsu.is_not_in, key='Country', values=['France', 'Brazil']
)
# Ensure we're properly filtering out species
self.assertEqual(fn_call(test_df).tolist(), [True, True, False])
# 2) Tests on the fake dataFrame.
df = self.fake_df.copy()
targeted_country = b'South Africa'
# If this assertion fails, South Africa may no longer be a good example to
# test on; check fake_dataset to see how the recordings are populated.
self.assertIn(targeted_country, df['country'].unique())
filtering_fn = functools.partial(
fsu.is_not_in, key='country', values=[targeted_country]
)
filtered_df = df[filtering_fn(df)]
self.assertGreater(len(df), len(filtered_df))
# Ensure we've filtered all South African species.
self.assertEqual(filtered_df['country'].isin([targeted_country]).sum(), 0)
# Ensure we did not filter too many countries.
self.assertEqual(
(df['country'] == targeted_country).sum(), len(df) - len(filtered_df)
)
def test_filtering_wrong_field(self):
"""Ensure filtering raises adequate error when the field does not exist."""
test_df = self.toy_df.copy()
# Make a query with an error in the name of the field
with self.assertRaises(ValueError):
_ = fsu.is_not_in(test_df, key='county', values=['France', 'Brazil'])
def test_filtering_wrong_type(self):
"""Ensure filtering raises adequate error with wrong input type."""
test_df = self.toy_df.copy()
with self.assertRaises(TypeError):
_ = (
fsu.is_not_in(test_df, key='Country', values=[b'France', b'Brazil']),
)
class ScrubTest(DataProcessingTest):
def test_scrubbing_ideal(self):
"""Ensure scrubbing works as expected in nominal case."""
expected_df = self.toy_df.copy()
expected_df['bg_labels'] = [
['grerhe1'],
['ostric2', 'grerhe1'],
['ostric2'],
]
# 1) Simple scrubbing queries on the toy dataframe
test_df = self.toy_df.copy()
test_df = test_df.apply(
lambda row: fsu.scrub(row, 'bg_labels', ['ostric3']),
axis=1,
result_type='expand',
)
self.assertEqual(expected_df.to_dict(), test_df.to_dict())
# 2) Simple scrubbing queries on the fake_df
df = self.fake_df.copy()
targeted_country = b'South Africa'
key = 'label'
all_species = df[key].explode(key).unique()
south_african_species = (
df[df['country'] == targeted_country][key].explode(key).unique()
)
scrubbing_fn = functools.partial(
fsu.scrub, key=key, values=south_african_species
)
scrubbed_df = df.apply(scrubbing_fn, axis=1, result_type='expand')
remaining_species = scrubbed_df[key].explode(key).unique()
self.assertEqual(
set(south_african_species),
set(all_species).difference(remaining_species),
)
def test_scrubbing_empty_col(self):
"""Ensure scrubbing doesn't do anything if the column is empty."""
expected_df = self.toy_df.copy()
test_df = self.toy_df.copy()
expected_df['bg_labels'] = [[], [], []]
test_df['bg_labels'] = [[], [], []]
test_df = test_df.apply(
lambda row: fsu.scrub(row, 'bg_labels', ['ostric3']),
axis=1,
result_type='expand',
)
self.assertEqual(expected_df.to_dict(), test_df.to_dict())
def test_scrubbing_empty_query(self):
"""Ensure scrubbing doesn't do anything if `values` is an empty list."""
expected_df = self.toy_df.copy()
test_df = self.toy_df.copy()
test_df = test_df.apply(
lambda row: fsu.scrub(row, 'bg_labels', []),
axis=1,
result_type='expand',
)
self.assertEqual(expected_df.to_dict(), test_df.to_dict())
def test_scrub_no_side_effects(self):
"""Ensure scrubbing operation does not have side-effects."""
df = self.fake_df.copy()
key = 'label'
# Scrub every foreground label.
scrubbing_fn = functools.partial(
fsu.scrub, key=key, values=df.explode(key)[key].unique()
)
_ = df.apply(scrubbing_fn, axis=1, result_type='expand')
# If scrub function had side-effects, e.g. modified the row in-place,
# the df would also change.
self.assertTrue(self.fake_df.equals(df))
class QueryTest(DataProcessingTest):
def test_masking_query(self):
"""Ensure masking queries work as expected."""
mask_query = fsu.Query(
op=fsu.MaskOp.IN, kwargs={'key': 'species_code', 'values': ['ostric2']}
)
self.assertEqual(
fsu.apply_query(self.toy_df, mask_query).tolist(), [True, False, False]
)
def test_contains_query(self):
mask_query = fsu.Query(
op=fsu.MaskOp.CONTAINS_ANY,
kwargs={'key': 'bg_labels', 'values': ['ostric2']},
)
self.assertEqual(
fsu.apply_query(self.toy_df, mask_query).tolist(), [False, True, True]
)
mask_query = fsu.Query(
op=fsu.MaskOp.CONTAINS_NO,
kwargs={'key': 'bg_labels', 'values': ['ostric1', 'ostric2']},
)
self.assertEqual(
fsu.apply_query(self.toy_df, mask_query).tolist(), [True, False, False]
)
def test_scrub_query(self):
"""Ensure scrubbing queries work as expected."""
# Test scrubbing on list-typed fields.
scrub_query = fsu.Query(
op=fsu.TransformOp.SCRUB,
kwargs={'key': 'bg_labels', 'values': ['ostric2']},
)
df = fsu.apply_query(self.toy_df, scrub_query)
expected_df = self.toy_df.copy()
expected_df['bg_labels'] = [
['ostric3', 'grerhe1'],
['grerhe1'],
['ostric3'],
]
self.assertEqual(expected_df.to_dict(), df.to_dict())
# Test scrubbing on str-typed fields.
scrub_query = fsu.Query(
op=fsu.TransformOp.SCRUB,
kwargs={'key': 'species_code', 'values': ['ostric2', 'ostric3']},
)
df = fsu.apply_query(self.toy_df, scrub_query)
expected_df = self.toy_df.copy()
expected_df['species_code'] = ['', '', 'grerhe1']
self.assertEqual(expected_df.to_dict(), df.to_dict())
scrub_query = fsu.Query(
op=fsu.TransformOp.SCRUB_ALL_BUT,
kwargs={'key': 'species_code', 'values': ['ostric2', 'ostric3']},
)
df = fsu.apply_query(self.toy_df, scrub_query)
expected_df = self.toy_df.copy()
expected_df['species_code'] = ['ostric2', 'ostric3', '']
self.assertEqual(expected_df.to_dict(), df.to_dict())
# Test with a replace value
scrub_query = fsu.Query(
op=fsu.TransformOp.SCRUB,
kwargs={
'key': 'species_code',
'values': ['ostric2', 'ostric3'],
'replace_value': 'unknown',
},
)
df = fsu.apply_query(self.toy_df, scrub_query)
expected_df = self.toy_df.copy()
expected_df['species_code'] = ['unknown', 'unknown', 'grerhe1']
self.assertEqual(expected_df.to_dict(), df.to_dict())
scrub_query = fsu.Query(
op=fsu.TransformOp.SCRUB,
kwargs={
'key': 'bg_labels',
'values': ['ostric2', 'ostric3'],
'replace_value': 'unknown',
},
)
df = fsu.apply_query(self.toy_df, scrub_query)
expected_df = self.toy_df.copy()
expected_df['bg_labels'] = [
['unknown', 'grerhe1'],
['unknown', 'grerhe1'],
['unknown', 'unknown'],
]
self.assertEqual(expected_df.to_dict(), df.to_dict())
def test_complemented_query(self):
df = self.toy_df.copy()
df['unique_key'] = [0, 1, 2]
# Test nominal case with scrubbing query. Scrubbing does not remove any
# samples. Therefore check that setting complement to True returns an
# empty df.
scrub_query = fsu.Query(
op=fsu.TransformOp.SCRUB,
kwargs={'key': 'bg_labels', 'values': ['ostric2']},
)
new_df = fsu.apply_complement(
df, fsu.QueryComplement(scrub_query, 'unique_key')
)
self.assertEmpty(new_df)
# Test nominal case with filtering query
filter_query = fsu.Query(
op=fsu.TransformOp.FILTER,
kwargs={
'mask_op': fsu.MaskOp.IN,
'op_kwargs': {'key': 'species_code', 'values': ['ostric2']},
},
)
self.assertEqual(
fsu.apply_complement(
df, fsu.QueryComplement(filter_query, 'unique_key')
).to_dict(),
df.drop([0]).to_dict(),
)
# Test that when values don't uniquely define each recording, an error
# is raised
with self.assertRaises(ValueError):
df['unique_key'] = [0, 1, 1]
fsu.apply_complement(df, fsu.QueryComplement(filter_query, 'unique_key'))
def test_append_query(self):
new_row = {
'bg_labels': 'ignore',
'species_code': 'ignore',
'Common name': 'ignore',
'Country': 'ignore',
}
append_query = fsu.Query(fsu.TransformOp.APPEND, {'row': new_row})
new_df = fsu.apply_query(self.toy_df, append_query)
self.assertEqual(
new_df.to_dict(),
pd.concat(
[self.toy_df, pd.Series(new_row)], ignore_index=True
).to_dict(),
)
# Append query with keys not matching the dataframe
append_query = fsu.Query(fsu.TransformOp.APPEND, {'row': {'a': 'b'}})
with self.assertRaises(ValueError):
fsu.apply_query(self.toy_df, append_query)
class QueryParallelTest(DataProcessingTest):
def test_merge_or(self):
mask_query_1 = fsu.Query(
fsu.MaskOp.IN, {'key': 'Country', 'values': ['Colombia']}
)
mask_query_2 = fsu.Query(
fsu.MaskOp.IN, {'key': 'species_code', 'values': ['grerhe1']}
)
query_parallel = fsu.QueryParallel(
[mask_query_1, mask_query_2], fsu.MergeStrategy.OR
)
mask = fsu.apply_parallel(self.toy_df, query_parallel)
self.assertEqual(mask.tolist(), [True, False, True])
self.assertIn('Colombia', self.toy_df[mask]['Country'].tolist())
self.assertIn('grerhe1', self.toy_df[mask]['species_code'].tolist())
# Ensure an error is raised if any element is not a boolean Series.
with self.assertRaises(TypeError):
fsu.or_series([
self.toy_df['species_code'],
self.toy_df['species_code'] == 'ostric2',
])
# Ensure an error is raised if Series don't pertain to the same set of
# recordings.
with self.assertRaises(RuntimeError):
fsu.or_series([
self.toy_df.drop(0)['species_code'] == 'ostric2',
self.toy_df['species_code'] == 'ostric2',
])
def test_merge_and(self):
mask_query_1 = fsu.Query(
fsu.MaskOp.IN, {'key': 'Country', 'values': ['Colombia', 'France']}
)
mask_query_2 = fsu.Query(
fsu.MaskOp.IN, {'key': 'species_code', 'values': ['grerhe1']}
)
query_parallel = fsu.QueryParallel(
[mask_query_1, mask_query_2], fsu.MergeStrategy.AND
)
mask = fsu.apply_parallel(self.toy_df, query_parallel)
self.assertEqual(mask.tolist(), [False, False, True])
# Ensure an error is raised if any element is not a boolean Series.
with self.assertRaises(RuntimeError):
fsu.and_series([
self.toy_df['species_code'],
self.toy_df['species_code'] == 'ostric2',
])
# Ensure an error is raised if Series don't pertain to the same set of
# recordings.
with self.assertRaises(RuntimeError):
fsu.and_series([
self.toy_df.drop(0)['species_code'] == 'ostric2',
self.toy_df['species_code'] == 'ostric2',
])
def test_merge_concat_no_duplicates(self):
filter_query_1 = fsu.Query(
fsu.TransformOp.FILTER,
{
'mask_op': fsu.MaskOp.IN,
'op_kwargs': {'key': 'Country', 'values': ['Colombia']},
},
)
filter_query_2 = fsu.Query(
fsu.TransformOp.FILTER,
{
'mask_op': fsu.MaskOp.IN,
'op_kwargs': {
'key': 'Country',
'values': ['Colombia', 'Australia'],
},
},
)
scrub_query = fsu.Query(
fsu.TransformOp.SCRUB, {'key': 'bg_labels', 'values': ['ostric3']}
)
# First recording will be selected by both queries (i.e. duplicate). The
# following ensures it only appears once in the result when using
# CONCAT_NO_DUPLICATES
query_parallel = fsu.QueryParallel(
[filter_query_1, filter_query_2], fsu.MergeStrategy.CONCAT_NO_DUPLICATES
)
self.assertTrue(
fsu.apply_parallel(self.toy_df, query_parallel).equals(
self.toy_df.drop(2)
)
)
# In the following, we also apply scrubbing in the second Query. This
# scrubbing will modify the first recording, and therefore it shouldn't be
# counted as a duplicate anymore. In the final df, we should find two
# versions of the first recording (the original, and the scrubbed one).
query_parallel = fsu.QueryParallel(
[filter_query_1, fsu.QuerySequence([filter_query_2, scrub_query])],
fsu.MergeStrategy.CONCAT_NO_DUPLICATES,
)
scrubbed_r0 = self.toy_df.copy().loc[0]
scrubbed_r0['bg_labels'] = ['grerhe1']
# Here we don't use assertEqual with the .to_dict() because .to_dict()
# automatically removes duplicate indexes, making it impossible to know
# if duplicates were removed because of our merging strategy or because
# of .to_dict().
self.assertTrue(
fsu.apply_parallel(self.toy_df, query_parallel).equals(
pd.concat([
self.toy_df.loc[[0]],
pd.DataFrame([scrubbed_r0, self.toy_df.loc[1]]),
])
)
)
# Ensure the concatenation raises an error if the two dataframes don't have
# the exact same columns.
with self.assertRaises(RuntimeError):
fsu.concat_no_duplicates(
[self.toy_df, self.toy_df[['species_code', 'bg_labels']]]
)
class QuerySequenceTest(DataProcessingTest):
def test_untargeted_filter_scrub(self):
"""Ensure that applying a QuerySequence (no masking specified) works."""
filter_args = {
'mask_op': fsu.MaskOp.IN,
'op_kwargs': {'key': 'species_code', 'values': ['ostric3', 'ostric2']},
}
filter_query = fsu.Query(op=fsu.TransformOp.FILTER, kwargs=filter_args)
scrub_query = fsu.Query(
op=fsu.TransformOp.SCRUB,
kwargs={'key': 'bg_labels', 'values': ['ostric2']},
)
query_sequence = fsu.QuerySequence(queries=[filter_query, scrub_query])
df = fsu.apply_sequence(self.toy_df, query_sequence)
expected_df = pd.DataFrame({
'species_code': ['ostric2', 'ostric3'],
'Common name': [
'Common Ostrich',
'Somali Ostrich',
],
'bg_labels': [
['ostric3', 'grerhe1'],
['grerhe1'],
],
'Country': ['Colombia', 'Australia'],
})
self.assertEqual(expected_df.to_dict(), df.to_dict())
def test_targeted_filter_scrub(self):
"""Test QuerySequence on a subset of samples (w/ masking query)."""
filter_args = {
'mask_op': fsu.MaskOp.IN,
'op_kwargs': {'key': 'species_code', 'values': ['ostric3', 'grerhe1']},
}
filter_query = fsu.Query(op=fsu.TransformOp.FILTER, kwargs=filter_args)
scrub_query = fsu.Query(
op=fsu.TransformOp.SCRUB,
kwargs={'key': 'bg_labels', 'values': ['ostric2']},
)
query_sequence = fsu.QuerySequence(
queries=[filter_query, scrub_query],
mask_query=fsu.Query(
op=fsu.MaskOp.IN,
kwargs={'key': 'Country', 'values': ['Colombia', 'Australia']},
),
)
df = fsu.apply_sequence(self.toy_df, query_sequence)
# In the example, only samples 1 and 2 have country values in
# ['Colombia', 'Australia']. Therefore, sample 3 will not be affected at all
# by any query. Sample 1 will be removed by the first filtering query.
# Sample 2 will survive the first filtering query, but will have its
# 'ostric2' bg_label scrubbed out.
expected_df = pd.DataFrame({
'species_code': ['ostric3', 'grerhe1'],
'Common name': ['Somali Ostrich', 'Greater Rhea'],
'bg_labels': [['grerhe1'], ['ostric2', 'ostric3']],
'Country': ['Australia', 'France'],
})
self.assertEqual(
expected_df.sort_values('species_code').to_dict('list'),
df.sort_values('species_code').to_dict('list'),
)
def test_nested_query_sequence(self):
filter_args = {
'mask_op': fsu.MaskOp.IN,
'op_kwargs': {'key': 'species_code', 'values': ['ostric3', 'grerhe1']},
}
filter_query = fsu.Query(fsu.TransformOp.FILTER, filter_args)
mask_query = fsu.Query(
fsu.MaskOp.IN, {'key': 'species_code', 'values': ['grerhe1']}
)
scrub_query = fsu.Query(
fsu.TransformOp.SCRUB, {'key': 'bg_labels', 'values': ['ostric2']}
)
equivalent_queries = [
fsu.QuerySequence(
[filter_query, fsu.QuerySequence([scrub_query], mask_query)]
),
fsu.QuerySequence(
[
filter_query,
fsu.QuerySequence([filter_query]),
fsu.QuerySequence([scrub_query], mask_query),
],
),
fsu.QuerySequence(
[
fsu.QuerySequence([filter_query]),
filter_query,
fsu.QuerySequence([scrub_query], mask_query),
],
),
fsu.QuerySequence(
[
fsu.QuerySequence([]),
filter_query,
fsu.QuerySequence([scrub_query], mask_query),
],
),
fsu.QuerySequence(
[
filter_query,
fsu.QuerySequence([filter_query, scrub_query], mask_query),
],
),
]
expected_df = self.toy_df.drop(0)
expected_df['bg_labels'] = [['ostric2', 'grerhe1'], ['ostric3']]
for query_sequence in equivalent_queries:
self.assertEqual(
expected_df.to_dict(),
fsu.apply_sequence(self.toy_df, query_sequence).to_dict(),
)
class FilterByClasslistTest(DataProcessingTest):
def test_filter_not_in_class_list(self):
"""Test filtering all items not in the class list ."""
filter_query = fsu.filter_not_in_class_list('species_code', 'tiny_species')
expected_df = pd.DataFrame({
'species_code': ['ostric3', 'grerhe1'],
'Common name': ['Somali Ostrich', 'Greater Rhea'],
'bg_labels': [['ostric2', 'grerhe1'], ['ostric2', 'ostric3']],
'Country': ['Australia', 'France'],
})
self.assertEqual(
fsu.apply_query(self.toy_df, filter_query).values.tolist(),
expected_df.values.tolist(),
)
def test_filter_in_class_list(self):
"""Test filtering all items not in the class list."""
filter_query = fsu.filter_in_class_list('species_code', 'tiny_species')
expected_df = pd.DataFrame({
'species_code': ['ostric2'],
'Common name': ['Common Ostrich'],
'bg_labels': [
['ostric3', 'grerhe1'],
],
'Country': ['Colombia'],
})
self.assertEqual(
fsu.apply_query(self.toy_df, filter_query).values.tolist(),
expected_df.values.tolist(),
)
def test_filter_contains_no_class_list(self):
"""Test filtering all items not in target class list ."""
filter_query = fsu.filter_contains_no_class_list(
'bg_labels', 'tiny_species'
)
expected_df = pd.DataFrame({
'species_code': ['ostric2'],
'Common name': ['Common Ostrich'],
'bg_labels': [['ostric3', 'grerhe1']],
'Country': ['Colombia'],
})
self.assertEqual(
fsu.apply_query(self.toy_df, filter_query).values.tolist(),
expected_df.values.tolist(),
)
def test_filter_contains_any_class_list(self):
"""Test filtering any items that is in target class list ."""
filter_query = fsu.filter_contains_any_class_list(
'bg_labels', 'tiny_species'
)
expected_df = pd.DataFrame({
'species_code': ['ostric3', 'grerhe1'],
'Common name': ['Somali Ostrich', 'Greater Rhea'],
'bg_labels': [['ostric2', 'grerhe1'], ['ostric2', 'ostric3']],
'Country': ['Australia', 'France'],
})
self.assertEqual(
fsu.apply_query(self.toy_df, filter_query).values.tolist(),
expected_df.values.tolist(),
)
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for MAE."""
from chirp.models import mae
from jax import numpy as jnp
from jax import random
from absl.testing import absltest
class MaeTest(absltest.TestCase):
def test_shapes(self):
batch_size = 16
image_size = (256, 512)
patch_size = (16, 16)
mask_rate = 0.8
hidden_size = 64
c = 2
h, w = image_size[0] // patch_size[0], image_size[1] // patch_size[1]
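# The encoder keeps a (1 - mask_rate) fraction of the h * w patches.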
num_patches = int(h * w * (1 - mask_rate))
inputs = jnp.ones((batch_size,) + image_size + (c,))
rng = random.PRNGKey(0)
params_rng, mask_rng, dropout_rng = random.split(rng, num=3)
encoder = mae.Encoder(
mlp_dim=32,
num_layers=2,
num_heads=8,
patch_size=patch_size,
mask_rate=mask_rate,
hidden_size=hidden_size,
)
(encoded_patches, unmasked, masked), _ = encoder.init_with_output(
{"params": params_rng, "patch_mask": mask_rng, "dropout": dropout_rng},
inputs,
train=True,
)
self.assertEqual(
encoded_patches.shape, (batch_size, num_patches, hidden_size)
)
self.assertEqual(unmasked.shape, (batch_size, num_patches))
self.assertEqual(masked.shape, (batch_size, h * w - num_patches))
decoder = mae.Decoder(
output_size=image_size + (c,),
patch_size=patch_size,
mlp_dim=32,
num_layers=2,
num_heads=8,
)
decoded_patches, _ = decoder.init_with_output(
{"params": params_rng, "dropout": dropout_rng},
encoded_patches,
unmasked,
train=True,
)
self.assertEqual(
decoded_patches.shape,
(batch_size, h * w, patch_size[0] * patch_size[1] * c),
)
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for HuBERT."""
import tempfile
from typing import Callable
from chirp import config_utils
from chirp.configs import config_globals
from chirp.configs import hubert_base_pq
from chirp.data import utils as data_utils
from chirp.preprocessing import pipeline
from chirp.tests import fake_dataset
from chirp.train import hubert as hubert_train
from clu import checkpoint
from flax import linen as nn
import jax
from jax import numpy as jnp
from jax import random
from ml_collections import config_dict
from absl.testing import absltest
class ConstantEarlyFeatureExtractor(nn.Module):
"""A no-op encoder for quickly testing train+test loops."""
conv_layer_tuples: tuple[tuple[int, int, int], ...]
dropout_prob: float = 0.0
activation: Callable[[jnp.ndarray], jnp.ndarray] = nn.gelu
deprecated_group_conv: bool = False
sz: int = 2
csz: int = 1
@nn.compact
def __call__(
self, inputs: jnp.ndarray, *unused_args, **unused_kwargs
) -> jnp.ndarray:
del unused_args, unused_kwargs
return jnp.zeros([inputs.shape[0], self.sz, self.csz])
class ConstantLateFeatureExtractor(nn.Module):
model_dims: int = 6
sz: int = 2
@nn.compact
def __call__(
self, inputs: jnp.ndarray, *unused_args, **unused_kwargs
) -> jnp.ndarray:
del unused_args, unused_kwargs
return [jnp.zeros([inputs.shape[0], self.sz, self.model_dims])]
class HuBERTTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.train_dir = tempfile.TemporaryDirectory("train_dir").name
self.data_dir = tempfile.TemporaryDirectory("data_dir").name
fake_builder = fake_dataset.FakeDataset(data_dir=self.data_dir)
fake_builder.download_and_prepare()
self.builder = fake_builder
self.sample_rate_hz = 32000
self.model_dims = 6
self.window_size_s = 5
# Adds only one product quantizer, in the melspec space.
self.num_centroids = 3
self.num_sections = 2
self.readout_points = (0,)
self.alpha = 1.0
self.config = self._get_test_config()
self.initialized_model = hubert_train.initialize_model(
workdir=self.train_dir,
num_train_steps=1,
conv_layer_tuples=tuple([(1, 10, 80000)]), # this leads to 2 frames.
early_fs_class=ConstantEarlyFeatureExtractor,
**self.config.init_config
)
(self.model_bundle, self.train_state, self.learn_rate_schedule) = (
self.initialized_model
)
self.model = self.model_bundle.model
self.key = self.model_bundle.key
self.model_state = self.train_state.model_state
self.params = self.train_state.params
def _get_test_dataset(self, config):
"""Gets the dataset to use for these tests."""
ds, dataset_info = data_utils.get_dataset(
"train",
dataset_directory=self.builder.data_dir,
pipeline=config.train_dataset_config.pipeline,
)
return ds, dataset_info
def _get_test_config(
self, config_module=hubert_base_pq
) -> config_dict.ConfigDict:
"""Reduces test config sizes to avoid memory blowouts."""
config = config_module.get_config()
config.sample_rate_hz = self.sample_rate_hz
config.train_config.num_train_steps = 1
config.train_config.log_every_steps = 1
config.train_config.checkpoint_every_steps = 1
config.eval_config.eval_steps_per_checkpoint = 1
config.init_config.base_quantizer_config.num_centroids = self.num_centroids
config.init_config.quantizer_config.num_sections = self.num_sections
config.init_config.model_config.readout_points = self.readout_points
config.init_config.model_config.mask_config.min_masks = 0
config.init_config.model_config.mask_config.mask_length = 1
config.init_config.frontend_config.stride = 80000 # yields 2 frames.
config.init_config.frontend_config.sample_rate = self.sample_rate_hz
config.init_config.input_size = self.window_size_s * self.sample_rate_hz
config = config_utils.parse_config(config, config_globals.get_globals())
config.init_config.model_config.late_feature_extractor = (
ConstantLateFeatureExtractor()
)
config.train_dataset_config.pipeline = pipeline.Pipeline(
ops=[
pipeline.OnlyJaxTypes(),
pipeline.ConvertBirdTaxonomyLabels(
source_namespace="ebird2021",
target_class_list="xenocanto",
add_taxonomic_labels=True,
),
pipeline.MixAudio(mixin_prob=0.0),
pipeline.Batch(batch_size=2, split_across_devices=True),
pipeline.RandomSlice(window_size=self.window_size_s),
pipeline.RandomNormalizeAudio(min_gain=0.15, max_gain=0.25),
]
)
config.eval_dataset_config.pipeline = pipeline.Pipeline(
ops=[
pipeline.OnlyJaxTypes(),
pipeline.MultiHot(),
pipeline.Batch(batch_size=2, split_across_devices=True),
pipeline.Slice(
window_size=self.window_size_s, start=0.0, names=("audio",)
),
pipeline.NormalizeAudio(target_gain=0.2, names=("audio",)),
]
)
return config
def test_shapes(self):
"""Test that the shapes of outputs returned by HuBERT are as expected."""
batch_size = 2
num_frames = 2
inputs = jnp.zeros([batch_size, self.window_size_s * self.sample_rate_hz])
step_key, key = random.split(self.key)
dropout_key, low_pass_key = random.split(step_key)
mask_key, _ = random.split(key)
variables = {"params": self.params, **self.model_state}
model_outputs, _ = self.model.apply(
variables,
inputs,
train=True,
mask_key=mask_key,
train_mode_quantizer=True,
mutable=list(self.model_state.keys()),
rngs={
"dropout": dropout_key,
"low_pass": low_pass_key,
},
)
# Ensure that the number of logits matches that of targets. There will be
# as many "sets" of these as there are quantizers. In this case it should
# be just one, since `quantizer_points` has a single element.
self.assertEqual(len(model_outputs.logits), len(model_outputs.targets))
self.assertLen(model_outputs.logits, 1)
# Ensure the shapes of embeddings and logits are as expected.
self.assertSequenceEqual(
model_outputs.embedding[-1].shape,
(batch_size, num_frames, self.model_dims),
)
self.assertSequenceEqual(
model_outputs.logits[-1].shape,
(self.num_sections, batch_size, num_frames, self.num_centroids),
)
def test_gradients(self):
"""Test that there is no gradient from HuBERT's loss to the quantizers."""
batch_size = 2
inputs = jnp.zeros([batch_size, self.window_size_s * self.sample_rate_hz])
step_key, key = random.split(self.key)
dropout_key, low_pass_key = random.split(step_key)
mask_key, _ = random.split(key)
def step(params, model_state):
variables = {"params": params, **model_state}
model_outputs, _ = self.model.apply(
variables,
inputs,
train=True,
mask_key=mask_key,
train_mode_quantizer=True,
mutable=list(model_state.keys()),
rngs={
"dropout": dropout_key,
"low_pass": low_pass_key,
},
)
hubert_loss = jnp.mean(
hubert_train.hubert_loss_from_outputs(
model_outputs, alpha=self.alpha, hubert_loss_mult=1.0
)
)
return hubert_loss
_, grads = jax.value_and_grad(step)(self.params, self.model_state)
self.assertIsNotNone(grads)
def get_all_leaves(d):
leaves = []
if not isinstance(d, dict):
leaves.append(d)
else:
for _, v in d.items():
leaves.extend(get_all_leaves(v))
return leaves
for k, v in grads.items():
if "quantizer" in k:
quantizer_grads = get_all_leaves(v)
for quant_grad in quantizer_grads:
self.assertTrue((quant_grad == jnp.zeros_like(quant_grad)).all())
def test_train_one_step(self):
"""Test one step of training."""
ds, _ = self._get_test_dataset(self.config)
hubert_train.train(
*self.initialized_model,
train_dataset=ds,
reload_quantizer=False,
logdir=self.train_dir,
num_train_steps=1,
log_every_steps=1,
checkpoint_every_steps=1,
num_quantizer_pretrain_steps=self.config.train_config.num_quantizer_pretrain_steps,
quant_loss_mult=0.0,
readout_loss_mult=0.0,
hubert_loss_mult=1.0
)
ckpt = checkpoint.MultihostCheckpoint(self.train_dir)
self.assertIsNotNone(ckpt.latest_checkpoint)
def test_eval_one_step(self):
ds, _ = self._get_test_dataset(self.config)
# Write a checkpoint, or else the eval will hang.
self.model_bundle.ckpt.save(self.train_state)
self.config.eval_config.num_train_steps = 0
hubert_train.evaluate(
model_bundle=self.model_bundle,
train_state=self.train_state,
learning_rate_schedule=self.learn_rate_schedule,
valid_dataset=ds,
workdir=self.train_dir,
eval_sleep_s=0,
**self.config.eval_config
)
ckpt = checkpoint.MultihostCheckpoint(self.train_dir)
self.assertIsNotNone(ckpt.latest_checkpoint)
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for namespace_db."""
import io
import tempfile
from absl import logging
from chirp.taxonomy import namespace
from chirp.taxonomy import namespace_db
import numpy as np
import tensorflow as tf
from absl.testing import absltest
class NamespaceDbTest(absltest.TestCase):
def test_load_namespace_db(self):
db = namespace_db.load_db()
# Check a couple ClassLists of known size.
self.assertIn('caples', db.class_lists)
caples_list = db.class_lists['caples']
self.assertEqual(caples_list.namespace, 'ebird2021')
self.assertLen(caples_list.classes, 79)
genus_mapping = db.mappings['ebird2021_to_genus']
caples_genera = caples_list.apply_namespace_mapping(genus_mapping)
self.assertEqual(caples_genera.namespace, 'ebird2021_genera')
self.assertLen(caples_genera.classes, 62)
family_mapping = db.mappings['ebird2021_to_family']
caples_families = caples_list.apply_namespace_mapping(family_mapping)
self.assertEqual(caples_families.namespace, 'ebird2021_families')
self.assertLen(caples_families.classes, 30)
order_mapping = db.mappings['ebird2021_to_order']
caples_orders = caples_list.apply_namespace_mapping(order_mapping)
self.assertEqual(caples_orders.namespace, 'ebird2021_orders')
self.assertLen(caples_orders.classes, 11)
def test_class_maps(self):
db = namespace_db.load_db()
caples_list = db.class_lists['caples']
sierras_list = db.class_lists['sierra_nevadas']
table, image_mask = caples_list.get_class_map_tf_lookup(sierras_list)
# The Caples list is a strict subset of the Sierras list.
self.assertLen(caples_list.classes, np.sum(image_mask))
self.assertEqual(image_mask.shape, (len(sierras_list.classes),))
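# Every Caples class should map to a valid (nonnegative) index in the
# Sierras list.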
for i in range(len(caples_list.classes)):
self.assertGreaterEqual(
table.lookup(tf.constant([i], dtype=tf.int64)).numpy()[0], 0
)
def test_class_map_csv(self):
cl = namespace.ClassList(
'ebird2021', ('amecro', 'amegfi', 'amered', 'amerob')
)
cl_csv = cl.to_csv()
with io.StringIO(cl_csv) as f:
got_cl = namespace.ClassList.from_csv(f)
self.assertEqual(got_cl.namespace, 'ebird2021')
self.assertEqual(got_cl.classes, ('amecro', 'amegfi', 'amered', 'amerob'))
# Check that writing with tf.io.gfile behaves as expected, as newline
# behavior may be different than working with StringIO.
with tempfile.NamedTemporaryFile(suffix='.csv') as f:
with tf.io.gfile.GFile(f.name, 'w') as gf:
gf.write(cl_csv)
with open(f.name, 'r') as f:
got_cl = namespace.ClassList.from_csv(f.readlines())
self.assertEqual(got_cl.namespace, 'ebird2021')
self.assertEqual(got_cl.classes, ('amecro', 'amegfi', 'amered', 'amerob'))
def test_namespace_class_list_closure(self):
# Ensure that all classes in class lists appear in their namespace.
db = namespace_db.load_db()
all_missing_classes = set()
for list_name, class_list in db.class_lists.items():
missing_classes = set()
namespace = db.namespaces[class_list.namespace]
for cl in class_list.classes:
if cl not in namespace.classes:
missing_classes.add(cl)
all_missing_classes.add(cl)
if missing_classes:
logging.warning(
'The classes %s in class list %s did not appear in namespace %s.',
missing_classes,
list_name,
class_list.namespace,
)
missing_classes.discard('unknown')
all_missing_classes.discard('unknown')
self.assertEmpty(all_missing_classes)
def test_namespace_mapping_closure(self):
# Ensure that all classes in mappings appear in their namespace.
db = namespace_db.load_db()
all_missing_classes = set()
for mapping_name, mapping in db.mappings.items():
missing_source_classes = set()
missing_target_classes = set()
source_namespace = db.namespaces[mapping.source_namespace]
target_namespace = db.namespaces[mapping.target_namespace]
for source_cl, target_cl in mapping.mapped_pairs.items():
if source_cl not in source_namespace.classes:
missing_source_classes.add(source_cl)
all_missing_classes.add(source_cl)
if target_cl not in target_namespace.classes:
missing_target_classes.add(target_cl)
all_missing_classes.add(target_cl)
if missing_source_classes:
logging.warning(
'The classes %s in mapping %s did not appear in namespace %s.',
missing_source_classes,
mapping_name,
source_namespace.name,
)
if missing_target_classes:
logging.warning(
'The classes %s in mapping %s did not appear in namespace %s.',
missing_target_classes,
mapping_name,
target_namespace.name,
)
missing_target_classes.discard('unknown')
self.assertEmpty(all_missing_classes)
def test_taxonomic_mappings(self):
# Ensure that all ebird2021 species appear in taxonomic mappings.
db = namespace_db.load_db()
ebird = db.namespaces['ebird2021_species']
genera = db.mappings['ebird2021_to_genus'].mapped_pairs
families = db.mappings['ebird2021_to_family'].mapped_pairs
orders = db.mappings['ebird2021_to_order'].mapped_pairs
missing_genera = set()
missing_families = set()
missing_orders = set()
for cl in ebird.classes:
if cl not in genera:
missing_genera.add(cl)
if cl not in families:
missing_families.add(cl)
if cl not in orders:
missing_orders.add(cl)
self.assertEmpty(missing_genera)
self.assertEmpty(missing_families)
self.assertEmpty(missing_orders)
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for frontend."""
from chirp.models import frontend
from jax import numpy as jnp
from jax import random
from jax.experimental import jax2tf
import numpy as np
import tensorflow as tf
from absl.testing import absltest
from absl.testing import parameterized
class FrontendTest(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.length_s = 2
cls.sample_rate_hz = 11_025
cls.num_samples = cls.sample_rate_hz * cls.length_s
cls.batch_dims = (2, 3)
cls.signal = jnp.sin(jnp.linspace(0.0, 440 * jnp.pi, cls.num_samples))
cls.noise = 0.5 * random.normal(
random.PRNGKey(0), cls.batch_dims + (cls.num_samples,)
)
cls.audio = cls.signal + cls.noise
@parameterized.product(
(
{
"module_type": frontend.STFT,
"module_kwargs": {},
},
{
"module_type": frontend.MelSpectrogram,
"module_kwargs": {
"kernel_size": 128,
"sample_rate": 11_025,
"freq_range": (60, 5_000),
},
},
{
"module_type": frontend.SimpleMelspec,
"module_kwargs": {
"kernel_size": 128,
"sample_rate": 11_025,
"freq_range": (60, 5_000),
},
},
{
"module_type": frontend.LearnedFrontend,
"module_kwargs": {
"kernel_size": 256,
},
},
{
"module_type": frontend.MorletWaveletTransform,
"module_kwargs": {
"kernel_size": 256,
"sample_rate": 11_025,
"freq_range": (60, 10_000),
},
},
),
stride=(10, 11),
)
def test_output_size(self, module_type, module_kwargs, stride):
features = 7
module = module_type(stride=stride, features=features, **module_kwargs)
variables = module.init(random.PRNGKey(0), self.audio)
output = module.apply(variables, self.audio)
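# The frontend should produce ceil(num_samples / stride) frames;
# -(-n // s) is ceiling division.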
self.assertEqual(
output.shape,
self.batch_dims + (-(-self.num_samples // stride), features),
)
@parameterized.parameters(
(frontend.STFT, frontend.ISTFT, {}),
(
frontend.LearnedFrontend,
frontend.InverseLearnedFrontend,
{
"kernel_size": 256,
},
),
)
def test_inverse(self, module_type, inverse_module_type, module_kwargs):
stride = 10
features = 7
module = module_type(stride=stride, features=features, **module_kwargs)
variables = module.init(random.PRNGKey(0), self.audio)
output = module.apply(variables, self.audio)
inverse_module = inverse_module_type(stride=stride, **module_kwargs)
inverse_variables = inverse_module.init(random.PRNGKey(0), output)
inversed = inverse_module.apply(inverse_variables, output)
self.assertEqual(self.audio.shape, inversed.shape)
@parameterized.parameters(
{
"module_type": frontend.STFT,
"module_kwargs": {
"features": 129, # Note: Required that f-1=2**k for some k.
"stride": 64,
},
},
{
"module_type": frontend.MelSpectrogram,
"module_kwargs": {
"features": 32,
"stride": 64,
"kernel_size": 64,
"sample_rate": 22_025,
"freq_range": (60, 10_000),
},
"atol": 1e-4,
},
{
"module_type": frontend.SimpleMelspec,
"module_kwargs": {
"features": 32,
"stride": 64,
"kernel_size": 64,
"sample_rate": 22_025,
"freq_range": (60, 10_000),
},
"atol": 1e-4,
},
{
"module_type": frontend.LearnedFrontend,
"module_kwargs": {
"features": 32,
"stride": 64,
"kernel_size": 64,
},
},
{
"module_type": frontend.ISTFT,
"module_kwargs": {
"stride": 64,
},
"signal_shape": (1, 25, 64),
},
{
"module_type": frontend.InverseLearnedFrontend,
"module_kwargs": {
"stride": 32,
"kernel_size": 64,
},
"signal_shape": (1, 25, 64),
},
)
def test_tflite_stft_export(
self, module_type, module_kwargs, signal_shape=None, atol=1e-6
):
# Note that the TFLite stft requires power-of-two nfft, given by:
# nfft = 2 * (features - 1).
if signal_shape is None:
signal = self.audio
else:
signal = jnp.zeros(signal_shape, jnp.float32)
fe = module_type(**module_kwargs)
params = fe.init(random.PRNGKey(0), signal)
tf_predict = tf.function(
jax2tf.convert(
lambda signal: fe.apply(params, signal), enable_xla=False
),
input_signature=[
tf.TensorSpec(shape=signal.shape, dtype=tf.float32, name="input")
],
autograph=False,
)
converter = tf.lite.TFLiteConverter.from_concrete_functions(
[tf_predict.get_concrete_function()], tf_predict
)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
tf.lite.OpsSet.SELECT_TF_OPS, # enable TensorFlow ops.
]
tflite_float_model = converter.convert()
# Use the converted TFLite model.
interpreter = tf.lite.Interpreter(model_content=tflite_float_model)
interpreter.allocate_tensors()
input_tensor = interpreter.get_input_details()[0]
output_tensor = interpreter.get_output_details()[0]
interpreter.set_tensor(input_tensor["index"], signal)
interpreter.invoke()
output_tflite = interpreter.get_tensor(output_tensor["index"])
# Check approximate agreement of TFLite output with the jax function.
output_jax = fe.apply(params, signal)
np.testing.assert_allclose(output_tflite, output_jax, atol=atol)
def test_simple_melspec(self):
frontend_args = {
"features": 32,
"stride": 64,
"kernel_size": 64,
"sample_rate": 22_025,
"freq_range": (60, 10_000),
}
simple_mel = frontend.SimpleMelspec(**frontend_args)
simple_mel_params = simple_mel.init(random.PRNGKey(0), self.audio)
got_simple = simple_mel.apply(simple_mel_params, self.audio)
# Check that export works without SELECT_TF_OPS.
tf_predict = tf.function(
jax2tf.convert(
lambda signal: simple_mel.apply(simple_mel_params, signal),
enable_xla=False,
),
input_signature=[
tf.TensorSpec(
shape=self.audio.shape, dtype=tf.float32, name="input"
)
],
autograph=False,
)
converter = tf.lite.TFLiteConverter.from_concrete_functions(
[tf_predict.get_concrete_function()], tf_predict
)
converter.target_spec.supported_ops = [
# Look, ma, no tf.lite.OpsSet.SELECT_TF_OPS!
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
]
tflite_float_model = converter.convert()
# Use the converted TFLite model.
interpreter = tf.lite.Interpreter(model_content=tflite_float_model)
interpreter.allocate_tensors()
input_tensor = interpreter.get_input_details()[0]
output_tensor = interpreter.get_output_details()[0]
interpreter.set_tensor(input_tensor["index"], self.audio)
interpreter.invoke()
output_tflite = interpreter.get_tensor(output_tensor["index"])
# Check approximate agreement of TFLite output with the jax function.
np.testing.assert_allclose(output_tflite, got_simple, atol=1e-4)
if __name__ == "__main__":
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from chirp.data import tfds_builder
import tensorflow_datasets as tfds
from absl.testing import absltest
def _manual_data_dir() -> str:
return os.path.join(
os.path.normpath(os.path.dirname(__file__)),
'testdata',
'tfds_builder_wav_directory_test',
)
class FakeWavDirectory(tfds_builder.WavDirectoryBuilder):
"""Test-only concrete subclass of the abstract base class under test."""
# A workaround for the following error in importlib_resources:
# 'NoneType' object has no attribute 'submodule_search_locations'
__module__ = tfds_builder.__name__
VERSION = tfds.core.Version('1.2.3')
RELEASE_NOTES = {
'1.2.3': 'Initial release.',
}
def _description(self) -> str:
return 'Unit test fixture with WAV files in a nested directory structure.'
def _citation(self) -> str:
return 'FakeWavDirectory, private communication, October, 2022.'
class WavDirectoryDatasetUnfilteredTest(tfds.testing.DatasetBuilderTestCase):
"""Tests WavDirectoryDataset with "unfiltered" configuration."""
DATASET_CLASS = FakeWavDirectory
EXAMPLE_DIR = _manual_data_dir()
BUILDER_CONFIG_NAMES_TO_TEST = ['unfiltered']
SPLITS = {'train': 3}
@classmethod
def setUpClass(cls):
super().setUpClass()
# TODO(bartvm): Remove when tensorflow-datasets PyPI package is fixed
@absltest.skip
def test_tags_are_valid(self):
pass
class WavDirectoryDatasetSlicePeakedTest(tfds.testing.DatasetBuilderTestCase):
"""Tests WavDirectoryDataset with "slice_peaked" configuration."""
DATASET_CLASS = FakeWavDirectory
EXAMPLE_DIR = _manual_data_dir()
BUILDER_CONFIG_NAMES_TO_TEST = ['slice_peaked']
SPLITS = {'train': 2}
@classmethod
def setUpClass(cls):
super().setUpClass()
# TODO(bartvm): Remove when tensorflow-datasets PyPI package is fixed
@absltest.skip
def test_tags_are_valid(self):
pass
if __name__ == '__main__':
tfds.testing.test_main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bird taxonomy dataset tests."""
import shutil
import tempfile
from unittest import mock
from chirp.data.soundscapes import soundscapes
from chirp.data.soundscapes import soundscapes_lib
from chirp.taxonomy import namespace
from etils import epath
import pandas as pd
import tensorflow_datasets as tfds
from absl.testing import absltest
def mock_localization_fn(audio, sr, interval_length_s, max_intervals):
del audio
del max_intervals
target_length = sr * interval_length_s
return [(0, target_length)]
class SoundscapeTest(tfds.testing.DatasetBuilderTestCase):
"""Tests for the soundscape dataset."""
DATASET_CLASS = soundscapes.Soundscapes
BUILDER_CONFIG_NAMES_TO_TEST = [
config.name
for config in DATASET_CLASS.BUILDER_CONFIGS
if config.name in ['caples']
]
EXAMPLE_DIR = DATASET_CLASS.code_path.parent / 'placeholder_data'
DL_EXTRACT_RESULT = {
'segments': EXAMPLE_DIR / 'test.csv',
}
SPLITS = {'train': 3}
SKIP_CHECKSUMS = True
@classmethod
def setUpClass(cls):
super().setUpClass()
# `self.create_tempdir()` raises an UnparsedFlagAccessError, which is why
# we use `tempdir` directly.
cls.tempdir = tempfile.mkdtemp()
_ = tfds.core.lazy_imports.librosa
cls.metadata_patcher = mock.patch.object(soundscapes_lib, 'load_class_list')
cls.loc_patcher = mock.patch.object(
cls.DATASET_CLASS.BUILDER_CONFIGS[0],
'localization_fn',
mock_localization_fn,
)
cls.url_patcher = mock.patch.object(
cls.DATASET_CLASS.BUILDER_CONFIGS[0],
'audio_dir',
epath.Path(cls.tempdir),
)
# We mock the localization part with a function that finds signal in the
# first interval_length_s (5 sec.). This means that fake segments 1, 2 and 4
# should be selected. Segment 3 should not be selected (it does not overlap
# with the localization_fn output) and segment 5 should also be skipped because
# the annotation is invalid (end < start). So we should end up with 3
# recordings.
cls.loc_patcher.start()
mock_load_class_list = cls.metadata_patcher.start()
mock_load_class_list.return_value = namespace.ClassList(
'test_namespace',
['fakecode1', 'fakecode2', 'superrare', 'superfly'],
)
fake_segments = pd.read_csv(cls.EXAMPLE_DIR / 'test.csv')
fake_segments['ebird_codes'] = fake_segments['ebird_codes'].apply(
lambda codes: codes.split()
)
cls.url_patcher.start()
subdir = epath.Path(cls.tempdir) / 'caples' / 'audio'
subdir.mkdir(parents=True)
tfds.core.lazy_imports.pydub.AudioSegment.silent(duration=100000).export(
subdir / 'soundscape_1.flac', format='flac'
)
tfds.core.lazy_imports.pydub.AudioSegment.silent(duration=100000).export(
subdir / 'soundscape_2.wav', format='wav'
)
tfds.core.lazy_imports.pydub.AudioSegment.silent(duration=100000).export(
subdir / 'soundscape_3.wav', format='wav'
)
tfds.core.lazy_imports.pydub.AudioSegment.silent(duration=100000).export(
subdir / 'soundscape_4.wav', format='wav'
)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls.metadata_patcher.stop()
cls.loc_patcher.stop()
cls.url_patcher.stop()
shutil.rmtree(cls.tempdir)
# TODO(bartvm): Remove when tensorflow-datasets PyPI package is fixed
@absltest.skip
def test_tags_are_valid(self):
pass
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A fake dataset for testing."""
from typing import Tuple
from chirp.data import bird_taxonomy
import numpy as np
def window_signal(signal, win_len=2000):
ramp = np.linspace(0.0, 1.0, win_len // 2)
signal[: win_len // 2] *= ramp
signal[-win_len // 2 :] *= ramp[::-1]
return signal
def make_dsp_chirp(n_samples, start_f, stop_f, linear=True):
"""Creates a np.array with a sine sweep, aka a `chirp`."""
time = np.linspace(0.0, 1.0, n_samples)
space_fn = np.linspace if linear else np.geomspace
sweep_f = space_fn(start_f, stop_f, n_samples)
chirp_ = np.sin(np.pi * sweep_f * time)
return window_signal(chirp_)
def make_random_dsp_chirps(total_len, gain=0.25):
"""Makes a clip with two random sine sweeps (chirps)."""
n_samples = total_len // 4
chirp_clip = np.zeros((total_len,))
ch1_beg, ch2_beg = np.random.choice(np.arange(300, 2001, 50), size=(2,))
ch1_end, ch2_end = np.random.choice(np.arange(800, 3801, 50), size=(2,))
chirp1 = make_dsp_chirp(n_samples, ch1_beg, ch1_end, linear=False)
chirp2 = make_dsp_chirp(n_samples, ch2_beg, ch2_end, linear=True)
chirp_clip[n_samples : n_samples * 2] = chirp1
chirp_clip[int(n_samples * 2.5) : int(n_samples * 3.5)] = chirp2
return chirp_clip * gain
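# A minimal usage sketch (the clip length below is an arbitrary choice for
# illustration):
#
#   clip = make_random_dsp_chirps(total_len=8_000)
#   assert clip.shape == (8_000,)
#   assert np.abs(clip).max() <= 0.25  # The default gain bounds the amplitude.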
class FakeDataset(bird_taxonomy.BirdTaxonomy):
"""Fake dataset."""
def _split_generators(self, dl_manager):
return {
'train': self._generate_examples(100),
'test': self._generate_examples(20),
}
@staticmethod
def _make_signal(shape: Tuple[int, ...]) -> np.ndarray:
return np.random.uniform(-1.0, 0.99, shape)
def _generate_one_example(self, i):
return {
'audio': self._make_signal(self.info.features['audio'].shape).astype(
np.float32
),
'recording_id': i,
'segment_id': -1 + i,
'segment_start': 17,
'segment_end': 17 + self.info.features['audio'].shape[0],
'label': np.random.choice(self.info.features['label'].names, size=[3]),
'bg_labels': np.random.choice(
self.info.features['bg_labels'].names, size=[2]
),
'filename': 'placeholder',
'quality_score': np.random.choice(['A', 'B', 'C', 'D', 'E', 'F']),
'license': '//creativecommons.org/licenses/by-nc-sa/4.0/',
'country': np.random.choice(['South Africa', 'Colombia', 'Namibia']),
'altitude': str(np.random.uniform(0, 3000)),
'length': np.random.choice(['1:10', '0:01']),
'bird_seen': np.random.choice(['yes', 'no']),
'latitude': str(np.random.uniform(0, 90)),
'longitude': str(np.random.uniform(0, 90)),
'playback_used': 'yes',
'recordist': 'N/A',
'remarks': 'N/A',
'sound_type': np.random.choice(['call', 'song']),
}
def _generate_examples(self, num_examples):
for i in range(num_examples):
yield i, self._generate_one_example(i)
class FakeChirpDataset(FakeDataset):
"""Fake dataset with DSP chirps; useful for debugging separation."""
@staticmethod
def _make_signal(shape: Tuple[int, ...]) -> np.ndarray:
return make_random_dsp_chirps(shape[0])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A CLU metric that averages values separately for each class."""
from typing import Any
from clu import metrics
import flax
from jax import numpy as jnp
@flax.struct.dataclass
class ClassAverage(metrics.Metric):
"""A metric that calculates the average over each class.
This metric assumes that it's given a score for each example in the batch
along with a mapping from examples to (potentially multiple) classes in the
form of a multi-hot encoding.
"""
total: jnp.ndarray
count: jnp.ndarray
@classmethod
def empty(cls):
return cls(total=jnp.zeros((1,), float), count=jnp.zeros((1,), int))
@classmethod
def from_model_output(
cls, values: tuple[jnp.ndarray, jnp.ndarray], **_
) -> metrics.Metric:
return cls(total=values[0] @ values[1], count=jnp.sum(values[1], axis=0))
def merge(self, other: "ClassAverage") -> "ClassAverage":
return type(self)(
total=self.total + other.total,
count=self.count + other.count,
)
def compute(self) -> Any:
# Avoid introducing NaNs due to classes without positive labels
class_means = jnp.where(self.count > 0, self.total / self.count, 0.0)
return jnp.sum(class_means) / jnp.sum(self.count > 0)
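# A minimal usage sketch, assuming per-example scores of shape [batch] and a
# multi-hot label matrix of shape [batch, num_classes]:
#
#   scores = jnp.array([0.5, 1.0])
#   labels = jnp.array([[1.0, 0.0], [1.0, 1.0]])
#   metric = ClassAverage.from_model_output((scores, labels))
#   metric.compute()  # ((0.5 + 1.0) / 2 + 1.0) / 2 = 0.875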
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for training and validation."""
from typing import Any, Dict
from jax import lax
from jax import numpy as jnp
from jax import scipy
def map_(
logits: jnp.ndarray,
labels: jnp.ndarray,
label_mask: jnp.ndarray | None = None,
sort_descending: bool = True,
) -> jnp.ndarray:
return average_precision(
scores=logits,
labels=labels,
label_mask=label_mask,
sort_descending=sort_descending,
)
def cmap(
logits: jnp.ndarray,
labels: jnp.ndarray,
sort_descending: bool = True,
sample_threshold: int = 0,
) -> Dict[str, Any]:
"""Class mean average precision."""
class_aps = average_precision(
scores=logits.T, labels=labels.T, sort_descending=sort_descending
)
mask = jnp.sum(labels, axis=0) > sample_threshold
class_aps = jnp.where(mask, class_aps, jnp.nan)
macro_cmap = jnp.mean(class_aps, where=mask)
return {
'macro': macro_cmap,
'individual': class_aps,
}
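# A minimal usage sketch: three examples over two classes. Each class's average
# precision is computed over the (transposed) batch axis, then averaged over
# classes with more than `sample_threshold` positives.
#
#   logits = jnp.array([[2.0, 0.1], [0.3, 1.5], [1.0, 0.2]])
#   labels = jnp.array([[1, 0], [0, 1], [1, 0]])
#   cmap(logits, labels)['macro']  # Mean of the two per-class APs.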
def roc_auc(
logits: jnp.ndarray,
labels: jnp.ndarray,
label_mask: jnp.ndarray | None = None,
sort_descending: bool = True,
sample_threshold: int = 0,
) -> Dict[str, Any]:
"""Computes ROC-AUC scores.
Args:
logits: A score for each label which can be ranked.
labels: A multi-hot encoding of the ground truth positives. Must match the
shape of scores.
label_mask: A mask indicating which labels to involve in the calculation.
sort_descending: An indicator if the search result ordering is in descending
order (e.g. for evaluating over similarity metrics where higher scores are
preferred). If false, computes the metric on ascendingly sorted inputs.
sample_threshold: Determines whether or not to compute the metric if there
are fewer than `sample_threshold` datapoints.
Returns:
A dictionary of ROC-AUC scores using the arithmetic ('macro') and
geometric means, along with individual class ('individual') ROC-AUC and its
variance.
"""
if label_mask is not None:
label_mask = label_mask.T
class_roc_auc, class_roc_auc_var = generalized_mean_rank(
logits.T, labels.T, label_mask=label_mask, sort_descending=sort_descending
)
mask = jnp.sum(labels, axis=0) > sample_threshold
class_roc_auc = jnp.where(mask, class_roc_auc, jnp.nan)
class_roc_auc_var = jnp.where(mask, class_roc_auc_var, jnp.nan)
return {
'macro': jnp.mean(class_roc_auc, where=mask),
'geometric': jnp.exp(jnp.mean(jnp.log(class_roc_auc), where=mask)),
'individual': class_roc_auc,
'individual_var': class_roc_auc_var,
}
def negative_snr_loss(
source: jnp.ndarray,
estimate: jnp.ndarray,
max_snr: float = 1e6,
eps: float = 1e-8,
) -> jnp.ndarray:
"""Negative SNR loss with max SNR term.
Args:
source: Groundtruth signal.
estimate: Estimated signal.
max_snr: SNR threshold which minimizes loss. The default 1e6 yields an
unbiased SNR calculation.
eps: Log stabilization epsilon.
Returns:
Loss tensor.
"""
snrfactor = 10.0 ** (-max_snr / 10.0)
ref_pow = jnp.sum(jnp.square(source), axis=-1)
bias = snrfactor * ref_pow
numer = 10.0 * jnp.log10(ref_pow + eps)
err_pow = jnp.sum(jnp.square(source - estimate), axis=-1)
return 10 * jnp.log10(bias + err_pow + eps) - numer
def average_precision(
scores: jnp.ndarray,
labels: jnp.ndarray,
label_mask: jnp.ndarray | None = None,
sort_descending: bool = True,
interpolated: bool = False,
) -> jnp.ndarray:
"""Average precision.
The average precision is the area under the precision-recall curve. When
using interpolation we take the maximum precision over all smaller recalls.
The intuition is that it often makes sense to evaluate more documents if the
total percentage of relevant documents increases.
Average precision is computed over the last axis.
Args:
scores: A score for each label which can be ranked.
labels: A multi-hot encoding of the ground truth positives. Must match the
shape of scores.
label_mask: A mask indicating which labels to involve in the calculation.
sort_descending: An indicator if the search result ordering is in descending
order (e.g. for evaluating over similarity metrics where higher scores are
preferred). If false, computes average_precision on ascendingly sorted
inputs.
interpolated: Whether to use interpolation.
Returns:
The average precision.
"""
if label_mask is not None:
# Set all masked labels to zero, and send the scores for those labels to a
# low/high value (depending on whether we sort in descending order or not).
# Then the masked scores+labels will not impact the average precision
# calculation.
labels = labels * label_mask
extremum_score = (
jnp.min(scores) - 1.0 if sort_descending else jnp.max(scores) + 1.0
)
scores = jnp.where(label_mask, scores, extremum_score)
idx = jnp.argsort(scores)
if sort_descending:
idx = jnp.flip(idx, axis=-1)
scores = jnp.take_along_axis(scores, idx, axis=-1)
labels = jnp.take_along_axis(labels, idx, axis=-1)
pr_curve = jnp.cumsum(labels, axis=-1) / (jnp.arange(labels.shape[-1]) + 1)
if interpolated:
pr_curve = lax.cummax(pr_curve, reverse=True, axis=-1)
# In case of an empty row, assign precision = 0, and avoid dividing by zero.
mask = jnp.float32(jnp.sum(labels, axis=-1) != 0)
raw_av_prec = jnp.sum(pr_curve * labels, axis=-1) / jnp.maximum(
jnp.sum(labels, axis=-1), 1.0
)
return mask * raw_av_prec
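# A small worked example: with scores [0.9, 0.1, 0.8] and positives at indices
# 0 and 2, the descending ranking retrieves both positives first, so the
# average precision is (1/1 + 2/2) / 2 = 1.0.
#
#   average_precision(jnp.array([0.9, 0.1, 0.8]), jnp.array([1.0, 0.0, 1.0]))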
def generalized_mean_rank(
scores: jnp.ndarray,
labels: jnp.ndarray,
label_mask: jnp.ndarray | None = None,
sort_descending: bool = True,
) -> tuple[jnp.ndarray, jnp.ndarray]:
"""Computes the generalized mean rank and its variance over the last axis.
The generalized mean rank can be expressed as
(sum_i #P ranked above N_i) / (#P * #N),
or equivalently,
1 - (sum_i #N ranked above P_i) / (#P * #N).
This metric is usually better visualized in the logits domain, where it
reflects the log-odds of ranking a randomly-chosen positive higher than a
randomly-chosen negative.
Args:
scores: A score for each label which can be ranked.
labels: A multi-hot encoding of the ground truth positives. Must match the
shape of scores.
label_mask: A mask indicating which labels to involve in the calculation.
sort_descending: An indicator if the search result ordering is in descending
order (e.g. for evaluating over similarity metrics where higher scores are
preferred). If false, computes the generalized mean rank on ascendingly
sorted inputs.
Returns:
The generalized mean rank and its variance. The variance is calculated by
considering each positive to be an independent sample of the value
1 - (#N ranked above P_i) / #N. This gives a measure of how consistently
positives are ranked.
"""
idx = jnp.argsort(scores, axis=-1)
if sort_descending:
idx = jnp.flip(idx, axis=-1)
labels = jnp.take_along_axis(labels, idx, axis=-1)
if label_mask is None:
label_mask = True
else:
label_mask = jnp.take_along_axis(label_mask, idx, axis=-1)
num_p = (labels > 0).sum(axis=-1, where=label_mask)
num_p_above = jnp.cumsum((labels > 0) & label_mask, axis=-1)
num_n = (labels == 0).sum(axis=-1, where=label_mask)
num_n_above = jnp.cumsum((labels == 0) & label_mask, axis=-1)
gmr = num_p_above.mean(axis=-1, where=(labels == 0) & label_mask) / num_p
gmr_var = (num_n_above / num_n[:, None]).var(
axis=-1, where=(labels > 0) & label_mask
)
return gmr, gmr_var
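# A small worked example: with scores [[0.9, 0.1, 0.8]] and positives at
# indices 0 and 2, both positives outrank the single negative, so the
# generalized mean rank is 1.0 and its variance is 0.0.
#
#   generalized_mean_rank(
#       jnp.array([[0.9, 0.1, 0.8]]), jnp.array([[1, 0, 1]])
#   )  # -> (array([1.]), array([0.]))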
def least_squares_solve_mix(matrix, rhs, diag_loading=1e-3):
# Assumes a real-valued matrix, with zero mean.
adj_matrix = jnp.conjugate(jnp.swapaxes(matrix, -1, -2))
cov_matrix = jnp.matmul(adj_matrix, matrix)
# diag_loading ensures invertibility of the (pos. semi-definite) cov_matrix.
cov_matrix += diag_loading * jnp.eye(
cov_matrix.shape[-1], dtype=cov_matrix.dtype
)
return scipy.linalg.solve(
cov_matrix, jnp.matmul(adj_matrix, rhs), sym_pos=True
)
def least_squares_mixit(reference, estimate):
"""Applies loss_fn after finding the best fit MixIt assignment."""
# Reference shape is [B, M, T]
# Estimate shape is [B, C, T]
mix_matrix = least_squares_solve_mix(
jnp.swapaxes(estimate, -1, -2), jnp.swapaxes(reference, -1, -2)
)
mix_matrix = jnp.swapaxes(mix_matrix, -1, -2)
max_per_column = jnp.max(mix_matrix, axis=-2, keepdims=True)
mix_matrix = jnp.where(mix_matrix == max_per_column, 1.0, 0.0)
best_mix = mix_matrix @ estimate
return best_mix, mix_matrix
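# A minimal usage sketch, with batch size 1, M=2 reference mixtures, C=4
# estimated channels, and 1000 time steps (all shapes are illustrative):
#
#   reference = jnp.zeros((1, 2, 1000))
#   estimate = jnp.zeros((1, 4, 1000))
#   best_mix, mix_matrix = least_squares_mixit(reference, estimate)
#   best_mix.shape    # (1, 2, 1000)
#   mix_matrix.shape  # (1, 2, 4); binary assignment of channels to mixtures.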
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked autoencoder for spectrograms."""
import flax.linen as nn
from jax import numpy as jnp
from jax import random
import optax
from scenic.model_lib.layers import attention_layers
from scenic.projects.baselines import vit
def get_patches(x: jnp.ndarray, patch_size: tuple[int, int]) -> jnp.ndarray:
"""Split input into patches.
Args:
x: The input to split into patches. Must be of size (..., height, width,
channels).
patch_size: The size of the patches, (patch height, patch width).
Returns:
The patches of x as size (..., height / patch height, width / patch width,
patch height, patch width, channels).
"""
# For a single batch dimension, should be equivalent to:
# lax.conv_general_dilated_patches(x, patch_size, patch_size, 'VALID',
# dimension_numbers=('NHWC', 'OIHW', 'NHWC'))
*b, h, w, c = x.shape
ph, pw = patch_size
if h % ph != 0 or w % pw != 0:
raise ValueError('patch size does not divide image size')
x = jnp.reshape(x, b + [h // ph, ph, w // pw, pw, c])
return jnp.swapaxes(x, -3, -4)
def merge_patches(x: jnp.ndarray) -> jnp.ndarray:
"""Reshape patched image into single image.
Args:
x: The patches of size (..., height, width, patch height, patch width,
channels).
Returns:
The image of size (..., height * patch height, width * patch width,
channels).
"""
*b, h, w, ph, pw, c = x.shape
x = jnp.swapaxes(x, -3, -4)
return jnp.reshape(x, b + [h * ph, w * pw, c])
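# A minimal round-trip sketch: when the patch size divides the image size,
# `merge_patches` inverts `get_patches`.
#
#   x = jnp.ones((2, 32, 16, 1))
#   patches = get_patches(x, (8, 8))  # Shape (2, 4, 2, 8, 8, 1).
#   assert merge_patches(patches).shape == x.shape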
class Encoder(nn.Module):
"""Encode patches.
Following the Masked Spectrogram Modelling paper this uses a ViT-B encoder.
We add 1D sinusoidal embeddings to the patches before masking out a fraction
of patches at random, only encoding the unmasked ones.
This is the same encoder as used by the MAEs that Listen paper. The MAE-AST
paper uses a slight variation that only has half the number of layers (6).
Attributes:
mlp_dim: Dimension of the MLP on top of attention block.
num_layers: Number of transformer layers.
num_heads: Number of self-attention heads.
patch_size: Size of the patches (as a tuple). Patches are non-overlapping.
mask_rate: The fraction of patches to mask.
hidden_size: Size of the linear embedding of the patches.
dropout_rate: Dropout rate.
attention_dropout_rate: Dropout for attention heads.
stochastic_depth: The layer dropout probability.
class_token: Whether or not to prepend a zero-initialized learned class
token to the patches.
"""
patch_size: tuple[int, int] = (16, 16)
mlp_dim: int = 3072
num_layers: int = 12
num_heads: int = 12
mask_rate: float = 0.75
hidden_size: int = 768
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.0
stochastic_depth: float = 0.0
class_token: bool = False
@nn.compact
def __call__(self, x: jnp.ndarray, *, train: bool):
"""Mask the patches and encode the remaining ones.
Note that the indices of masked and unmasked patches can be used as
follows, where x is an array of size (batch, patches, ...):
jnp.take_along_axis(x, indices[..., jnp.newaxis], axis=1)
or
x.at[jnp.arange(n)[:, jnp.newaxis], indices]
Args:
x: An image/spectrogram.
train: Whether or not this is training.
Returns:
The encoded patches and the indices of the unmasked and masked patches.
The encoded patches are of the shape (batch, unmasked patches, features).
"""
if jnp.ndim(x) != 4:
raise ValueError('x must be of shape (batch, height, width, channels)')
fh, fw = self.patch_size
# Extracting patches and then embedding is in fact a single convolution.
x = nn.Conv(
self.hidden_size,
(fh, fw),
strides=(fh, fw),
padding='VALID',
name='embedding',
)(x)
n, h, w, c = x.shape
# Add positional embeddings
x = jnp.reshape(x, [n, h * w, c])
x = attention_layers.Add1DPositionEmbedding(posemb_init=None)(x)
num_patches = h * w
indices = jnp.tile(jnp.arange(h * w), (n, 1))
if train:
num_patches = int(num_patches * (1 - self.mask_rate))
rng = self.make_rng('patch_mask')
indices = random.permutation(rng, indices, axis=1, independent=True)
unmasked, masked = indices[:, :num_patches], indices[:, num_patches:]
x = jnp.take_along_axis(x, unmasked[..., jnp.newaxis], axis=1)
# If we want to add a class token, add it here.
if self.class_token:
class_token = self.param(
'class_token', nn.initializers.zeros, (1, 1, c), x.dtype
)
class_token = jnp.tile(class_token, [n, 1, 1])
x = jnp.concatenate([class_token, x], axis=1)
x = vit.Encoder(
mlp_dim=self.mlp_dim,
num_layers=self.num_layers,
num_heads=self.num_heads,
positional_embedding='none',
dropout_rate=self.dropout_rate,
attention_dropout_rate=self.attention_dropout_rate,
stochastic_depth=self.stochastic_depth,
)(x, train=train)
return x, unmasked, masked
class Embedder(nn.Module):
encoder: Encoder = Encoder()
taxonomy_loss_weight: float = 0.0
@nn.compact
def __call__(
self,
x: jnp.ndarray,
*,
train: bool,
use_running_average: bool | None = None
):
encoded_patches, _, _ = self.encoder(x, train=train)
embedding = jnp.mean(encoded_patches, axis=-2)
return optax.scale_gradient(embedding, 0.01)
class Decoder(nn.Module):
"""Decode patches.
This decoder follows the Masked Spectrogram Modeling paper, which follows
ViT-S (384 dimensions, 6 heads) with a reduced number of layers (4).
The MAE-AST paper uses ViT-B instead (12 heads, 768 dimensions) but with only
2 layers.
The MAEs that Listen paper uses a deeper, custom decoder (8 layers when using
global attention) with 512 dimensions.
Attributes:
output_size: The size of the output image (height, width, channels).
patch_size: The size of each patch (height and width).
mlp_dim: Dimension of the MLP on top of attention block.
num_layers: Number of transformer layers.
num_heads: Number of self-attention heads.
hidden_size: Size of the linear embedding of the patches.
dropout_rate: Dropout rate.
attention_dropout_rate: Dropout for attention heads.
stochastic_depth: The layer dropout probability.
"""
output_size: tuple[int, int, int]
patch_size: tuple[int, int] = (16, 16)
mlp_dim: int = 1536
num_layers: int = 4
num_heads: int = 6
hidden_size: int = 384
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.0
stochastic_depth: float = 0.0
@nn.compact
def __call__(self, x: jnp.ndarray, unmasked: jnp.ndarray, *, train: bool):
"""Decode the patches.
Args:
x: The embeddings of the unmasked patches.
unmasked: The indices of the unmasked patches in the spectrogram.
train: Whether this is training time.
Returns:
The decoded patches, of the form (batch, patches, features). The number
of features is equal to the patch size times number of channels.
"""
# First restore the patches in their correct order and use mask tokens
n, num_patches, features = x.shape
h, w, c = (
self.output_size[0] // self.patch_size[0],
self.output_size[1] // self.patch_size[1],
self.output_size[2],
)
if unmasked.shape != (n, num_patches):
raise ValueError('shape of encoded patches and mask do not match')
mask_token = self.param(
'mask_token', nn.initializers.zeros, (1, 1, features), x.dtype
)
embeddings = jnp.tile(mask_token, (n, h * w, 1))
embeddings = embeddings.at[jnp.arange(n)[:, jnp.newaxis], unmasked].set(x)
# Project to the decoder width if needed; otherwise use the embeddings as-is.
if features != self.hidden_size:
x = nn.Dense(features=self.hidden_size)(embeddings)
else:
x = embeddings
# Transformer decoder
x = vit.Encoder(
mlp_dim=self.mlp_dim,
num_layers=self.num_layers,
num_heads=self.num_heads,
positional_embedding='sinusoidal_1d',
dropout_rate=self.dropout_rate,
attention_dropout_rate=self.attention_dropout_rate,
stochastic_depth=self.stochastic_depth,
)(x, train=train)
x = nn.Dense(features=self.patch_size[0] * self.patch_size[1] * c)(x)
return x
class MaskedAutoencoder(nn.Module):
"""A masked autoencoder."""
encoder: nn.Module
decoder: nn.Module
@nn.compact
def __call__(self, x: jnp.ndarray, *, train: bool):
"""Apply masked autoencoder.
Args:
x: An image of size (batch, height, width, channels).
train: Whether this is training.
Returns:
The decoded patches (of shape (batch, patches, features)).
"""
if self.encoder.patch_size != self.decoder.patch_size:
raise ValueError('patch sizes do not match')
encoded_patches, unmasked, masked = self.encoder(x, train=train)
decoded_patches = self.decoder(encoded_patches, unmasked, train=train)
patches = get_patches(x, self.encoder.patch_size)
patches = jnp.reshape(patches, decoded_patches.shape)
return decoded_patches, patches, masked
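# A minimal usage sketch with illustrative sizes: a 64x64 single-channel
# spectrogram, a small encoder, and a matching decoder. With train=False no
# patches are masked; masking (and the 'patch_mask' RNG) only applies when
# train=True.
#
#   mae = MaskedAutoencoder(
#       encoder=Encoder(mlp_dim=128, num_layers=1, num_heads=2, hidden_size=32),
#       decoder=Decoder(output_size=(64, 64, 1), mlp_dim=128, num_layers=1,
#                       num_heads=2, hidden_size=32),
#   )
#   variables = mae.init(
#       random.PRNGKey(0), jnp.zeros((1, 64, 64, 1)), train=False
#   )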
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handcrafted features for linear models."""
from flax import linen as nn
from jax import numpy as jnp
from jax import scipy as jsp
class HandcraftedFeatures(nn.Module):
"""Handcrafted features for linear models.
Attributes:
compute_mfccs: If True, turn log-melspectrograms into MFCCs.
num_mfccs: How many MFCCs to keep. Unused if compute_mfccs is False.
aggregation: How to aggregate over time. If 'beans', we concatenate the
mean, standard deviation, min, and max over the time axis (which mirrors
the processing done in the BEANS benchmark (Hagiwara et al., 2022)). If
'avg_pool', we perform average pooling over the time axis (controlled by
`window_size` and `window_stride`) before flattening the time and channel
axes. If `flatten`, we simply flatten the time and channel axes.
window_size: Average pooling window size. Unused if `aggregation` is not
`avg_pool`.
window_stride: Average pooling window stride. Unused if `aggregation` is not
`avg_pool`.
"""
compute_mfccs: bool = False
num_mfccs: int = 20
aggregation: str = 'avg_pool'
window_size: int = 10
window_stride: int = 10
@nn.compact
def __call__(
self,
inputs: jnp.ndarray,
train: bool,
use_running_average: bool | None = None,
) -> jnp.ndarray:
del train
del use_running_average
# Reshape from [B, T, D, 1] to [B, T, D]
outputs = jnp.squeeze(inputs, axis=[-1])
if self.compute_mfccs:
outputs = jsp.fft.dct(
outputs,
type=2,
n=self.num_mfccs,
axis=-1,
norm='ortho',
)
if self.aggregation == 'beans':
return jnp.concatenate(
[
outputs.mean(axis=-2),
outputs.std(axis=-2),
outputs.min(axis=-2),
outputs.max(axis=-2),
],
axis=-1,
)
elif self.aggregation in ('flatten', 'avg_pool'):
if self.aggregation == 'avg_pool':
outputs = nn.pooling.avg_pool(
outputs,
window_shape=(self.window_size,),
strides=(self.window_stride,),
)
# Reshape from [B, T, D] to [B, T * D]
return outputs.reshape(
outputs.shape[0], outputs.shape[1] * outputs.shape[2]
)
else:
raise ValueError(f'unrecognized aggregation: {self.aggregation}')
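# A minimal usage sketch, assuming a batch of log-melspectrograms of shape
# [B, T, D, 1]. The module has no parameters, so an empty variable dict
# suffices:
#
#   model = HandcraftedFeatures(aggregation='beans')
#   feats = model.apply({}, jnp.zeros((2, 100, 32, 1)), train=False)
#   feats.shape  # (2, 128): mean/std/min/max over time, concatenated.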
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Continuous wavelet transforms.
This module contains filters (Gabor, sinc) and wavelets (Morlet, Morse) that can
be used in a continuous wavelet transform.
Filtering can be done through convolution in the time-domain or by
multiplication in the frequency domain. Note that some filters are
non-differentiable in the frequency domain (sinc filters) whereas others don't
have an analytic representation in the time domain (Morse wavelets). Hence we
use both approaches.
Filters and wavelets can be normalized in different ways. Let φ(t) be the
time-domain filter and Φ(f) the frequency-domain filter. Different normalization
options are:
* ∫|φ(t)| = 1 (L1 normalized, bandpass normalized)
* ∫|φ(t)|² = ∫|Φ(f)|² = 1 (L2 normalized, energy normalized)
* max |Φ(f)| = 1 (peak frequency response)
Note that max |Φ(f)| ≤ ∫|φ(t)|.
There are a variety of Fourier transform conventions. This code follows the
one from NumPy where the ordinary frequency is used: f̂(ω) = ∫f(t)exp(-i2πωt)dt.
For all parametrized filters we follow the approach of the continuous wavelet
transform (CWT) and learn a scale for each filter: φ(t/s) for s > 0. To keep the
function norm unchanged across scalings, we have to scale the values by 1/s (for
L1 normalization, time domain only) or 1/√s (for L2 normalization).
For reference implementations, see the Python package `ssqueezepy` and the
MATLAB package jLab.
"""
import enum
from typing import Callable
import chirp.signal
from jax import lax
from jax import numpy as jnp
from jax import scipy as jsp
class Domain(enum.Enum):
TIME = "time"
FREQUENCY = "frequency"
class Normalization(enum.Enum):
L1 = "l1"
L2 = "l2"
def gabor_filter(
sigma: float, domain: Domain, normalization: Normalization
) -> Callable[[jnp.ndarray], jnp.ndarray]:
"""A one-dimensional Gabor filter.
The Gabor filter is a complex sinusoid modulated by a Gaussian. Its frequency
response is a Gaussian with mean sigma / 2π and standard deviation 1 / 2π[^1].
For small values of sigma this filter has a non-zero response to non-positive
frequencies. This means that it is not a wavelet (it fails the admissibility
condition)[^2] and it is not analytic[^3].
The zero-mean, shifted version of this filter is often called the (complex)
Morlet wavelet, Gabor wavelet, or Gabor kernel. The Gabor filter is sometimes
also referred to as the Gabor function[^4].
To match `leaf_audio.impulse_response.gabor_impulse_response`: Use L1
normalization. Set sigma to the product of η and σ and then use σ as the
scaling factor.
[^1]: Movellan, Javier R. "Tutorial on Gabor filters." (2002).
[^2]: Valens, Clemens. "A really friendly guide to wavelets." (1999).
[^3]: Lilly, Jonathan M., and Sofia C. Olhede. "On the analytic wavelet
transform." IEEE transactions on information theory 56.8 (2010): 4135-4156.
[^4]: Bernardino, Alexandre, and José Santos-Victor. "A real-time gabor primal
sketch for visual attention." Iberian Conference on Pattern Recognition and
Image Analysis. Springer, Berlin, Heidelberg, 2005.
Args:
sigma: The parameter of the function.
domain: The domain.
normalization: What normalization to use.
Returns:
A function which calculates the filter over the time or frequency domain.
"""
if domain is Domain.TIME:
if normalization is Normalization.L1:
norm = 1 / jnp.sqrt(2 * jnp.pi)
elif normalization is Normalization.L2:
norm = jnp.pi ** (-1 / 4)
def _gabor_filter(t: jnp.ndarray) -> jnp.ndarray:
sinusoids = jnp.exp(1j * t * sigma)
gaussian = jnp.exp(-1 / 2 * t**2)
return norm * gaussian * sinusoids
elif domain is Domain.FREQUENCY:
if normalization is Normalization.L1:
norm = 1.0
elif normalization is Normalization.L2:
norm = jnp.pi ** (1 / 4) * jnp.sqrt(2)
def _gabor_filter(f: jnp.ndarray) -> jnp.ndarray:
gaussian = jnp.exp(-1 / 2 * (sigma - f * 2 * jnp.pi) ** 2)
return norm * gaussian
return _gabor_filter
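# A minimal usage sketch: sampling the time-domain impulse response on a grid
# (sigma and the grid are arbitrary choices for illustration).
#
#   g = gabor_filter(3.0, Domain.TIME, Normalization.L1)
#   ts = jnp.linspace(-4.0, 4.0, 257)
#   response = g(ts)  # Complex-valued, shape (257,).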
def sinc_filter(
sigma: float, domain: Domain, normalization: Normalization
) -> Callable[[jnp.ndarray], jnp.ndarray]:
"""A sinc filter.
Rather than being parameterized by its upper and lower frequency, this sinc
filter is parametrized by its central frequency. The width of the filter can
then be set by the scaling factor (i.e., scaling the inputs).
The sinc filter is not differentiable in the frequency domain.
The L1 norm of the sinc filter diverges to infinity[^1], so L1 normalization
is not supported.
[^1]: Borwein, David, Jonathan M. Borwein, and Isaac E. Leonard. "L p norms
and the sinc function." The American Mathematical Monthly 117.6 (2010):
528-539.
Args:
sigma: The central frequency of the function.
domain: The domain.
normalization: What normalization to use.
Returns:
A function which calculates the filter over the time or frequency domain.
Raises:
ValueError: If L1 normalization is requested.
"""
if normalization is Normalization.L1:
raise ValueError("sinc filter does not support L1 normalization")
if domain is Domain.TIME:
def _sinc_filter(t: jnp.ndarray) -> jnp.ndarray:
shift = jnp.exp(2j * jnp.pi * t * sigma)
# NOTE: Normalized sinc function
return shift * jnp.sinc(t)
elif domain is Domain.FREQUENCY:
def _sinc_filter(f: jnp.ndarray) -> jnp.ndarray:
return jnp.where(jnp.abs(f - sigma) < 1 / 2, 1.0, 0.0)
return _sinc_filter
def morlet_wavelet(
sigma: float, domain: Domain, normalization: Normalization
) -> Callable[[jnp.ndarray], jnp.ndarray]:
"""A Morlet wavelet.
This wavelet is a sinusoid modulated by a Gaussian which is shifted down in
order to have zero mean (admissibility condition). It has a non-zero response
to negative frequencies, so it is not analytic.
For large values of sigma this wavelet is approximately equal to a Gabor
filter. See `gabor_filter` for details regarding naming.
The peak frequency of this wavelet is the solution `wc` to the equation
wc * 2π = sigma / (1 - exp(-sigma * wc * 2π)). This can be found using fixed
point iteration.
Args:
sigma: The parameter which allows the wavelet to trade-off between time and
frequency resolution.
domain: The domain.
normalization: What normalization to use.
Returns:
A function which calculates the filter over the time or frequency domain.
"""
if normalization is Normalization.L1:
# TODO(bartvm): Does an expression exist for this?
raise NotImplementedError
# Follows notation from, e.g., https://en.wikipedia.org/wiki/Morlet_wavelet
kappa = jnp.exp(-1 / 2 * sigma**2)
c = (1 + jnp.exp(-(sigma**2)) - 2 * jnp.exp(-3 / 4 * sigma**2)) ** (
-1 / 2
)
if domain is Domain.TIME:
def _morlet_wavelet(t: jnp.ndarray) -> jnp.ndarray:
return (
c
* jnp.pi ** (-1 / 4)
* jnp.exp(-1 / 2 * t**2)
* (jnp.exp(1j * sigma * t) - kappa)
)
elif domain is Domain.FREQUENCY:
def _morlet_wavelet(f: jnp.ndarray) -> jnp.ndarray:
f = jnp.pi * 2 * f
return (
c
* jnp.pi ** (1 / 4)
* jnp.sqrt(2)
* (
jnp.exp(-1 / 2 * (sigma - f) ** 2)
- kappa * jnp.exp(-1 / 2 * f**2)
)
)
return _morlet_wavelet
def morse_wavelet(
gamma: float, beta: float, domain: Domain, normalization: Normalization
) -> Callable[[jnp.ndarray], jnp.ndarray]:
"""A Morse wavelet.
For a general overview of Morse wavelets see Lilly and Olhede[^1][^2]. For
the mathematical details of higher-order wavelets, see Olhede and Walden[^3].
This code follows the notation in Lilly and Olhede.
This wavelet is analytic (i.e., it has zero response for negative
frequencies). It has no analytic expression in the time-domain.
The peak frequency is equal to (beta / gamma)^(1 / gamma) and the width
(duration) scales as sqrt(beta * gamma).
[^1]: Lilly, Jonathan M., and Sofia C. Olhede. "Generalized Morse wavelets as
a superfamily of analytic wavelets." IEEE Transactions on Signal
Processing 60.11 (2012): 6036-6041.
[^2]: Lilly, Jonathan M., and Sofia C. Olhede. "Higher-order properties of
analytic wavelets." IEEE Transactions on Signal Processing 57.1 (2008):
146-160.
[^3]: Olhede, Sofia C., and Andrew T. Walden. "Generalized morse wavelets."
IEEE Transactions on Signal Processing 50.11 (2002): 2661-2670.
Args:
gamma: A parameter which controls the high-frequency decay. A common choice
is 3, in which case it defines the family of Airy wavelets (which are
similar to the commonly used Morlet and Gabor wavelets). See figure 1 in
Lilly and Olhede (2012) for the relationship between gamma and other
wavelet families. Gamma must be positive.
beta: A parameter which controls the behavior near the zero frequency. When
gamma is equal to 3, increasing this has a similar effect as increasing
the parameter of a Morlet wavelet. Beta must be positive.
domain: The domain.
normalization: What normalization to use.
Returns:
A function which calculates the wavelet over the frequency domain.
"""
if domain is not Domain.FREQUENCY:
raise ValueError(
"Morse wavelets have no analytic expression in the time domain"
)
r = (2 * beta + 1) / gamma
# NOTE: Computations in log-space for numerical stability
if normalization is Normalization.L2:
log_norm = (jnp.log(gamma) + r * jnp.log(2) - jsp.special.gammaln(r)) / 2
elif normalization is Normalization.L1:
log_norm = jnp.log(gamma) - jsp.special.gammaln((1 + beta) / gamma)
def _morse_wavelet(f: jnp.ndarray) -> jnp.ndarray:
f_nonneg = f >= 0
f *= f_nonneg
return jnp.exp(log_norm + beta * jnp.log(f) - f**gamma) * f_nonneg
return _morse_wavelet
def melspec_params(
num_mel_bins: int,
sample_rate: float,
lower_edge_hertz: float,
upper_edge_hertz: float,
) -> jnp.ndarray:
"""Gets the peak frequencies and bandwidths of a standard mel-filterbank.
This assumes a Gaussian frequency response (i.e., Gabor filters) and matches
the full width at half maximum (FWHM) of the square root of the triangle
filter to the FWHM of this Gaussian.
Args:
num_mel_bins: The number of mel bandpass filters.
sample_rate: The sampling rate of the signal. Used to calculate the Nyquist
frequency, which determines the upper bound on frequencies in the signal.
lower_edge_hertz: The lowest frequency to generate filters for.
upper_edge_hertz: The highest frequency to generate filters for.
Returns:
The central frequencies and the inverse bandwidth (normalized).
"""
# The melspec triangle filters are equally spaced in the mel-scale
range_ = map(chirp.signal.hertz_to_mel, (lower_edge_hertz, upper_edge_hertz)) # pytype: disable=wrong-arg-types # jax-ndarray
bands = chirp.signal.mel_to_hertz(jnp.linspace(*range_, num_mel_bins + 2))
# Convert from Hertz to normalized frequencies
bands = bands / sample_rate * jnp.pi * 2
# Triangle filters with peak 1, but we take the square root so that the
# slopes reach the half maximum 1/2 at 1/4
fwhms = ((3 * bands[2:] + bands[1:-1]) - (bands[1:-1] + 3 * bands[:-2])) / 4
# To convert from FWHM to standard deviation for a Gaussian
coeff = 2 * jnp.sqrt(2 * jnp.log(2))
inv_fwhms = coeff / fwhms
return bands[1:-1], inv_fwhms # pytype: disable=bad-return-type # jax-ndarray
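# A minimal usage sketch: peak frequencies and inverse bandwidths for a small
# Gabor filterbank (the parameter values are illustrative):
#
#   bands, inv_fwhms = melspec_params(
#       num_mel_bins=64,
#       sample_rate=16_000,
#       lower_edge_hertz=60.0,
#       upper_edge_hertz=7_800.0,
#   )
#   bands.shape  # (64,); normalized center frequencies in radians.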
def convolve_filter(
filter_: Callable[[jnp.ndarray], jnp.ndarray],
signal: jnp.ndarray,
scale_factors: jnp.ndarray,
normalization: Normalization,
window_size_frames: int,
stride: tuple[int, ...] = (1,),
padding: str = "SAME",
) -> jnp.ndarray:
"""Convolves a given set of filters with a signal in the time domain.
Note that this takes the conjugate of the filter in order to match the usual
conventions of continuous wavelet transforms.
Args:
filter_: A time-domain filter which takes radians as inputs.
signal: A batch of signals (assumed to be in the format `NWC`).
scale_factors: Each filter has a scale associated with it.
normalization: The normalization to use.
window_size_frames: The width of the window to use, in frames.
stride: The stride to use for the convolution.
padding: The padding to use for the convolution.
Returns:
The signals filtered with the given filters.
"""
ts = jnp.arange(-(window_size_frames // 2), (window_size_frames + 1) // 2)
ts = ts[:, jnp.newaxis] / scale_factors
if normalization is Normalization.L1:
norm = 1 / scale_factors
elif normalization is Normalization.L2:
norm = 1 / jnp.sqrt(scale_factors)
sampled_filters = norm * jnp.conj(filter_(ts))
# We assume a single input channel
sampled_filters = sampled_filters[:, jnp.newaxis]
dn = lax.conv_dimension_numbers(
signal.shape, sampled_filters.shape, ("NWC", "WIO", "NWC")
)
# TODO(bartvm): Not all platforms (e.g., TF Lite) support complex inputs for
# convolutions. Can be addressed by convolving with the real/imaginary parts
# separately in the future if needed.
# TODO(bartvm): Converting signal to complex because JAX wants the input and
# filters to be the same type, but this results in 33% more multiplications
# than necessary, so this is probably not the fastest option.
signal = signal.astype(jnp.complex64)
filtered_signal = lax.conv_general_dilated(
signal, sampled_filters, stride, padding, (1,), (1,), dn
)
return filtered_signal
def multiply_filter(
filter_: Callable[[jnp.ndarray], jnp.ndarray],
signal: jnp.ndarray,
scale_factors: jnp.ndarray,
normalization: Normalization,
) -> jnp.ndarray:
"""Applies a filter to a signal in the frequency domain.
This takes the DFT of the given signals and applies the given filter.
Args:
filter_: The filter in the frequency domain to apply.
signal: A batch of signals, assumed to have time and channels as the last
two axes.
scale_factors: The scale factors to apply to each kernel.
normalization: The normalization to use.
Returns:
The result of applying the filter to the signal.
"""
*_, num_frames, _ = signal.shape
fs = jnp.fft.fftfreq(num_frames)
fs = fs[:, jnp.newaxis] * scale_factors
# TODO(bartvm): TF Lite might not support IFFT as a built-in operation, but
# IFFT is just an FFT with the sign of the inputs changed so easy to adapt to.
# TODO(bartvm): Note that the signal is real-valued, so using FFT might do
# unnecessary computation. Might be faster to use RFFT and then take the
# complex conjugates manually.
filtered_signal = jnp.fft.ifft(
jnp.fft.fft(signal, axis=-2) * filter_(fs), axis=-2
)
if normalization is Normalization.L1:
norm = 1
elif normalization is Normalization.L2:
norm = jnp.sqrt(scale_factors)
return norm * filtered_signal
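# A minimal usage sketch: applying an L2-normalized Morse wavelet in the
# frequency domain (all shapes and parameters are illustrative):
#
#   wavelet = morse_wavelet(3.0, 20.0, Domain.FREQUENCY, Normalization.L2)
#   signals = jnp.zeros((2, 1024, 1))  # (batch, time, channels)
#   scale_factors = jnp.ones((8,))
#   filtered = multiply_filter(wavelet, signals, scale_factors,
#                              Normalization.L2)  # (2, 1024, 8), complex.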
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learned pooling module."""
from typing import Callable
from flax import linen as nn
import jax
from jax import lax
from jax import numpy as jnp
def gaussian(M: int, std: float, sym: bool = True) -> jnp.ndarray: # pylint: disable=invalid-name
"""Returns a Gaussian window.
Port of scipy.signal.windows.gaussian.
Args:
M: Number of points in the output window.
std: The standard deviation, sigma.
sym: Must be `True` (present for compatibility with SciPy's signature).
Returns:
The window, with the maximum value normalized to 1 (though the value 1 does
not appear if M is even).
"""
if not sym:
raise ValueError("Periodic windows not supported")
n = jnp.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = jnp.exp(-(n**2) / sig2)
return w
def gaussian_init(
key: jnp.ndarray, num_channels: int, window_size: int, std: float = 0.4
) -> jnp.ndarray:
"""Initializes Gaussian windows.
Args:
key: RNG, unused.
num_channels: The number of windows to calculate.
window_size: The number of steps in the window (which is assumed to range
from -1 to 1).
std: The standard deviation of the Gaussian.
Returns:
A one-tuple containing an array with `num_channels` entries. These represent
the standard deviation scaled by the window size.
"""
del key
return (std * 0.5 * (window_size - 1) * jnp.ones((num_channels,)),) # pytype: disable=bad-return-type # jax-ndarray
class WindowPool(nn.Module):
"""Pools using a window function.
Note that this is not a pooling function in the traditional sense, i.e., it does
not use a reduction operator applied to the elements in each window. Instead,
a weighted average is taken over the window. If the weighting is given by a
parametrized window, e.g., a Gaussian, then these parameters are learned. This
allows the model to interpolate between subsampling (a Gaussian with zero
variance) and average pooling (a Gaussian with infinite variance).
When using a Gaussian window, there are a few differences with the
implementation in LEAF[^1]. Firstly, this module by default scales the weights
to sum to unity. This ensure that the energy of the output signal is the same
as the input. Secondly, this module does not perform clipping on the window
parameters. This is expected to be done during optimization.
[^1]: https://github.com/google-research/leaf-audio
Attributes:
window: The window function to use. Should follow the conventions of the
`scipy.signal.windows` functions.
window_size: The size of the pooling window.
window_init: Initializer of the window parameters. It should take as an
argument an RNG key, the number of filters, and the width of the window,
and return a tuple of parameters. Each parameter should have the number of
filters as its first axis.
normalize_window: Whether or not to normalize the window to sum to 1.
stride: The stride to use.
padding: Padding to use.
"""
window: Callable[..., jnp.ndarray]
window_size: int
window_init: Callable[[jnp.ndarray, int, int], jnp.ndarray]
normalize_window: bool = True
stride: int = 1
padding: str = "SAME"
@nn.compact
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Applies the pooling.
Args:
inputs: The input array must be of shape `(batch, time, channels)`. Each
channel will have its own window applied. In the case of a parametrized
window, each channel will have its own parameters.
Returns:
The pooled outputs of shape (batch, time, channels).
"""
num_channels = inputs.shape[-1]
window_params = self.param(
"window_params", self.window_init, num_channels, self.window_size
)
window_values = jax.vmap(
self.window, in_axes=(None,) + (0,) * len(window_params)
)(self.window_size, *window_params)
if self.normalize_window:
window_values /= jnp.sum(window_values, axis=1, keepdims=True)
window_values = window_values.T[:, jnp.newaxis]
dn = lax.conv_dimension_numbers(
inputs.shape, window_values.shape, ("NWC", "WIO", "NWC")
)
return lax.conv_general_dilated(
inputs,
window_values,
(self.stride,),
self.padding,
dimension_numbers=dn,
feature_group_count=num_channels,
)
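# A minimal usage sketch: learnable Gaussian pooling with stride 2 over a
# 3-channel input (shapes are illustrative):
#
#   pool = WindowPool(
#       window=gaussian,
#       window_size=5,
#       window_init=gaussian_init,
#       stride=2,
#   )
#   x = jnp.zeros((1, 16, 3))
#   variables = pool.init(jax.random.PRNGKey(0), x)
#   out = pool.apply(variables, x)  # Shape (1, 8, 3).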
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model frontends.
A frontend is the part of the model that transforms a sampled audio signal into
a set of features. This module provides Flax modules that can be used
interchangeably.
For some frontends it also defines inverses (e.g., for separation models).
"""
import dataclasses
from chirp import audio_utils
from chirp import signal
from chirp.models import cwt
from flax import linen as nn
import jax
from jax import lax
from jax import numpy as jnp
from jax import scipy as jsp
@dataclasses.dataclass
class LogScalingConfig:
"""Configuration for log-scaling of mel-spectrogram."""
floor: float = 1e-2
offset: float = 0.0
scalar: float = 0.1
@dataclasses.dataclass
class PCENScalingConfig:
"""Configuration for PCEN normalization of mel-spectrogram."""
smoothing_coef: float = 0.1
gain: float = 0.5
bias: float = 2.0
root: float = 2.0
eps: float = 1e-6
spcen: bool = False
conv_width: int = 256
ScalingConfig = LogScalingConfig | PCENScalingConfig
def frames_mask(mask: jnp.ndarray, stride: int) -> jnp.ndarray:
"""Converts a mask of samples to a mask of frames.
Args:
mask: Array of size (..., time).
stride: The stride used by the frontend.
Returns:
An array of size (..., frames) where frames = ceil(time / stride).
"""
length = mask.shape[-1]
num_frames = -(-length // stride)
pad_width = ((0, 0),) * (mask.ndim - 1) + ((0, num_frames * stride - length),)
mask = jnp.pad(mask, pad_width)
frame_masks = jnp.reshape(mask, mask.shape[:-1] + (num_frames, stride))
return jnp.any(frame_masks, axis=-1)
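# A small worked example: a 5-sample mask with stride 2 yields ceil(5 / 2) = 3
# frames, and a frame is kept if any of its samples is kept.
#
#   frames_mask(jnp.array([[True, True, True, False, False]]), stride=2)
#   # -> [[True, True, False]]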
class Frontend(nn.Module):
"""A audio frontend.
An audio frontend takes an input of size (..., time) and outputs an array of
size (..., frames, features) where frames = ceil(time / stride). That is,
it should behave the same as applying a set of 1D convolutions with `SAME`
padding.
Attributes:
features: The number of features (channels) that the frontend should output.
stride: The stride to use. For an STFT this is sometimes called the hop
length.
"""
features: int
stride: int
# TODO(bartvm): Add ScalingConfig with kw_only=True in Python 3.10
def _magnitude_scale(self, inputs):
# Apply frequency scaling
scaling_config = self.scaling_config
if isinstance(scaling_config, LogScalingConfig):
outputs = audio_utils.log_scale(
inputs, **dataclasses.asdict(scaling_config)
)
elif isinstance(scaling_config, PCENScalingConfig):
kwargs = dataclasses.asdict(scaling_config)
if kwargs.pop("spcen"):
init_smoothing_coef = (
jnp.ones((self.features,)) * scaling_config.smoothing_coef
)
smoothing_coef = self.param(
"spcen_smoothing_coef", lambda _: init_smoothing_coef
)
smoothing_coef = jnp.clip(smoothing_coef, 0, 1)
kwargs["smoothing_coef"] = smoothing_coef
outputs, _ = audio_utils.pcen(inputs, **kwargs)
elif scaling_config is None:
outputs = inputs
else:
raise ValueError("Unrecognized scaling mode.")
return outputs
class InverseFrontend(nn.Module):
"""An inverse frontend.
This takes features of the form (..., frames, features) and outputs
(..., time), the inverse of the frontend.
Note that frontends are usually only invertible when the stride is a divisor
of the input length.
Attributes:
stride: The stride that was used for the frontend. This tells the inverse
frontend how many samples to generate per step.
"""
stride: int
class STFT(Frontend):
"""Short-term Fourier transform.
This module uses a Hann window.
For efficiency, it might be useful to set the number of features to 2^n + 1
for some non-negative integer n. This will guarantee that the length of the
FFT is a power of two.
Note that if magnitude scaling is used, this frontend is no longer invertible.
  Attributes:
power: If given, calculate the magnitude spectrogram using the given power.
The default is 2.0 for the power spectrogram. Pass 1.0 to get the energy
spectrogram. If `None`, then the complex-valued STFT will be returned.
scaling_config: The magnitude scaling configuration to use.
"""
power: float | None = 2.0
scaling_config: ScalingConfig | None = None
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool = True) -> jnp.ndarray:
if self.power is None and self.scaling_config is not None:
raise ValueError("magnitude scaling requires a magnitude spectrogram")
# For a real-valued signal the number of frequencies returned is n // 2 + 1
# so we set the STFT window size to return the correct number of features.
nfft = nperseg = (self.features - 1) * 2
_, _, stfts = jsp.signal.stft(
inputs,
nperseg=nperseg,
noverlap=nperseg - self.stride,
nfft=nfft,
padded=False,
)
# STFT does not use SAME padding (i.e., padding with a total of nperseg -
# stride). Instead it pads with nperseg // 2 on both sides, so the total
# amount of padding depends on whether nperseg is even or odd. The final
# output size is (t + stride - (nperseg % 2)) // stride. In our case nperseg
# is even, so that means we have t // stride + 1 elements. That is one
# element too many when the stride is a divisor of the input length.
if inputs.shape[-1] % self.stride == 0:
stfts = stfts[..., :-1]
stfts = jnp.swapaxes(stfts, -1, -2)
stfts = jnp.abs(stfts) ** self.power if self.power is not None else stfts
return self._magnitude_scale(stfts)
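# A minimal usage sketch for the STFT frontend (hypothetical helper; the
# shapes and values are illustrative). With 257 = 2**8 + 1 features, the FFT
# length is a power of two.
def _stft_example() -> jnp.ndarray:
  frontend = STFT(features=257, stride=160, power=2.0)
  audio = jnp.zeros((2, 16000))  # (batch, time)
  variables = frontend.init(jax.random.PRNGKey(0), audio)
  # The output has shape (2, 100, 257): ceil(16000 / 160) == 100 frames.
  return frontend.apply(variables, audio)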
class ISTFT(InverseFrontend):
"""Inverse short-term Fourier transform.
This module uses a Hann window.
"""
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool = True) -> jnp.ndarray:
nfft = nperseg = (inputs.shape[-1] - 1) * 2
    # The STFT transformation threw away the last time step to match our
    # output shape expectations. We pad it back by repeating the last frame.
inputs = jnp.swapaxes(inputs, -1, -2)
pad_width = ((0, 0),) * (inputs.ndim - 1) + ((0, 1),)
inputs = jnp.pad(inputs, pad_width, "edge")
_, istfts = jsp.signal.istft(
inputs, nperseg=nperseg, noverlap=nperseg - self.stride, nfft=nfft
)
return istfts
class MelSpectrogram(Frontend):
"""Mel-spectrogram frontend.
This frontend begins by calculating the short-term Fourier transform of the
audio using a Hann window and padding. Next, it constructs a mel-spectrogram:
It takes the magnitude of the STFT (power spectrogram), maps the frequencies
to the mel-scale, and bins frequencies together using a series of partially
overlapping triangle filters.
Then an optional scaling step is applied, which can be the logarithm (i.e., a
log power spectrum as used by mel-frequency cepstrums) or PCEN. The smoothing
coefficients of PCEN can optionally be learned as is done by the LEAF
frontend (sPCEN).
Finally, the last few frames are discarded so that the number of output
frames is the expected size (i.e., similar to what you would expect when
doing a set of 1D convolutions with the same kernel size and stride and
`SAME` padding).
Attributes:
kernel_size: The window size to use for the STFT.
sample_rate: The sampling rate of the inputs. This is used to calculate the
conversion to mel-scale.
freq_range: The frequencies to include in the output. Frequencies outside of
this range are simply discarded.
    power: The exponent applied to the magnitude spectrogram: 2.0 gives a
      power spectrogram, 1.0 an energy spectrogram.
    scaling_config: The magnitude scaling configuration to use.
    nfft: Optional FFT length; if None, it defaults to the STFT window size
      (kernel_size).
  """
kernel_size: int
sample_rate: int
freq_range: tuple[float, float]
power: float = 2.0
scaling_config: ScalingConfig | None = None
nfft: int | None = None
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool = True) -> jnp.ndarray:
# Calculate power spectrogram
_, _, stfts = jsp.signal.stft(
inputs,
nperseg=self.kernel_size,
noverlap=self.kernel_size - self.stride,
nfft=self.nfft,
padded=False,
)
# See notes in STFT regarding output size
if inputs.shape[-1] % self.stride == 0:
stfts = stfts[..., :-1]
stfts = jnp.swapaxes(stfts, -1, -2)
magnitude_spectrograms = jnp.abs(stfts) ** self.power
# Construct mel-spectrogram
num_spectrogram_bins = magnitude_spectrograms.shape[-1]
mel_matrix = signal.linear_to_mel_weight_matrix(
self.features, num_spectrogram_bins, self.sample_rate, *self.freq_range
)
output = magnitude_spectrograms @ mel_matrix
return self._magnitude_scale(output)
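# A minimal usage sketch for the mel-spectrogram frontend with log scaling
# (hypothetical helper; the sample rate and frequency range are illustrative
# only).
def _melspec_example() -> jnp.ndarray:
  frontend = MelSpectrogram(
      features=160,
      stride=320,
      kernel_size=2048,
      sample_rate=32000,
      freq_range=(60.0, 10_000.0),
      scaling_config=LogScalingConfig(),
  )
  audio = jnp.zeros((1, 5 * 32000))  # Five seconds of audio.
  variables = frontend.init(jax.random.PRNGKey(0), audio)
  # The output has shape (1, 500, 160).
  return frontend.apply(variables, audio)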
class SimpleMelspec(Frontend):
"""Minimal RFFT-based Melspec implementation."""
kernel_size: int
sample_rate: int
freq_range: tuple[int, int]
power: float = 2.0
scaling_config: ScalingConfig | None = None
nfft: int | None = None
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool = True) -> jnp.ndarray:
flat_inputs = jnp.reshape(inputs, (-1,) + inputs.shape[-1:] + (1,))
    # Note that SciPy frames with VALID padding plus its own boundary-padding
    # logic, so these outputs are not numerically equivalent to the
    # SciPy-based frontends above.
framed = jax.lax.conv_general_dilated_patches(
flat_inputs,
(self.kernel_size,),
(self.stride,),
"SAME",
dimension_numbers=("NTC", "OIT", "NTC"),
)
window = jnp.hanning(self.kernel_size)
    # SciPy's default stft scaling reduces to dividing by the window sum.
    # (Internally, the scalar is squared and later square-rooted.)
window *= 1.0 / window.sum()
windowed = window[jnp.newaxis, jnp.newaxis, :] * framed
stfts = jnp.fft.rfft(windowed, n=self.nfft, axis=-1)
mags = stfts.real**2 + stfts.imag**2
if self.power == 1.0:
mags = jnp.sqrt(mags)
elif self.power == 2.0:
pass
else:
mags = mags ** (self.power / 2.0)
n_bins = mags.shape[-1]
mel_matrix = signal.linear_to_mel_weight_matrix(
num_mel_bins=self.features,
num_spectrogram_bins=n_bins,
sample_rate=self.sample_rate,
lower_edge_hertz=self.freq_range[0],
upper_edge_hertz=self.freq_range[1],
)
output = mags @ mel_matrix
output = jnp.reshape(output, inputs.shape[:-1] + output.shape[-2:])
return self._magnitude_scale(output)
class MFCC(Frontend):
"""MFC coefficients frontend.
This frontend begins by calculating the mel-spectrogram of the audio, then
computes its discrete cosine transform.
Attributes:
mel_spectrogram_frontend: Frontend used for computing mel-spectrograms out
of audio sequences.
num_coefficients: Number of MFC coefficients to keep.
aggregate_over_time: If True, aggregate the MFCs (of shape [..., num_frames,
num_coefficients]) over the time axis using mean, standard deviation, min,
and max operations. The result is four tensors of shape [...,
num_coefficients] that are then concatenated into a single output of shape
[..., 4 * num_coefficients]. This mirrors the processing done in the BEANS
benchmark (Hagiwara et al., 2022).
"""
mel_spectrogram_frontend: MelSpectrogram
num_coefficients: int | None = None
aggregate_over_time: bool = True
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool = True) -> jnp.ndarray:
mel_spectrograms = self.mel_spectrogram_frontend(inputs, train)
outputs = jsp.fft.dct(
mel_spectrograms, type=2, n=self.num_coefficients, axis=-1, norm="ortho"
)
if self.aggregate_over_time:
outputs = jnp.concatenate(
[
outputs.mean(axis=-2),
outputs.std(axis=-2),
outputs.min(axis=-2),
outputs.max(axis=-2),
],
axis=-1,
)
return outputs
class LearnedFrontend(Frontend):
"""Learned filters.
This frontend is a small wrapper around `nn.Conv`. It learns a filter bank
where the filters are the convolutional kernels.
Note that if magnitude scaling is used, this frontend is no longer invertible.
Attributes:
kernel_size: The size of the convolutional filters.
scaling_config: The magnitude scaling configuration to use.
"""
kernel_size: int
scaling_config: ScalingConfig | None = None
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool = True) -> jnp.ndarray:
output = nn.Conv(
features=self.features,
kernel_size=(self.kernel_size,),
strides=(self.stride,),
)(
# Collapse batch dimensions and add a single channel
jnp.reshape(inputs, (-1,) + inputs.shape[-1:] + (1,))
)
output = jnp.reshape(output, inputs.shape[:-1] + output.shape[-2:])
return self._magnitude_scale(output)
class InverseLearnedFrontend(InverseFrontend):
"""Thin wrapper around a Conv1DTranspose.
A small wrapper around `nn.ConvTranspose`. It learns the inverse of a filter
bank where the filters are convolutional kernels.
Attributes:
kernel_size: The size of the convolutional filters.
"""
kernel_size: int
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool = True) -> jnp.ndarray:
output = nn.ConvTranspose(
features=1, kernel_size=(self.kernel_size,), strides=(self.stride,)
)(jnp.reshape(inputs, (-1,) + inputs.shape[-2:]))
output = jnp.reshape(output, inputs.shape[:-2] + output.shape[-2:])
return jnp.squeeze(output, -1)
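# A minimal round-trip sketch pairing LearnedFrontend with
# InverseLearnedFrontend (hypothetical helper; the sizes are illustrative).
# Without magnitude scaling, the pair can serve as an encoder/decoder for
# separation models.
def _learned_frontend_example() -> jnp.ndarray:
  encoder = LearnedFrontend(features=256, stride=160, kernel_size=320)
  decoder = InverseLearnedFrontend(stride=160, kernel_size=320)
  audio = jnp.zeros((2, 16000))
  encoder_vars = encoder.init(jax.random.PRNGKey(0), audio)
  features = encoder.apply(encoder_vars, audio)  # (2, 100, 256)
  decoder_vars = decoder.init(jax.random.PRNGKey(1), features)
  return decoder.apply(decoder_vars, features)  # (2, 16000)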
class MorletWaveletTransform(Frontend):
"""Morlet wavelet transform.
The Morlet wavelet transform is a wavelet transformation using Morlet
wavelets. This is like a short-term Fourier transform with Gaussian windows,
but where the window size is different for each frequency. This allows for
arbitrary trade-offs of the time- and frequency resolution.
Note that technically speaking this module uses Gabor filters instead of
Morlet wavelets. Gabor filters don't have the constant shift required to make
them invertible for low frequencies, but in practice this barely matters.
The LEAF frontend uses this transformation with stride 1 as the first step.
Like LEAF, we initialize the Gabor filters to resemble a mel-spectrogram with
the given frequency range.
Attributes:
kernel_size: The kernel size to use for the filters.
sample_rate: The sample rate of the input. Used to interpret the frequency
      range for initializing the filters.
freq_range: The filters are initialized to resemble a mel-spectrogram. These
values determine the minimum and maximum frequencies of those filters.
"""
kernel_size: int
sample_rate: int
freq_range: tuple[int, int]
power: float = 2.0
scaling_config: ScalingConfig | None = None
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool = True) -> jnp.ndarray:
input_signal = jnp.reshape(inputs, (-1,) + inputs.shape[-1:] + (1,))
params = cwt.melspec_params(
self.features, self.sample_rate, *self.freq_range
)
gabor_mean = self.param("gabor_mean", lambda rng: params[0])
gabor_std = self.param("gabor_std", lambda rng: params[1])
sigma = gabor_mean * gabor_std
gabor_filter = cwt.gabor_filter(
sigma, cwt.Domain.TIME, cwt.Normalization.L1
)
filtered_signal = cwt.convolve_filter(
gabor_filter,
input_signal,
gabor_std,
cwt.Normalization.L1,
self.kernel_size,
stride=(self.stride,),
)
power_signal = jnp.abs(filtered_signal) ** self.power
scaled_signal = self._magnitude_scale(power_signal)
output = jnp.reshape(
scaled_signal, inputs.shape[:-1] + scaled_signal.shape[-2:]
)
return output
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet.
Implementation of the EfficientNet model in Flax.
"""
import enum
import math
from typing import NamedTuple
from chirp.models import layers
from flax import linen as nn
from jax import numpy as jnp
class EfficientNetModel(enum.Enum):
"""Different variants of EfficientNet."""
B0 = "b0"
B1 = "b1"
B2 = "b2"
B3 = "b3"
B4 = "b4"
B5 = "b5"
B6 = "b6"
B7 = "b7"
B8 = "b8"
L2 = "l2"
class EfficientNetStage(NamedTuple):
"""Definition of a single stage in EfficientNet."""
num_blocks: int
features: int
kernel_size: tuple[int, int]
strides: int
expand_ratio: int
# The values for EfficientNet-B0. The other variants are scalings of these.
# See table 1 in the paper or
# https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_builder.py
STEM_FEATURES = 32
STAGES = [
EfficientNetStage(1, 16, (3, 3), 1, 1),
EfficientNetStage(2, 24, (3, 3), 2, 6),
EfficientNetStage(2, 40, (5, 5), 2, 6),
EfficientNetStage(3, 80, (3, 3), 2, 6),
EfficientNetStage(3, 112, (5, 5), 1, 6),
EfficientNetStage(4, 192, (5, 5), 2, 6),
EfficientNetStage(1, 320, (3, 3), 1, 6),
]
HEAD_FEATURES = 1280
REDUCTION_RATIO = 4
class EfficientNetScaling(NamedTuple):
"""Scaling for different model variants."""
width_coefficient: float
depth_coefficient: float
dropout_rate: float
SCALINGS = {
EfficientNetModel.B0: EfficientNetScaling(1.0, 1.0, 0.2),
EfficientNetModel.B1: EfficientNetScaling(1.0, 1.1, 0.2),
EfficientNetModel.B2: EfficientNetScaling(1.1, 1.2, 0.3),
EfficientNetModel.B3: EfficientNetScaling(1.2, 1.4, 0.3),
EfficientNetModel.B4: EfficientNetScaling(1.4, 1.8, 0.4),
EfficientNetModel.B5: EfficientNetScaling(1.6, 2.2, 0.4),
EfficientNetModel.B6: EfficientNetScaling(1.8, 2.6, 0.5),
EfficientNetModel.B7: EfficientNetScaling(2.0, 3.1, 0.5),
EfficientNetModel.B8: EfficientNetScaling(2.2, 3.6, 0.5),
EfficientNetModel.L2: EfficientNetScaling(4.3, 5.3, 0.5),
}
def round_features(
features: int, width_coefficient: float, depth_divisor: int = 8
) -> int:
"""Round number of filters based on width multiplier."""
features *= width_coefficient
new_features = max(
depth_divisor,
int(features + depth_divisor / 2) // depth_divisor * depth_divisor,
)
if new_features < 0.9 * features:
new_features += depth_divisor
return int(new_features)
def round_num_blocks(num_blocks: int, depth_coefficient: float) -> int:
"""Round number of blocks based on depth multiplier."""
return int(math.ceil(depth_coefficient * num_blocks))
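# A worked example of the two scaling rules above (hypothetical helper; the
# numbers follow directly from the definitions): for EfficientNet-B4
# (width 1.4, depth 1.8), the 32-filter stem widens to 48 filters and a
# 3-block stage deepens to 6 blocks.
def _scaling_example() -> None:
  assert round_features(32, 1.4) == 48
  assert round_num_blocks(3, 1.8) == 6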
class Stem(nn.Module):
"""The stem of an EfficientNet model.
The stem is the first layer, which is equivalent for all variations of
EfficientNet.
Attributes:
features: The number of filters.
"""
features: int
@nn.compact
def __call__(
self, inputs: jnp.ndarray, use_running_average: bool
) -> jnp.ndarray:
"""Applies the first step of EfficientNet to the inputs.
Args:
inputs: Inputs should be of shape `(batch size, height, width, channels)`.
use_running_average: Used to decide whether to use running statistics in
BatchNorm (test mode), or the current batch's statistics (train mode).
Returns:
A JAX array of `(batch size, height, width, features)`.
"""
x = nn.Conv(
features=self.features,
kernel_size=(3, 3),
strides=2,
use_bias=False,
padding="VALID",
)(inputs)
x = nn.BatchNorm(use_running_average=use_running_average)(x)
x = nn.swish(x)
return x
class Head(nn.Module):
"""The head of an EfficientNet model.
The head is the last layer, which is equivalent for all variations of
EfficientNet.
Attributes:
features: The number of filters.
"""
features: int
@nn.compact
def __call__(
self, inputs: jnp.ndarray, use_running_average: bool
) -> jnp.ndarray:
"""Applies the last step of EfficientNet to the inputs.
Args:
inputs: Inputs should be of shape `(batch size, height, width, channels)`.
use_running_average: Used to decide whether to use running statistics in
BatchNorm (test mode), or the current batch's statistics (train mode).
Returns:
A JAX array of `(batch size, height, width, features)`.
"""
x = nn.Conv(
features=self.features, kernel_size=(1, 1), strides=1, use_bias=False
)(inputs)
x = nn.BatchNorm(use_running_average=use_running_average)(x)
x = nn.swish(x)
return x
class EfficientNet(nn.Module):
"""EfficientNet model.
Attributes:
model: The variant of EfficientNet model to use.
include_top: If true, the model applies average pooling, flattens the
output, and applies dropout. Note that this is different from Keras's
`include_top` argument, which applies an additional linear transformation.
survival_probability: The survival probability to use for stochastic depth.
head: Optional Flax module to use as custom head.
stem: Optional Flax module to use as custom stem.
"""
model: EfficientNetModel
include_top: bool = True
survival_probability: float = 0.8
head: nn.Module | None = None
stem: nn.Module | None = None
@nn.compact
def __call__(
self,
inputs: jnp.ndarray,
train: bool,
use_running_average: bool | None = None,
) -> jnp.ndarray:
"""Applies EfficientNet to the inputs.
Note that this model does not include the final pooling and fully connected
layers.
Args:
inputs: Inputs should be of shape `(batch size, height, width, channels)`.
train: Whether this is training. This affects Dropout behavior, and also
affects BatchNorm behavior if 'use_running_average' is set to None.
use_running_average: Optional, used to decide whether to use running
statistics in BatchNorm (test mode), or the current batch's statistics
(train mode). If not specified (or specified to None), default to 'not
train'.
Returns:
A JAX array of `(batch size, height, width, features)` if `include_top` is
false. If `include_top` is true the output is `(batch_size, features)`.
"""
if use_running_average is None:
use_running_average = not train
scaling = SCALINGS[self.model]
if self.stem is None:
features = round_features(STEM_FEATURES, scaling.width_coefficient)
stem = Stem(features)
else:
stem = self.stem
x = stem(inputs, use_running_average=use_running_average)
for stage in STAGES:
num_blocks = round_num_blocks(stage.num_blocks, scaling.depth_coefficient)
for block in range(num_blocks):
# MBConv block with squeeze-and-excitation
strides = stage.strides if block == 0 else 1
features = round_features(stage.features, scaling.width_coefficient)
mbconv = layers.MBConv(
features=features,
strides=strides,
expand_ratio=stage.expand_ratio,
kernel_size=stage.kernel_size,
activation=nn.swish,
batch_norm=True,
reduction_ratio=REDUCTION_RATIO,
)
y = mbconv(x, use_running_average=use_running_average)
# Stochastic depth
if block > 0 and self.survival_probability:
y = nn.Dropout(
1 - self.survival_probability,
broadcast_dims=(1, 2, 3),
deterministic=not train,
)(y)
# Skip connections
x = y if block == 0 else y + x
if self.head is None:
features = round_features(HEAD_FEATURES, scaling.width_coefficient)
head = Head(features)
else:
head = self.head
x = head(x, use_running_average=use_running_average)
if self.include_top:
x = jnp.mean(x, axis=(1, 2))
x = nn.Dropout(rate=scaling.dropout_rate, deterministic=not train)(x)
return x
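# A minimal usage sketch for EfficientNet-B0 (hypothetical helper; the input
# shape is illustrative). With train=False, BatchNorm uses running statistics
# and no mutable collections are needed in `apply`.
def _efficientnet_example():
  import jax  # Assumed available; this module only imports jax.numpy.

  model = EfficientNet(model=EfficientNetModel.B0)
  inputs = jnp.zeros((1, 224, 224, 3))
  variables = model.init(jax.random.PRNGKey(0), inputs, train=False)
  # With include_top=True, the output is pooled to shape (1, 1280).
  return model.apply(variables, inputs, train=False)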
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantizers."""
import enum
from typing import Sequence
import flax
from flax import linen as nn
import jax
from jax import numpy as jnp
@flax.struct.dataclass
class QuantizerOutputs:
quantized: jnp.ndarray
quantization_loss: jnp.ndarray
nn_idx: jnp.ndarray
codebook: jnp.ndarray
cluster_counts: list[jnp.ndarray]
class QuantizationStrategy(enum.Enum):
"""The Quantization strategy."""
PRODUCT_QUANTIZATION = 'product_quantization'
RESIDUAL_QUANTIZATION = 'residual_quantization'
def refresh_codebooks(
model_params: flax.core.FrozenDict,
model_state: flax.core.FrozenDict,
rng: jnp.ndarray,
utilization_thresh: float,
init_scalar: float = 0.1,
) -> tuple[flax.core.FrozenDict, flax.core.FrozenDict]:
"""Restart dead codebook vectors.
  When an entry's usage falls below the target utilization_thresh, it is
  re-initialized by adding noise to a popular codebook entry, sampled in
  proportion to the entries' usage counts.
Args:
model_params: Params tree containing codebooks.
model_state: State tree containing codebook usage counts.
rng: RNG used to generate re-initialization noise.
utilization_thresh: Threshold for restarting a codebook entry. Note that
      this is expressed as a proportion of the uniform probability (i.e., the
      actual threshold is utilization_thresh / num_centroids).
init_scalar: Scalar for generated initialization noise.
Returns:
Updated model_params and model_state.
"""
flat_params = flax.traverse_util.flatten_dict(model_params)
flat_model_state = flax.traverse_util.flatten_dict(model_state)
for k, codebook in flat_params.items():
# Check that the lowest variable name is codebook; ignore all other params.
if k[-1] != 'codebook':
continue
# Get the corresponding codebook assignment counts.
# These counts are generated under the 'quantizer' collection.
count_key = ('quantizer',) + k[:-1] + ('cluster_counts',)
counts = flat_model_state[count_key]
num_centroids = counts.shape[0]
cl_probs = flat_model_state[count_key] / num_centroids
thresh = utilization_thresh / num_centroids
replace = (cl_probs < thresh)[:, jnp.newaxis]
# To get replacement entries, take existing codebook entries according to
# their popularity and add a bit of noise.
noise_key, rng = jax.random.split(rng)
init_fn = jax.nn.initializers.variance_scaling(
init_scalar, 'fan_avg', 'normal', dtype=codebook.dtype
)
init_noise = init_fn(noise_key, codebook.shape)
categorical_key, rng = jax.random.split(rng)
idxs = jax.random.categorical(
categorical_key, counts, shape=[num_centroids]
)
replacement_entries = codebook[idxs, :]
init_values = replacement_entries + init_noise
updated_codebook = replace * init_values + (1.0 - replace) * codebook
updated_counts = (
replace[:, 0] * jnp.ones_like(counts) + (1.0 - replace[:, 0]) * counts
)
flat_params[k] = updated_codebook
flat_model_state[count_key] = updated_counts
unflat_params = flax.traverse_util.unflatten_dict(flat_params)
unflat_params = flax.core.frozen_dict.freeze(unflat_params)
unflat_model_state = flax.traverse_util.unflatten_dict(flat_model_state)
unflat_model_state = flax.core.frozen_dict.freeze(unflat_model_state)
return unflat_params, unflat_model_state
class BaseQuantizer(nn.Module):
"""Quantizer that can be used as a building block for ProductQuantizer.
Attributes:
num_centroids: The number of centroids.
stop_gradient_codes: Whether to apply a stop gradient on the quantizer's
codes, to protect them from being modified by downstream losses. Should
      always be True; a future CL will remove this option (it is kept only
      for comparison purposes with previous code).
cross_replica_axis: Name of the cross-replica axis for applying ops
requiring cross-replica reductions.
ema_decay: Decay rate for EMA operations.
init_scale: Scale for codebook initialization.
"""
num_centroids: int
stop_gradient_codes: bool = True
cross_replica_axis: str | None = None
ema_decay: float = 0.99
init_scale: float = 0.1
def get_num_centroids(self):
return self.num_centroids
def get_num_sections(self):
return 1
def create_codebook(self, flat_inputs):
"""Default codebook variable."""
embedding_dim = flat_inputs.shape[-1]
init_fn = jax.nn.initializers.variance_scaling(
self.init_scale, 'fan_avg', 'normal'
)
codebook = self.param(
'codebook', init_fn, (self.num_centroids, embedding_dim)
)
return codebook
def update_cluster_counts(self, encodings, train):
"""Track cluster utilization with an EMA counter."""
counts = jnp.sum(encodings, axis=range(len(encodings.shape) - 1))
cluster_counts = self.variable(
'quantizer', 'cluster_counts', jnp.ones, [self.num_centroids]
)
if not train:
# TODO(tomdenton): Define some better behavior for eval?
# Would be nice to re-init during eval to get eval-specific metrics.
return cluster_counts.value
self._ema_update(cluster_counts, counts)
return cluster_counts.value
def update_mean_estimate(self, flat_inputs, train):
"""Update an EMA estimate of the feature means."""
embedding_dim = flat_inputs.shape[-1]
feature_means = self.variable(
'quantizer', 'feature_means', jnp.zeros, [embedding_dim]
)
new_observation = jnp.mean(flat_inputs, axis=0)
if train:
self._ema_update(feature_means, new_observation)
return feature_means.value
def update_stdev_estimate(self, flat_inputs, train):
"""Update an EMA estimate of the feature standard deviation."""
feature_stdev = self.variable(
'quantizer', 'feature_stdev', jnp.std, flat_inputs
)
new_observation = jnp.std(flat_inputs)
if train:
self._ema_update(feature_stdev, new_observation)
return feature_stdev.value
def _ema_update(self, variable, new_value):
"""Apply an EMA variable update, possibly in cross-device context."""
if self.cross_replica_axis:
new_value = jax.lax.psum(new_value, axis_name=self.cross_replica_axis)
variable.value = (
self.ema_decay * variable.value + (1.0 - self.ema_decay) * new_value
)
class VectorQuantizer(BaseQuantizer):
"""Vector Quantizer using L2-loss.
Attributes:
    commitment_loss: Loss weight for propagating quantization loss to inputs.
    demean: Whether to subtract an EMA estimate of the feature means before
      quantization (the means are added back to the quantized output).
    rescale: Whether to divide by an EMA estimate of the feature standard
      deviation before quantization (the output is rescaled accordingly).
  """
commitment_loss: float = 0.0
demean: bool = False
rescale: bool = False
def loss(self, inputs, quantized):
quant_loss = jnp.square(quantized - jax.lax.stop_gradient(inputs))
if self.commitment_loss > 0:
encoder_loss = jnp.square(jax.lax.stop_gradient(quantized) - inputs)
quant_loss += self.commitment_loss * encoder_loss
return quant_loss
@nn.compact
def __call__(self, inputs, train):
embedding_dim = inputs.shape[-1]
flat_inputs = jnp.reshape(inputs, [-1, embedding_dim])
if self.demean:
feature_means = self.update_mean_estimate(flat_inputs, train)
flat_inputs -= feature_means
if self.rescale:
stdev = self.update_stdev_estimate(flat_inputs, train)
flat_inputs /= stdev + 1e-8
codebook = self.create_codebook(flat_inputs)
# Find nearest neighbor indices.
distances = (
jnp.sum(jnp.square(flat_inputs), 1, keepdims=True)
- 2 * jnp.matmul(flat_inputs, codebook.T)
+ jnp.sum(jnp.square(codebook.T), 0, keepdims=True)
)
nn_idx = jnp.argmin(distances, axis=1)
encodings = jax.nn.one_hot(nn_idx, self.num_centroids)
counts = self.update_cluster_counts(encodings, train)
quantized = jnp.matmul(encodings, codebook)
quantization_loss = self.loss(flat_inputs, quantized)
quantization_loss = jnp.reshape(quantization_loss, inputs.shape)
if self.rescale:
quantized *= stdev + 1e-8
if self.demean:
quantized += feature_means
quantized = jnp.reshape(quantized, inputs.shape)
nn_idx = jnp.reshape(nn_idx, inputs.shape[:-1])
# Apply stop gradient to protect the encodings from downstream losses.
quantized = inputs + jax.lax.stop_gradient(quantized - inputs)
# Expand the dimensions to match those of product quantizer, for interface
# consistency. This can be seen as a product quantizer with just 1 section.
nn_idx = jnp.expand_dims(nn_idx, 0)
codebook_values = jnp.expand_dims(codebook, 0)
if self.stop_gradient_codes:
codebook_values = jax.lax.stop_gradient(codebook_values)
return QuantizerOutputs(
quantized, quantization_loss, nn_idx, codebook_values, [counts]
)
class VectorQuantizerEnt(BaseQuantizer):
"""Vector Quantizer using entropy loss."""
gamma: float = 1.0
def loss(self, scores):
scores = jnp.reshape(scores, [-1, scores.shape[-1]])
h_clust = jnp.sum(scores * jnp.log2(scores + 1e-8), axis=-1)
h_clust = -jnp.mean(h_clust)
diversity = jnp.mean(scores, axis=0)
h_diversity = -jnp.sum(diversity * jnp.log2(diversity + 1e-8))
loss = h_clust - self.gamma * h_diversity
return loss
@nn.compact
def __call__(self, inputs, train):
embedding_dim = inputs.shape[-1]
flat_inputs = jnp.reshape(inputs, [-1, embedding_dim])
codebook = self.create_codebook(flat_inputs)
# Expand codebook and feature dimensions for broadcasting.
codes = jnp.expand_dims(codebook, range(flat_inputs.ndim - 1))
features = jax.lax.stop_gradient(flat_inputs)
features = jnp.expand_dims(features, -2)
similarity = jnp.sum(features * codes, axis=-1)
scores = jax.nn.softmax(similarity, axis=-1)
# Find nearest neighbor indices.
nn_idx = jnp.argmax(scores, axis=-1)
encodings = jax.nn.one_hot(nn_idx, self.num_centroids)
counts = self.update_cluster_counts(encodings, train)
quantized = jnp.matmul(encodings, codes)
quantized -= jnp.mean(quantized, axis=-1, keepdims=True)
quantized /= jnp.linalg.norm(quantized, axis=-1, keepdims=True)
quantized = jnp.reshape(quantized, inputs.shape)
nn_idx = jnp.reshape(nn_idx, inputs.shape[:-1])
quantization_loss = self.loss(scores)
quantization_loss = jnp.full(inputs.shape[:-1] + (1,), quantization_loss)
# Apply stop gradient to protect the encodings from downstream losses.
quantized = inputs + jax.lax.stop_gradient(quantized - inputs)
# Expand the dimensions to match those of product quantizer, for interface
# consistency. This can be seen as a product quantizer with just 1 section.
nn_idx = jnp.expand_dims(nn_idx, 0)
codebook_values = jnp.expand_dims(codebook, 0)
if self.stop_gradient_codes:
codebook_values = jax.lax.stop_gradient(codebook_values)
return QuantizerOutputs(
quantized, quantization_loss, nn_idx, codebook_values, [counts]
)
class ProductQuantizer(nn.Module):
"""Product Quantizer.
Attributes:
num_sections: The number of sections to quantize.
base_quantizers: A list of `num_sections` BaseQuantizer modules.
stop_gradient_codes: Whether to apply a stop gradient on the quantizer's
codes, to protect them from being modified by downstream losses. Should
      always be True; a future CL will remove this option (it is kept only
      for comparison purposes with previous code).
pca_dim: Dimension for learned PCA projection. Set <= 0 to disable.
"""
base_quantizers: Sequence[BaseQuantizer]
stop_gradient_codes: bool = True
pca_dim: int = 0
def get_pca_layer(self, embedding_dim):
"""Create PCA params for projection and pre-bias."""
if self.pca_dim <= 0:
return jnp.ones([1]), jnp.zeros([1])
projection = self.param(
'pca_proj',
jax.nn.initializers.variance_scaling(
1.0, 'fan_avg', 'normal', dtype=jnp.float32
),
[embedding_dim, self.pca_dim],
)
pre_bias = self.param(
'pre_bias', jax.nn.initializers.zeros, [1, embedding_dim]
)
return projection, pre_bias
def pca_project(self, flat_inputs):
"""Map to a low-dim'l space and minimize reconstruction error."""
if self.pca_dim <= 0:
return flat_inputs, 0, jnp.ones([1]), jnp.zeros([1])
embedding_dim = flat_inputs.shape[-1]
projection, pre_bias = self.get_pca_layer(embedding_dim)
projected = jnp.matmul(flat_inputs + pre_bias, projection)
unprojected = jnp.matmul(projected, projection.T) - pre_bias
l2_loss = jnp.sqrt(jnp.sum(jnp.square(flat_inputs - unprojected), axis=-1))
l2_loss = jnp.mean(l2_loss)
    # Encourage orthonormality: push projected.T @ projected / N to identity.
cov = jnp.matmul(projected.T, projected) / flat_inputs.shape[0]
cov_loss = jnp.mean(jnp.square(cov - jnp.eye(self.pca_dim)))
return projected, l2_loss + cov_loss, projection, pre_bias
def pca_unproject(self, quantized, projection, pre_bias):
if self.pca_dim <= 0:
return quantized
return jnp.matmul(quantized, projection.T) - pre_bias
def get_num_centroids(self):
nc = [q.num_centroids for q in self.base_quantizers]
assert (
len(list(set(nc))) == 1
), 'Expected all quantizers to have the same number of centroids.'
return nc[0]
def get_num_sections(self):
return len(self.base_quantizers)
@nn.compact
def __call__(self, inputs, train):
ns = self.get_num_sections()
embedding_dim = inputs.shape[-1]
flat_inputs = jnp.reshape(inputs, [-1, embedding_dim])
flat_inputs, pca_loss, projection, pre_bias = self.pca_project(flat_inputs)
# Divide the input into `num_sections` parts and quantize each separately.
input_sections = jnp.split(flat_inputs, ns, axis=-1)
loss, quantized, nn_idx, codebook_list, counts = [], [], [], [], []
for quantizer, sec in zip(self.base_quantizers, input_sections):
# Let `csz` denote the number of channels of `inputs` and `...` denotes
# irrelevant dimensions like batch size and / or number of frames. Then:
# outputs.quantized: [..., csz / ns].
# outputs.nn_idx: [1, ...].
# outputs.codebook: [1, nc, csz / ns].
outputs = quantizer(sec, train)
quantized.append(outputs.quantized)
nn_idx.append(outputs.nn_idx)
codebook_list.append(outputs.codebook)
loss.append(
jnp.reshape(outputs.quantization_loss, inputs.shape[:-1] + (-1,))
)
counts += outputs.cluster_counts
# Aggregate across 'sections' to get the following shapes:
# quantized: [..., csz].
# nn_idx: [ns, ...].
# codebook: [ns, nc, csz / ns].
quantized = jnp.concatenate(quantized, axis=-1)
quantized = self.pca_unproject(quantized, projection, pre_bias)
quantized = jnp.reshape(quantized, inputs.shape)
nn_idx = jnp.concatenate(nn_idx, axis=0)
nn_idx = jnp.reshape(nn_idx, (ns,) + inputs.shape[:-1])
codebook = jnp.concatenate(codebook_list, axis=0)
quantization_loss = jnp.mean(jnp.stack(loss, axis=0), axis=0) + pca_loss
if self.stop_gradient_codes:
codebook = jax.lax.stop_gradient(codebook)
return QuantizerOutputs(
quantized, quantization_loss, nn_idx, codebook, counts
)
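# A minimal usage sketch for a product quantizer built from four vector
# quantizers (hypothetical helper; the sizes are illustrative). The
# 'quantizer' collection holds EMA cluster counts, so it must be marked
# mutable when training.
def _product_quantizer_example():
  pq = ProductQuantizer(
      base_quantizers=[VectorQuantizer(num_centroids=64) for _ in range(4)]
  )
  inputs = jnp.zeros((8, 16))  # Each section quantizes 16 / 4 = 4 dims.
  variables = pq.init(jax.random.PRNGKey(0), inputs, train=False)
  outputs, _ = pq.apply(variables, inputs, train=True, mutable=['quantizer'])
  # outputs.quantized has shape (8, 16) and outputs.nn_idx has shape (4, 8).
  return outputs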
class ResidualQuantizer(nn.Module):
"""A residual quantizer with explicitly passed sub-quantizers.
Accepting a list allows using arbitrary quantizers (e.g., product quantizers)
in sequence.
"""
quantizers: Sequence[nn.Module] = ()
stop_gradient_codes: bool = True
def get_num_centroids(self):
nc = [q.num_centroids for q in self.quantizers]
assert (
len(list(set(nc))) == 1
), 'Expected all quantizers to have the same number of centroids.'
return nc[0]
def get_num_sections(self):
return len(self.quantizers)
@nn.compact
def __call__(self, inputs, train=True):
quantized = 0.0
quantization_loss = 0.0
nn_idx, codebooks, counts = [], [], []
embedding_dim = inputs.shape[-1]
flat_inputs = jnp.reshape(inputs, [-1, embedding_dim])
residual = flat_inputs
for quantizer in self.quantizers:
quant_outputs = quantizer(residual, train)
quantized += quant_outputs.quantized
residual -= quant_outputs.quantized
nn_idx.append(quant_outputs.nn_idx)
codebooks.append(quant_outputs.codebook)
quantization_loss += jnp.mean(quant_outputs.quantization_loss)
counts += quant_outputs.cluster_counts
# Aggregate across 'sections' to get the following shapes:
# quantized: [...].
# nn_idx: [ns, ...].
# codebook: [ns, nc, csz / ns].
    # Note that using non-homogeneous sub-quantizers would break these
    # concatenations.
nn_idx = jnp.concatenate(nn_idx, axis=0)
nn_idx = jnp.reshape(nn_idx, (len(self.quantizers),) + inputs.shape[:-1])
codebooks = jnp.concatenate(codebooks, axis=0)
if self.stop_gradient_codes:
codebooks = jax.lax.stop_gradient(codebooks)
quantized = jnp.reshape(quantized, inputs.shape)
quantization_loss = jnp.full(inputs.shape[:-1] + (1,), quantization_loss)
return QuantizerOutputs(
quantized, quantization_loss, nn_idx, codebooks, counts
)
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model layers.
Building blocks and layers to construct networks, implemented as Flax modules.
"""
from typing import Callable
from flax import linen as nn
import jax
from jax import nn as jnn
from jax import numpy as jnp
import optax
JTensor = jnp.ndarray
class SqueezeAndExcitation(nn.Module):
"""Squeeze-and-Excitation layer.
See "Squeeze-and-Excitation Networks" (Hu et al., 2018), particularly
equations 2 and 3.
Attributes:
reduction_ratio: The reduction factor in the squeeze operation. Referred to
as `r` in the paper.
activation: The activation to apply after squeezing.
"""
reduction_ratio: int = 4
activation: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu
@nn.compact
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Applies SqueezeAndExcite on the 2D inputs.
Args:
      inputs: Input data of shape `(batch size, height, width, channels)`.
Returns:
JAX array with same shape as the input.
"""
if inputs.ndim != 4:
raise ValueError(
"Inputs should in shape of `[batch size, height, width, features]`"
)
# Squeeze
x = jnp.mean(inputs, axis=(1, 2))
x = nn.Dense(features=x.shape[-1] // self.reduction_ratio, name="Reduce")(x)
x = self.activation(x)
# Excite
x = nn.Dense(features=inputs.shape[-1], name="Expand")(x)
x = nn.sigmoid(x)
return inputs * x[:, None, None, :]
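# A minimal usage sketch for squeeze-and-excitation (hypothetical helper; the
# shapes are illustrative). The output has the same shape as the input.
def _squeeze_excitation_example() -> jnp.ndarray:
  se = SqueezeAndExcitation(reduction_ratio=4)
  x = jnp.zeros((1, 8, 8, 32))
  variables = se.init(jax.random.PRNGKey(0), x)
  return se.apply(variables, x)  # (1, 8, 8, 32)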
class MBConv(nn.Module):
"""Mobile inverted bottleneck block.
As introduced in "Mobilenetv2: Inverted residuals and linear bottlenecks"
(Sandler et al., 2018). See figure 4d for an illustration and
https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet for
a reference implementation.
The defaults are those from the MobileNetV2 paper. There is added support for
batch normalization and squeeze-and-excitation blocks as used by EfficientNet.
Note that the skip connection is not part of this module.
Attributes:
features: The number of filters.
strides: The strides to use in the depthwise separable convolution.
expand_ratio: The expansion factor to use. A block with expansion factor `N`
is commonly referred to as MBConvN.
kernel_size: The kernel size used by the depthwise separable convolution.
activation: The activation function to use after the expanding 1x1
convolution. Also used by the optional squeeze-and-excitation block.
batch_norm: Whether to use batch normalization after the expanding and
reducing convolutions.
reduction_ratio: If given, a squeeze-and-excitation block is inserted after
the depthwise separable convolution with the given reduction factor. Note
that this reduction ratio is relative to the number of input channels,
i.e., it scales with `expand_ratio`.
"""
features: int
strides: int
expand_ratio: int
kernel_size: tuple[int, int] = (3, 3)
activation: Callable[[jnp.ndarray], jnp.ndarray] = jnn.relu6
batch_norm: bool = False
reduction_ratio: int | None = None
@nn.compact
def __call__(
      self, inputs: jnp.ndarray, use_running_average: bool | None = None
) -> jnp.ndarray:
"""Applies an inverted bottleneck block to the inputs.
Args:
inputs: Inputs should be of shape `(batch size, height, width, channels)`.
use_running_average: Used to decide whether to use running statistics in
BatchNorm (test mode), or the current batch's statistics (train mode).
Returns:
A JAX array of `(batch size, height, width, features)`.
"""
features = self.expand_ratio * inputs.shape[-1]
x = inputs
if self.expand_ratio != 1:
x = nn.Conv(
features=features,
kernel_size=(1, 1),
strides=(1, 1),
use_bias=False,
name="ExpandConv",
)(x)
if self.batch_norm:
x = nn.BatchNorm(
use_running_average=use_running_average, name="ExpandBatchNorm"
)(x)
x = self.activation(x)
if self.strides == 2:
def _pad_width(input_size: int, kernel_size: int) -> tuple[int, int]:
"""Calculate padding required to halve input with stride 2."""
return (kernel_size // 2) - (1 - input_size % 2), kernel_size // 2
padding = (
_pad_width(x.shape[1], self.kernel_size[0]),
_pad_width(x.shape[2], self.kernel_size[1]),
)
else:
padding = "SAME"
x = nn.Conv(
features=features,
kernel_size=self.kernel_size,
strides=self.strides,
padding=padding,
feature_group_count=features,
use_bias=False,
name="DepthwiseConv",
)(x)
if self.batch_norm:
x = nn.BatchNorm(
use_running_average=use_running_average, name="DepthwiseBatchNorm"
)(x)
x = self.activation(x)
if self.reduction_ratio is not None:
x = SqueezeAndExcitation(
reduction_ratio=self.reduction_ratio * self.expand_ratio,
activation=self.activation,
)(x)
x = nn.Conv(
features=self.features,
kernel_size=(1, 1),
strides=1,
use_bias=False,
name="ProjectConv",
)(x)
if self.batch_norm:
x = nn.BatchNorm(
use_running_average=use_running_average, name="ProjectBatchNorm"
)(x)
return x
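# A minimal usage sketch for MBConv (hypothetical helper; the shapes are
# illustrative). With strides=2, the spatial dimensions are halved and the
# channel count becomes `features`.
def _mbconv_example() -> jnp.ndarray:
  block = MBConv(features=24, strides=2, expand_ratio=6)
  x = jnp.zeros((1, 32, 32, 16))
  variables = block.init(jax.random.PRNGKey(0), x)
  return block.apply(variables, x)  # (1, 16, 16, 24)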
class Identity(nn.Module):
"""Identity layer."""
@nn.compact
def __call__(self, inputs: jnp.ndarray, *args, **kwargs) -> jnp.ndarray:
"""Identity function.
Args:
inputs: Input array.
*args: Any other arguments are ignored.
**kwargs: Any keyword arguments are ignored.
Returns:
The input, unchanged.
"""
return inputs
class FeedForward(nn.Module):
"""Linear layer.
Attributes:
output_dims: Depth of the output.
activation: The activation to apply after the linear layer.
"""
output_dims: int = 0
activation: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu
@nn.compact
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Applies a feed forward layer to inputs.
Args:
inputs: The inputs jnp.ndarray. Shaped [..., input_dims].
Returns:
Outputs. Shaped [..., output_dims].
"""
x = nn.Dense(features=self.output_dims, name="FeedForward")(inputs)
x = self.activation(x)
return x
# Transformer layers.
class TransformerFeedForward(nn.Module):
"""Transformer feedforward layer with residual connection and dropout.
Attributes:
input_dims: Depth of the input.
hidden_dims: Hidden dimension of FFN.
    activation: Activation function to use, given as a callable (e.g.,
      nn.relu or nn.swish).
residual_dropout_prob: Residual dropout.
relu_dropout_prob: FFN dropout.
add_skip_connection: Whether to add residual connection.
residual_weight: Weight of the residual connection. Output = fn(x) *
residual_weight + x.
"""
input_dims: int = 0
hidden_dims: int = 0
activation: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu
residual_dropout_prob: float = 0.0
relu_dropout_prob: float = 0.0
add_skip_connection: bool = True
residual_weight: float = 1.0
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool) -> jnp.ndarray:
output_dims = self.input_dims
inputs_normalized = nn.LayerNorm(name="layer_norm")(inputs)
# Apply first FFN layer
projected_inputs = FeedForward(
output_dims=self.hidden_dims, activation=self.activation
)(inputs_normalized)
# Apply RELU dropout
projected_inputs = nn.Dropout(self.relu_dropout_prob)(
projected_inputs, deterministic=not train
)
# Apply second FFN layer
projected_inputs = FeedForward(
output_dims=output_dims, activation=Identity()
)(projected_inputs)
# Apply residual dropout
projected_inputs = nn.Dropout(self.residual_dropout_prob)(
projected_inputs, deterministic=not train
)
# Apply skip connection
if self.add_skip_connection:
projected_inputs = inputs + projected_inputs * self.residual_weight
return projected_inputs
# Convolution layers.
class LightConv1D(nn.Module):
"""Lightweight conv layer.
architecture::
input-ln()-ff()-glu()-depthwise_conv1d()-norm()-act()-ff()-dropout()-+-output
|__________________________________________________________________|
Attributes:
    input_dims: Input dimension (which, in fact, is also the output
      dimension).
    kernel_size: Kernel size of the 1D depthwise conv.
    conv_activation: Activation after normalization.
    dropout_prob: Dropout probability.
    downsample: If True, downsample the sequence (and its residual branch) by
      a factor of two.
"""
input_dims: int | None = None
kernel_size: int | None = None
conv_activation: Callable[[jnp.ndarray], jnp.ndarray] = nn.swish
dropout_prob: float = 0.0
downsample: bool = True
@nn.compact
def __call__(
self,
inputs: jnp.ndarray,
train: bool,
use_running_average: bool | None = None,
) -> jnp.ndarray:
"""Lightweight conv layer.
Args:
inputs: Input sequence jnp.ndarray of shape [B, T, H].
train: Whether this is training. This affects Dropout behavior, and also
affects BatchNorm behavior if 'use_running_average' is set to None.
use_running_average: Optional, used to decide whether to use running
statistics in BatchNorm (test mode), or the current batch's statistics
(train mode). If not specified (or specified to None), default to 'not
train'.
Returns:
The lconv output with shape [B, T, H].
"""
if use_running_average is None:
use_running_average = not train
unnormalized_inputs = inputs
inputs = nn.LayerNorm(name="ln")(inputs)
act_inputs = FeedForward(
output_dims=self.input_dims, activation=Identity()
)(inputs)
gated_inputs = FeedForward(
output_dims=self.input_dims, activation=Identity()
)(inputs)
inputs = act_inputs * jax.nn.sigmoid(gated_inputs)
inputs = nn.Conv(
features=self.input_dims,
kernel_size=(self.kernel_size,),
strides=2 if self.downsample else 1,
padding="SAME",
input_dilation=1,
kernel_dilation=1,
feature_group_count=self.input_dims,
use_bias=False,
)(inputs)
inputs = nn.BatchNorm()(inputs, use_running_average=use_running_average)
inputs = self.conv_activation(inputs)
inputs = FeedForward(output_dims=self.input_dims, activation=Identity())(
inputs
)
inputs = nn.Dropout(self.dropout_prob)(inputs, deterministic=not train)
if self.downsample:
unnormalized_inputs = nn.avg_pool(
unnormalized_inputs, (2,), (2,), padding="SAME"
)
# If downsampling happened, the dimensions might also have changed, which
# means we need to project the inputs for the residual connection
if unnormalized_inputs.shape[-1] != self.input_dims:
unnormalized_inputs = nn.Dense(features=self.input_dims)(
unnormalized_inputs
)
output = inputs + unnormalized_inputs
return output
# Conformer layers.
class SelfAttentionWithNormAndResidual(nn.Module):
"""Self attention sub-layer used in the Conformer layer.
Input is first normalized using layer norm. Output is processed using
multi-headed attention. And finally, the output of the attention layer
is combined with the input by residual connection.
For the normalization, we can specify pre norm or post norm.
For the residual connection, we can specify the residual weight.
Attributes:
residual_weight: Weight of the residual connection. Output = fn(x) *
residual_weight + x * input_weight.
input_weight: Weight of the input connection. Output = fn(x) *
residual_weight + x * input_weight.
pre_layer_norm: Whether to apply norm before or after the layer.
    residual_dropout_prob: Probability at which we apply dropout to the
      residual layers, such that residual(x, y) = x + dropout(y).
"""
residual_weight: float = 1.0
input_weight: float = 1.0
pre_layer_norm: bool = True
residual_dropout_prob: float = 0.0
atten_dropout_prob: float = 0.0
num_heads: int = 1
@nn.compact
def __call__(
self,
inputs: jnp.ndarray,
train: bool,
atten_mask: JTensor | None = None,
) -> jnp.ndarray:
unnormalized_inputs = inputs
if self.pre_layer_norm:
inputs = nn.LayerNorm()(inputs)
self_atten = nn.MultiHeadDotProductAttention(
num_heads=self.num_heads, dropout_rate=self.atten_dropout_prob
)
result = self_atten(
inputs_q=inputs,
inputs_kv=inputs,
mask=atten_mask,
deterministic=not train,
)
if not self.pre_layer_norm:
result = nn.LayerNorm()(result)
dropout = nn.Dropout(self.residual_dropout_prob, name="residual_dropout")
result = (
dropout(result, deterministic=not train) * self.residual_weight
+ unnormalized_inputs * self.input_weight
)
return result
class Conformer(nn.Module):
"""Conformer layer as in https://arxiv.org/abs/2005.08100.
Canonical version (with default params.)
x = x + 1/2 * FFN(x)
x = x + MHSA(x)
x = x + Lconv(x)
x = x + 1/2 * FFN(x)
y = ln(x)
Residual connections are implemented inside each individual block:
FFN, MHSA, LConv.
Optionally one can change the order of MHSA and conv.
Attributes:
model_dims: Encoder model dimension.
kernel_size: Conv kernel size.
ff_activation: Activation function used in the feedforward network.
ff_residual_weight: Residual weight used in the fflayer.
ffn_dim_multiplier: Feed forward hidden dimension will be ffn_dim_multiplier
* model_dims.
atten_num_heads: Number of attention heads.
    layer_order: Only mhsa, conv, mhsa_before_conv or conv_before_mhsa are
      supported.
dropout_prob: Dropout prob of inner components.
    conv_residual_dropout: Conv block residual dropout. Overridden by
      `dropout_prob` if that is not None.
    atten_residual_dropout: Attention block residual dropout. Overridden by
      `dropout_prob` if that is not None.
    ffn_residual_dropout: Feed forward block residual dropout. Overridden by
      `dropout_prob` if that is not None.
    atten_dropout: Dropout in the attention layer. Overridden by
      `dropout_prob` if that is not None.
    ffn_relu_dropout: Post-activation dropout in the feed-forward layer.
      Overridden by `dropout_prob` if that is not None.
    fflayer_weight_sharing: If True, the final feed-forward layer is a
      weight-shared second application of the fflayer_start layer, and no
      separate fflayer_end parameters are created.
"""
model_dims: int = 512
kernel_size: int = 32
ff_activation: Callable[[jnp.ndarray], jnp.ndarray] = nn.swish
ff_residual_weight: float = 0.5
ffn_dim_multiplier: int = 4
atten_num_heads: int = 8
layer_order: str = "mhsa_before_conv"
dropout_prob: float | None = None
conv_residual_dropout: float | None = None
atten_residual_dropout: float | None = None
ffn_residual_dropout: float | None = None
atten_dropout: float | None = None
ffn_relu_dropout: float | None = None
fflayer_weight_sharing: bool = False
downsample: bool = False
skip_layer_norm: bool = True
@nn.compact
def __call__(
self,
inputs: jnp.ndarray,
train: bool,
use_running_average: bool | None = None,
atten_mask: jnp.ndarray | None = None,
) -> jnp.ndarray:
"""Conformer layer.
Args:
inputs: Input sequence jnp.ndarray of shape [B, T, H].
train: Whether this is training. This affects Dropout behavior, and also
affects BatchNorm behavior if 'use_running_average' is set to None.
use_running_average: Optional, used to decide whether to use running
statistics in BatchNorm (test mode), or the current batch's statistics
(train mode). If not specified (or specified to None), default to 'not
train'.
atten_mask: Input jnp.ndarray attention mask.
Raises:
      RuntimeError: If an attention mask is given but there is no attention layer.
Returns:
The conformer output with shape [B, T, D].
"""
if use_running_average is None:
use_running_average = not train
layer_order_set = ["mhsa", "conv", "mhsa_before_conv", "conv_before_mhsa"]
if self.layer_order not in layer_order_set:
raise ValueError(
f"`self.layer_order` must be within `{layer_order_set}`."
)
input_dims = inputs.shape[-1]
# Set up the first ff layer.
fflayer_start = TransformerFeedForward(
name="fflayer_start",
activation=self.ff_activation,
input_dims=input_dims,
hidden_dims=input_dims * self.ffn_dim_multiplier,
residual_weight=self.ff_residual_weight,
residual_dropout_prob=self.ffn_residual_dropout,
relu_dropout_prob=self.ffn_relu_dropout,
)
# Set up the last ff layer.
fflayer_end = TransformerFeedForward(
name="fflayer_end",
activation=self.ff_activation,
input_dims=self.model_dims,
hidden_dims=self.model_dims * self.ffn_dim_multiplier,
residual_weight=self.ff_residual_weight,
residual_dropout_prob=self.ffn_residual_dropout,
relu_dropout_prob=self.ffn_relu_dropout,
)
# Setup attention layer.
if "mhsa" in self.layer_order:
trans_atten = SelfAttentionWithNormAndResidual(
residual_dropout_prob=self.atten_residual_dropout,
atten_dropout_prob=self.atten_dropout,
num_heads=self.atten_num_heads,
)
# Setup convolution layer.
lconv = LightConv1D(
input_dims=self.model_dims,
kernel_size=self.kernel_size,
dropout_prob=self.conv_residual_dropout,
downsample=self.downsample,
)
if not self.skip_layer_norm:
final_ln = nn.LayerNorm(name="final_ln")
if atten_mask is not None and "mhsa" not in self.layer_order:
raise RuntimeError("Attention mask is provided but no attention layer.")
inputs = fflayer_start(inputs, train)
if self.layer_order == "mhsa":
inputs = trans_atten(inputs=inputs, train=train, atten_mask=atten_mask)
elif self.layer_order == "conv":
inputs = lconv(
inputs, train=train, use_running_average=use_running_average
)
elif self.layer_order == "mhsa_before_conv":
inputs = trans_atten(inputs=inputs, train=train, atten_mask=atten_mask)
inputs = lconv(inputs, train)
else:
inputs = lconv(inputs, train)
inputs = trans_atten(inputs=inputs, train=train, atten_mask=atten_mask)
if self.fflayer_weight_sharing:
# With the weight sharing, we apply fflayer_start again
inputs = fflayer_start(inputs, train)
else:
inputs = fflayer_end(inputs, train)
if not self.skip_layer_norm:
inputs = final_ln(inputs)
return inputs
class StridedAutopool(nn.Module):
"""Strided 1D Autopool over an array of shape [B, T, D].
See https://arxiv.org/abs/1804.10070 for basic Autopool derivation.
This implementation applies autopool to strided time windows.
"""
alpha_0: float
pool_width: int
pool_stride: int
padding: str
@nn.compact
def __call__(self, inputs):
alpha_shape = [1] * (len(inputs.shape) - 1) + [inputs.shape[-1]]
alpha = self.param(
"alpha", nn.initializers.constant(self.alpha_0), alpha_shape
)
pool_fn = lambda x: nn.pooling.avg_pool( # pylint: disable=g-long-lambda
x,
window_shape=(self.pool_width,),
strides=(self.pool_stride,),
padding=self.padding,
)
exp_inputs = jnp.exp(alpha * inputs)
auto_pooled = pool_fn(exp_inputs * inputs) / pool_fn(exp_inputs)
return auto_pooled
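# A minimal usage sketch for StridedAutopool (hypothetical helper; the shapes
# are illustrative). Per the autopool derivation, alpha == 0 recovers average
# pooling and alpha -> inf approaches max pooling.
def _autopool_example() -> jnp.ndarray:
  pool = StridedAutopool(
      alpha_0=1.0, pool_width=4, pool_stride=2, padding="SAME"
  )
  x = jnp.zeros((2, 16, 8))
  variables = pool.init(jax.random.PRNGKey(0), x)
  return pool.apply(variables, x)  # (2, 8, 8)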
class EarlyFeatureExtractor(nn.Module):
"""Network used as the "early feature extractor" for HuBERT.
  This module consists of a number of convolutional layers. It also uses
group normalization after the first layer only. It is based on the
architecture used for wav2vec 2.0 / HuBERT, and using the defaults of the
implementation from
https://github.com/facebookresearch/fairseq/blob/5307a0e078d7460003a86f4e2246d459d4706a1d/fairseq/models/wav2vec/wav2vec2.py
Attributes:
    conv_layer_tuples: A list of (dim, kernel size, stride) tuples, one for
      each of the convolutional layers.
dropout_prob: A float. The dropout probability.
activation: The activation to apply after each convolutional "block".
deprecated_group_conv: Whether to use the older version of this layer
(which used grouped convolutions), for compatibility with old
experiments. This option will be removed in the future.
"""
conv_layer_tuples: tuple[tuple[int, int, int], ...]
dropout_prob: float = 0.0
activation: Callable[[jnp.ndarray], jnp.ndarray] = nn.gelu
deprecated_group_conv: bool = False
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool) -> jnp.ndarray:
"""Convolutional feature extractor used for "early" feature extraction.
Args:
inputs: Input sequence jnp.ndarray of shape [B, T, H].
train: Whether we are in training mode. Affects dropout.
Returns:
A jnp.ndarray with shape [B, T, D].
"""
if self.deprecated_group_conv:
if inputs.ndim != 3:
raise ValueError("Expected the input to have 3 dimensions.")
model_dims = self.conv_layer_tuples[0][0]
if inputs.shape[-1] != model_dims:
inputs = FeedForward(output_dims=model_dims)(inputs)
# TODO(etriantafillou): Experiment with adding residual connections.
for i, (dim, k, stride) in enumerate(self.conv_layer_tuples):
inputs = nn.Conv(
features=dim,
kernel_size=(k,),
strides=(stride,),
feature_group_count=dim if self.deprecated_group_conv else 1,
use_bias=False,
name="conv_layer_{}".format(i),
)(inputs)
inputs = nn.Dropout(self.dropout_prob)(inputs, deterministic=not train)
if i == 0:
if self.deprecated_group_conv:
inputs = nn.GroupNorm(num_groups=None, group_size=dim)(inputs)
else:
inputs = nn.GroupNorm(num_groups=dim)(inputs)
inputs = self.activation(inputs)
return inputs
def hinge_loss(predictor_outputs, targets):
"""Computes the hinge loss while accommodating targets in {0, 1}."""
targets = 2 * targets - 1
return optax.hinge_loss(predictor_outputs, targets)
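# A worked example for `hinge_loss` (hypothetical helper; the numbers are
# illustrative). Targets in {0, 1} are remapped to {-1, +1} before calling
# optax.hinge_loss, so confident correct predictions incur zero loss.
def _hinge_loss_example() -> jnp.ndarray:
  predictions = jnp.array([2.0, -1.0])
  targets = jnp.array([1.0, 0.0])
  return hinge_loss(predictions, targets)  # [0., 0.]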
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conformer layers."""
import dataclasses
import math
from typing import Callable
from chirp.models import layers
from flax import linen as nn
from jax import numpy as jnp
import numpy as np
class Conformer(nn.Module):
"""Projection layer followed by a conformer layer."""
model_dims: int = 512
kernel_size: int = 32
ff_activation: Callable[[jnp.ndarray], jnp.ndarray] = nn.swish
ff_residual_weight: float = 0.5
ffn_dim_multiplier: int = 4
atten_num_heads: int = 8
layer_order: str = 'mhsa_before_conv'
dropout_prob: float | None = None
conv_residual_dropout: float | None = None
atten_residual_dropout: float | None = None
ffn_residual_dropout: float | None = None
atten_dropout: float | None = None
ffn_relu_dropout: float | None = None
fflayer_weight_sharing: bool = False
num_blocks: int = 1
  # Tuples of (block index, scaling factor for the number of channels).
downsample: list[tuple[int, float]] = dataclasses.field(default_factory=list)
skip_layer_norm: bool = True
@nn.compact
def __call__(
self,
inputs: jnp.ndarray,
train: bool,
return_intermediate_list: bool,
use_running_average: bool | None = None,
) -> jnp.ndarray:
"""Projection followed by a conformer layer.
Args:
inputs: Input sequence JTensor of shape [B, T, H].
train: Whether this is training. This affects Dropout behavior, and also
affects BatchNorm behavior if 'use_running_average' is set to None.
return_intermediate_list: Whether to return a list of the activations
after each conformer block, instead of only the final ones.
use_running_average: Optional, used to decide whether to use running
statistics in BatchNorm (test mode), or the current batch's statistics
(train mode). If not specified (or specified to None), default to 'not
train'.
Returns:
The conformer output with shape [B, T, D].
"""
if use_running_average is None:
use_running_average = not train
if inputs.shape[-1] != self.model_dims:
# Conformer requires the input dims to be `model_dims` so use a projection
# layer that maps `input_dims` to `model_dims` before the conformer layer.
inputs = layers.FeedForward(output_dims=self.model_dims)(inputs)
if self.dropout_prob is not None:
all_dropouts = [
self.atten_dropout,
self.atten_residual_dropout,
self.conv_residual_dropout,
self.ffn_residual_dropout,
self.ffn_relu_dropout,
]
for prob in all_dropouts:
assert prob is None or prob == self.dropout_prob
atten_dropout = self.dropout_prob
atten_residual_dropout = self.dropout_prob
conv_residual_dropout = self.dropout_prob
ffn_residual_dropout = self.dropout_prob
ffn_relu_dropout = self.dropout_prob
else:
atten_dropout = self.atten_dropout
atten_residual_dropout = self.atten_residual_dropout
conv_residual_dropout = self.conv_residual_dropout
ffn_residual_dropout = self.ffn_residual_dropout
ffn_relu_dropout = self.ffn_relu_dropout
intermediate = []
model_dims = self.model_dims
downsample = list(self.downsample).copy()
for i in range(self.num_blocks):
if downsample and downsample[0][0] == i:
should_downsample = True
        model_dims = int(model_dims * downsample[0][1])
model_dims = (model_dims // self.atten_num_heads) * self.atten_num_heads
downsample = downsample[1:]
else:
should_downsample = False
inputs = layers.Conformer(
model_dims=model_dims,
kernel_size=self.kernel_size,
ff_activation=self.ff_activation,
ff_residual_weight=self.ff_residual_weight,
ffn_dim_multiplier=self.ffn_dim_multiplier,
atten_num_heads=self.atten_num_heads,
layer_order=self.layer_order,
dropout_prob=self.dropout_prob,
conv_residual_dropout=conv_residual_dropout,
atten_residual_dropout=atten_residual_dropout,
ffn_residual_dropout=ffn_residual_dropout,
atten_dropout=atten_dropout,
ffn_relu_dropout=ffn_relu_dropout,
fflayer_weight_sharing=self.fflayer_weight_sharing,
name='conformer_block_{}'.format(i),
downsample=should_downsample,
skip_layer_norm=self.skip_layer_norm,
)(inputs, train, use_running_average=use_running_average)
intermediate.append(inputs)
if return_intermediate_list:
return intermediate # pytype: disable=bad-return-type # jax-ndarray
else:
return inputs
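# Illustrative sketch (not used by the module): how the `downsample` schedule
# shrinks `model_dims` across blocks. Each (block_index, scale) entry rescales
# the dims at that block, rounded down to a multiple of the head count so the
# attention heads still divide the feature dimension evenly.
def _downsampled_dims(
    model_dims: int,
    downsample: list[tuple[int, float]],
    atten_num_heads: int,
    num_blocks: int,
) -> list[int]:
  dims = []
  schedule = list(downsample)
  for i in range(num_blocks):
    if schedule and schedule[0][0] == i:
      model_dims = int(model_dims * schedule[0][1])
      model_dims = (model_dims // atten_num_heads) * atten_num_heads
      schedule = schedule[1:]
    dims.append(model_dims)
  return dims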
class PositionalEmbedding(nn.Module):
"""Generates position embedding for a given 1-d sequence.
Attributes:
min_timescale: Start of the geometric index. Determines the periodicity of
the added signal.
max_timescale: End of the geometric index. Determines the frequency of the
added signal.
embedding_dims: Dimension of the embedding to be generated.
"""
embedding_dims: int = 0
min_timescale: int = 1
max_timescale: int = 10_000
@nn.compact
def __call__(self, seq_length: int) -> jnp.ndarray:
"""Generates an array of sinusoids with different frequencies.
Args:
seq_length: Sequence length of the embeddings to be generated.
Returns:
An array of shape (1, seq_length, embedding_dim) containing positional
embeddings.
"""
position = jnp.arange(seq_length, dtype=jnp.float32)[jnp.newaxis, :]
num_timescales = self.embedding_dims // 2
log_timescale_increment = math.log(
self.max_timescale / self.min_timescale
) / jnp.maximum(num_timescales - 1, 1.0)
inv_timescales = self.min_timescale * jnp.exp(
jnp.arange(num_timescales) * -log_timescale_increment
)
scaled_time = (
position[:, :, jnp.newaxis]
* inv_timescales[jnp.newaxis, jnp.newaxis, :]
)
signal = jnp.concatenate(
[jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=2
)
# Force usage of `np` rather than `jnp` to compute static values at trace
# time.
if self.embedding_dims != 0:
signal = jnp.pad(
signal, [(0, 0), (0, 0), (0, np.mod(self.embedding_dims, 2))]
)
return signal
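# Usage sketch (illustrative): `PositionalEmbedding` has no parameters, so it
# can be applied with an empty variable collection.
def _positional_embedding_example() -> jnp.ndarray:
  """Returns sinusoidal embeddings of shape (1, 16, 8); illustrative only."""
  return PositionalEmbedding(embedding_dims=8).apply({}, seq_length=16)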
class ConvolutionalSubsampling(nn.Module):
"""Convolutional subsampling module.
This is the convolutional subsampling module as used in the conformer
paper[^1]. It consists of two 2D convolutional layers with a stride of 2.
The frequencies and output channels get combined to produce a 1D output.
Relative positional embeddings are added for the conformer blocks.
  [^1]: Gulati, Anmol, et al. "Conformer: Convolution-augmented transformer for
speech recognition." arXiv preprint arXiv:2005.08100 (2020).
"""
features: int
kernel_size: tuple[int, int] = (3, 3)
strides: tuple[int, int] = (2, 2)
num_layers: int = 2
dropout_prob: float = 0.1
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool) -> jnp.ndarray:
"""Apply convolutional subsampling.
Args:
inputs: A batch of spectrograms of size (batch, time, channels).
train: Whether or not this is training (used for dropout).
Returns:
      A subsampled array that is 4 times smaller in the time and channel dims.
"""
x = inputs
# Subsample
x = nn.Conv(self.features, self.kernel_size, strides=self.strides)(x)
x = nn.relu(x)
x = nn.Conv(self.features, self.kernel_size, strides=self.strides)(x)
x = nn.relu(x)
# Merge channels and frequency dimension
x = jnp.reshape(x, x.shape[:-2] + (-1,))
x = nn.Dense(self.features)(x)
# Add positional embeddings
x = x + PositionalEmbedding(embedding_dims=self.features)(x.shape[-2])
x = nn.Dropout(self.dropout_prob, deterministic=not train)(x)
return x
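# Length sketch (illustrative): under 'SAME' padding each stride-2 layer maps
# a time length T to ceil(T / 2), so the default two layers subsample time by
# roughly a factor of 4.
def _subsampled_length(time: int, num_layers: int = 2, stride: int = 2) -> int:
  for _ in range(num_layers):
    time = -(-time // stride)  # Ceiling division.
  return time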
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Separation model."""
from typing import Callable
from chirp.models import layers
import flax
from flax import linen as nn
import jax
from jax import numpy as jnp
SOUNDSTREAM_UNET = 'soundstream_unet'
@flax.struct.dataclass
class SeparatorOutput:
"""Separation model outputs."""
separated_audio: jnp.ndarray
bottleneck: jnp.ndarray | None = None
embedding: jnp.ndarray | None = None
label: jnp.ndarray | None = None
genus: jnp.ndarray | None = None
family: jnp.ndarray | None = None
order: jnp.ndarray | None = None
def time_reduce_logits(self, reduction: str = 'AVG') -> 'SeparatorOutput':
"""Returns a new ModelOutput with scores reduced over the time axis.
Args:
reduction: Type of reduction to use. One of AVG (promotes precision), MAX
(promotes recall), or MIDPOINT (unbiased, but probably useless for very
long sequences).
"""
if reduction == 'AVG':
reduce_fn = lambda x: jnp.mean(x, axis=1)
elif reduction == 'MAX':
reduce_fn = lambda x: jnp.max(x, axis=1)
elif reduction == 'MIDPOINT':
midpt = self.label.shape[1] // 2
reduce_fn = lambda x: x[:, midpt, :]
else:
raise ValueError(f'Reduction {reduction} not recognized.')
return SeparatorOutput(
self.separated_audio,
self.bottleneck,
self.embedding,
reduce_fn(self.label) if self.label is not None else None,
reduce_fn(self.genus) if self.genus is not None else None,
reduce_fn(self.family) if self.family is not None else None,
reduce_fn(self.order) if self.order is not None else None,
)
def enforce_mixture_consistency_time_domain(
mixture_waveforms, separated_waveforms, use_mag_weighting=False
):
"""Projection implementing mixture consistency in time domain.
This projection makes the sum across sources of separated_waveforms equal to
mixture_waveforms and minimizes the unweighted mean-squared error between the
sum across sources of separated_waveforms and mixture_waveforms. See
https://arxiv.org/abs/1811.08521 for the derivation.
Args:
mixture_waveforms: Array of mixture waveforms with shape [B, T].
separated_waveforms: Array of separated waveforms with shape [B, C, T].
use_mag_weighting: If True, mix weights are magnitude-squared of the
separated signal.
Returns:
Projected separated_waveforms as an array in source image format.
"""
# Modify the source estimates such that they sum up to the mixture, where
# the mixture is defined as the sum across sources of the true source
# targets. Uses the least-squares solution under the constraint that the
# resulting source estimates add up to the mixture.
num_sources = separated_waveforms.shape[1]
mix = jnp.expand_dims(mixture_waveforms, 1)
mix_estimate = jnp.sum(separated_waveforms, 1, keepdims=True)
if use_mag_weighting:
mix_weights = 1e-8 + jnp.mean(
separated_waveforms**2, axis=2, keepdims=True
)
mix_weights /= jnp.sum(mix_weights, axis=1, keepdims=True)
else:
mix_weights = 1.0 / num_sources
correction = mix_weights * (mix - mix_estimate)
separated_waveforms = separated_waveforms + correction
return separated_waveforms
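# Consistency check (illustrative): after the projection, the separated
# channels sum back to the mixture, up to float tolerance.
def _check_mixture_consistency() -> jnp.ndarray:
  mix = jnp.ones((2, 100))                    # [B, T]
  est = jnp.stack([0.2 * mix, 0.5 * mix], 1)  # [B, C, T]; sums to 0.7 * mix.
  projected = enforce_mixture_consistency_time_domain(mix, est)
  return jnp.allclose(projected.sum(axis=1), mix, atol=1e-5)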
class SeparationModel(nn.Module):
"""Audio separation model.
  We use a general masked separation approach, similar to ConvTasNet[^1]. Input
audio with shape [[B]atch, [T]ime] is run through an invertible 'bank'
transform (usually STFT or a learned filterbank), obtaining shape
[B, T, [F]ilters]. A mask-generator network consumes the banked audio and
produces a set of (usually sigmoid) masks with shape [B, T, [C]hannels, F].
These masks are broadcast multiplied by the banked audio to create C separated
  audio channels. Then an 'unbank' (i.e., synthesis filterbank) transformation
returns the masked audio channels to the time domain. Finally, a mixture
consistency projection is applied.
[^1]: ConvTasNet: https://arxiv.org/pdf/1809.07454.pdf
Attributes:
bank_transform: A transform consuming a batch of audio with shape [B, T] and
returning an array of shape [B, T, F].
unbank_transform: A transform returning an array of shape [B, T, F] to
time-domain audio with shape [B, T].
mask_generator: A network transforming an array banked audio of shape [B, T,
F] to an output with the same batch and time dimensions as the input
banked audio. This module handles the transformation of the mask_generator
outputs to actual mask values.
num_mask_channels: Number of separated channels.
mask_kernel_size: Kernel size for transpose convolution to mask logits.
    bank_is_real: Whether the banked audio is real-valued. If False (i.e., the
      bank values are complex), we take their magnitude before feeding them to
      the mask_generator network.
"""
bank_transform: Callable[[jnp.ndarray], jnp.ndarray]
unbank_transform: Callable[[jnp.ndarray], jnp.ndarray]
mask_generator: nn.Module
num_mask_channels: int = 4
mask_kernel_size: int = 3
bank_is_real: bool = False
num_classes: dict[str, int] | None = None
classify_bottleneck: bool = False
classify_pool_width: int = 250
classify_stride: int = 50
classify_features: int = 512
def check_shapes(self, banked_inputs, mask_hiddens):
if mask_hiddens.shape[-3] != banked_inputs.shape[-3]:
raise ValueError(
'Output mask_hiddens must have the same time dimensionality as the '
'banked_inputs. Got shapes: %s vs %s'
% (mask_hiddens.shape, banked_inputs.shape)
)
def bottleneck_classifier(self, bottleneck, train: bool):
"""Create classification layer over the bottleneck."""
# TODO(tomdenton): Experiment with removing this layernorm.
bottleneck = nn.normalization.LayerNorm(reduction_axes=(-2, -1))(bottleneck)
classify_hiddens = layers.StridedAutopool(
0.5, self.classify_pool_width, self.classify_stride, padding='SAME'
)(bottleneck)
classify_hiddens = nn.Conv(
features=self.classify_features,
kernel_size=(1,),
strides=(1,),
padding='SAME',
)(classify_hiddens)
classify_hiddens = nn.swish(classify_hiddens)
classify_outputs = {}
for k, n in self.num_classes.items():
classify_outputs[k] = nn.Conv(n, (1,), (1,), 'SAME')(classify_hiddens)
classify_outputs['embedding'] = classify_hiddens
return classify_outputs
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool) -> SeparatorOutput:
"""Apply the separation model."""
banked_inputs = self.bank_transform(inputs)
num_banked_filters = banked_inputs.shape[-1]
if self.bank_is_real:
mask_inputs = banked_inputs
else:
mask_inputs = jnp.abs(banked_inputs)
mask_hiddens, bottleneck = self.mask_generator(mask_inputs, train=train)
self.check_shapes(banked_inputs, mask_hiddens)
# Convert mask_hiddens to actual mask values.
# TODO(tomdenton): Check whether non-trivial mask_kernel_size really helps.
masks = nn.ConvTranspose(
features=self.num_mask_channels * num_banked_filters,
kernel_size=(self.mask_kernel_size,),
)(mask_hiddens)
masks = jax.nn.sigmoid(masks)
# Reshape the masks for broadcasting to [B, T, C, F].
masks = jnp.reshape(
masks,
[
masks.shape[0],
masks.shape[1],
self.num_mask_channels,
num_banked_filters,
],
)
# Apply the masks to the banked input.
masked_banked_inputs = masks * jnp.expand_dims(banked_inputs, -2)
# To undo the bank transform, swap axes to get shape [B, C, T, F]
masked_banked_inputs = jnp.swapaxes(masked_banked_inputs, -2, -3)
unbanked = self.unbank_transform(masked_banked_inputs)
unbanked = enforce_mixture_consistency_time_domain(inputs, unbanked)
model_outputs = {
'separated_audio': unbanked,
'bottleneck': bottleneck,
}
if self.classify_bottleneck:
model_outputs.update(self.bottleneck_classifier(bottleneck, train=train))
return SeparatorOutput(**model_outputs)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HuBERT model."""
import enum
from typing import Any, Sequence
from chirp.models import conformer
from chirp.models import layers
import flax
from flax import linen as nn
import jax
from jax import numpy as jnp
@flax.struct.dataclass
class HubertOutput:
embedding: list[jnp.ndarray]
logits: list[jnp.ndarray]
targets: list[jnp.ndarray]
mask_idc: jnp.ndarray
quantization_loss: list[jnp.ndarray]
label: list[jnp.ndarray]
genus: list[jnp.ndarray] | None = None
family: list[jnp.ndarray] | None = None
order: list[jnp.ndarray] | None = None
class QuantizerPoints(enum.Enum):
"""A point in the architecture to add a quantizer."""
FRONTEND = -2
EARLY_FS = -1
def compute_mask_indices(
key: jnp.ndarray,
shape: tuple[int, int],
mask_prob: float,
mask_length: int,
min_masks: int = 0,
) -> jnp.ndarray:
"""Computes random mask spans for a given shape.
Args:
key: The key for random operations.
shape: The shape of the mask that will be computed. A tuple of two elements,
corresponding to the batch size and the number of frames.
mask_prob: The probability for each token to be chosen as the starting index
of a 'masked span'.
mask_length: The length of each 'masked span'.
min_masks: The minimum number of masked spans.
Returns:
mask: A boolean jnp.array that has the same shape as `shape`.
"""
bsz, sz = shape
key, subkey = jax.random.split(key)
# `num_mask` is the number of 'masked spans' for each sample in the batch.
# A random number is added for probabilistic rounding. We use the 'static'
# strategy where each sample in the batch has the same number of masked spans.
rounding_offset = jax.random.uniform(subkey, shape=(bsz,))
key, subkey = jax.random.split(key)
num_mask = mask_prob * sz / jnp.array(mask_length, float) + rounding_offset
num_mask = jnp.full(bsz, num_mask).astype(int)
max_masks = sz - mask_length + 1
num_mask = jnp.clip(num_mask, a_min=min_masks, a_max=max_masks)
# First, sample a set of start indices for the max possible number of masks.
# Do this sampling separately for each batch sample, to allow `replace`=False.
max_start_index = sz - mask_length
mask_idc = []
for _ in range(bsz):
mask_idc.append(
jax.random.choice(
subkey, max_start_index + 1, shape=(max_masks,), replace=False
)
)
key, subkey = jax.random.split(key)
mask_idc = jnp.stack(mask_idc, axis=0)
# Now filter these starting indices to `num_mask` 'active' ones. This is done
# by replacing the 'inactive' ones to start at some index that is beyond the
# length of the sequence. The scatter operation later will disregard these.
mask_idc = jnp.reshape(mask_idc, [-1])
inactive_start_idx = sz
a = jnp.array([a % max_masks for a in jnp.arange(max_masks * bsz)])
num_mask = jnp.reshape(
jnp.repeat(jnp.expand_dims(num_mask, 1), axis=1, repeats=max_masks), [-1]
)
mask_idc = jnp.where(a < num_mask, mask_idc, inactive_start_idx)
# Add the offsets, to get all masked indices of each span.
mask_idc = jnp.concatenate(
[mask_idc + offset for offset in range(mask_length)]
)
# Prepare the `scatter_indices`, i.e. the positions of a (bsz, sz) array that
# will be set to 1s in the binary mask that will be returned.
batch_inds = jnp.reshape(
jnp.repeat(
jnp.expand_dims(jnp.arange(bsz), 1), axis=1, repeats=max_masks
),
[-1],
)
batch_inds = jnp.reshape(
jnp.repeat(jnp.expand_dims(batch_inds, 0), axis=0, repeats=mask_length),
[-1],
)
scatter_indices = jnp.stack((batch_inds, mask_idc), axis=1)
mask = jax.lax.scatter(
jnp.zeros((bsz, sz)).astype(int),
scatter_indices,
jnp.ones_like((mask_idc)).astype(int),
jax.lax.ScatterDimensionNumbers(
update_window_dims=(),
inserted_window_dims=(0, 1),
scatter_dims_to_operand_dims=(0, 1),
),
)
return mask
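# Usage sketch (illustrative): sample masked spans for a batch of 2 sequences
# of 100 frames. With mask_prob=0.08 and mask_length=10, roughly 8% of all
# frames end up masked, grouped into spans of 10 consecutive frames.
def _example_mask_indices() -> jnp.ndarray:
  return compute_mask_indices(
      jax.random.PRNGKey(0), shape=(2, 100), mask_prob=0.08, mask_length=10
  )  # Binary int mask of shape (2, 100), with 1s at masked positions.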
@flax.struct.dataclass
class QuantizerBundle:
quantization_loss: jnp.ndarray
targets: jnp.ndarray
codebook: jnp.ndarray
projected_feature_codes: jnp.ndarray
class HuBERTEval(nn.Module):
"""HuBERT model for evaluation.
Attributes:
early_feature_extractor: A network (e.g., a 2D convolutional network) that
takes spectrograms and returns feature vectors. Quantization is performed
on the features produced by this feature extractor.
late_feature_extractor: A network (e.g., a stack of Conformer blocks) that
takes "early" features and returns a sequence of Jax ndarrays that contain
increasingly higher-level features.
frontend: The frontend to use to generate features.
use_raw_audio: Whether to feed raw audio into the feature extractor to get
HuBERT's predictions (as opposed to audio processed with a frontend). The
current best configuration sets this option to True but performs
quantization after the frontend to obtain targets for HuBERT.
add_positional_embeddings: Whether to add positional embeddings to the late
feature extractor.
"""
early_feature_extractor: nn.Module | None
late_feature_extractor: nn.Module
frontend: nn.Module | None = None
use_raw_audio: bool = True
add_positional_embeddings: bool = False
@nn.compact
def __call__(self, inputs: jnp.ndarray, block_to_readout=6):
"""Forward pass through the HuBERT model for evaluation.
bsz: batch size.
sz: number of frames (timesteps).
csz: number of channels.
Args:
inputs: Audio of shape `(bsz, sz)`.
block_to_readout: The integer representing the block of the late feature
extractor from which to return embeddings.
Returns:
      The late feature extractor embeddings after block `block_to_readout`.
"""
if self.use_raw_audio:
# Raw audio (no frontend) is fed through the early feature extractor.
early_fs_inputs = jnp.expand_dims(inputs, -1) # (bsz, sz, 1)
x_earlyfs = self.early_feature_extractor(early_fs_inputs, train=False)
x = x_earlyfs
else:
# Process audio with a frontend before the early feature extractor.
x_frontend = self.frontend(inputs) # (bsz, sz, csz)
if self.early_feature_extractor is not None:
x_earlyfs = self.early_feature_extractor(x_frontend, train=False)
x = x_earlyfs
_, sz, csz = x.shape
if self.add_positional_embeddings:
x = x + conformer.PositionalEmbedding(embedding_dims=csz)(seq_length=sz)
# Pass x through the "late" feature extractor. Returns a list of x's for the
# different "readout points".
x_list = self.late_feature_extractor(
x, train=False, return_intermediate_list=True
)
if block_to_readout < 0 or block_to_readout >= len(x_list):
raise ValueError(
"The `block_to_readout` should be in the range "
"[0, len(x_list)) where x_list is the list that the "
"late feature extractor returns. But got {} and "
"len(x_list) is {}".format(block_to_readout, len(x_list))
)
# Stop gradient so that a classifier is trained on frozen features for
# evaluation purposes.
return jax.lax.stop_gradient(x_list[block_to_readout])
class HuBERTModel(nn.Module):
"""HuBERT model.
Attributes:
num_classes: Number of classes for each output head. These are used to train
supervised readout layer for evaluation only. The representation is
learned in a purely self-supervised manner.
early_feature_extractor: A network (e.g., a 2D convolutional network) that
takes spectrograms and returns feature vectors. Quantization is performed
on the features produced by this feature extractor.
late_feature_extractor: A network (e.g., a stack of Conformer blocks) that
takes "early" features and returns a sequence of Jax ndarrays that contain
increasingly higher-level features.
quantizer: A list of quantizer networks, each of which returns a codebook,
assignments of inputs to codes, and a loss for training it. This list may
contain only a single element, or several in the case of quantizing in
different feature spaces.
frontend: The frontend to use to generate features.
use_raw_audio: Whether to feed raw audio into the feature extractor to get
HuBERT's predictions (as opposed to audio processed with a frontend). The
current best configuration sets this option to True but performs
quantization after the frontend to obtain targets for HuBERT.
mask_config: The config for generating masks.
classifier_config: The config for the classifier.
taxonomy_loss_weight: Weight for taxonomic label losses. These are used to
train supervised readout layer for evaluation only. The representation is
learned in a purely self-supervised manner.
readout_points: A List of indices of late feature extractor blocks after
which to add a readout layer (for classification). The allowed values are
in the range [0, len(x_list)) where x_list is the list of Jax ndarrays
returned by the late feature extractor.
quantizer_points: A list of integers indicating where to quantize. The
allowed values are ints in the range [0, len(x_list)) where x_list is the
list of ndarrays returned by the late feature extractor and values of the
QuantizerPoints class. Specifically, any non-negative integer represents
quantizing after the late feature extractor block with that integer index,
      whereas QuantizerPoints values also allow quantizing right after the
frontend (QuantizerPoints.FRONTEND) and right after the early feature
extractor (QuantizerPoints.EARLY_FS).
final_dim: The dimensionality after the final projection layer.
logit_temp: The temperature to use for the logits of which cluster each
timestep belongs to.
alpha: The weight of the masked loss in the combination of the masked and
      unmasked losses for HuBERT. By default it is 1, so only the masked loss
      is considered.
stop_gradient_earlyfs: Whether to stop gradient after the early feature
extractor.
omit_classifier_stop_grads: Optionally, a list of integers indicating which
of the readout points to omit the stop-gradient for. Specifically, a
classifier is added to each readout point, and typically a stop-gradient
is used to prevent the classifier from modifying the representations (this
happens by default, if this argument isn't provided, or if it's None). If
provided, specifies the index (or indices) of readout locations where that
stop-gradient operation will be omitted. This allows, for instance,
supervised finetuning of HuBERT representations, and semi-supervised
learning.
add_positional_embeddings: Whether to add positional embeddings to the late
feature extractor.
"""
num_classes: dict[str, int]
early_feature_extractor: nn.Module | None
late_feature_extractor: nn.Module
quantizer: list[nn.Module]
frontend: nn.Module
use_raw_audio: bool
mask_config: dict[str, Any]
classifier_config: dict[str, Any]
taxonomy_loss_weight: float
readout_points: list[int]
quantizer_points: Sequence[int]
final_dim: int = 512
logit_temp: float = 0.1
alpha: float = 1.0
stop_gradient_earlyfs: bool = True
omit_classifier_stop_grads: Sequence[int] | None = None
add_positional_embeddings: bool = False
def classify(
self,
x_list,
mask_idc,
per_frame_predictions,
classify_pool_width,
classify_stride,
classify_features,
reduction_type,
classify_from_all,
):
# The gradients of this loss will not propagate to train the representation
# (the representation is trained purely self-supervised), unless it is
# requested to omit placing a stop-gradient on the classifier.
for i in range(len(x_list)):
if (
self.omit_classifier_stop_grads is None
or i not in self.omit_classifier_stop_grads
):
x_list[i] = jax.lax.stop_gradient(x_list[i])
outputs = {}
midpt = x_list[-1].shape[-2] // 2 # The middle frame.
for k, n in self.num_classes.items():
outputs[k] = []
# We use separate readout heads on different "levels" of representation.
for i, x_interm in enumerate(x_list):
if i not in self.readout_points:
continue
csz_ = x_interm.shape[-1]
if per_frame_predictions:
# Borrow the classifier from `separation_model.py`.
x_interm = nn.normalization.LayerNorm(reduction_axes=(-2, -1))(
x_interm
)
x_interm = layers.StridedAutopool(
0.5,
classify_pool_width,
classify_stride,
padding="SAME",
name="readout_autopool_{}_{}".format(k, i),
)(x_interm)
x_interm = nn.Conv(
features=classify_features,
kernel_size=(1,),
strides=(1,),
padding="SAME",
name="readout_conv1_{}_{}".format(k, i),
)(x_interm)
x_interm = nn.swish(x_interm)
per_frame_preds = nn.Conv(
n, (1,), (1,), "SAME", name="readout_conv2_{}_{}".format(k, i)
)(x_interm)
# Now reduce over the time axis to get 1 prediction per *sample*.
if reduction_type == "AVG":
reduce_fn = lambda x: jnp.mean(x, axis=-2)
elif reduction_type == "MAX":
reduce_fn = lambda x: jnp.max(x, axis=-2)
elif reduction_type == "MIDPOINT":
reduce_fn = lambda x: x[..., midpt, :]
else:
raise ValueError(f"Reduction {reduction_type} not recognized.")
outputs[k].append(reduce_fn(per_frame_preds))
else:
# Akin to the implementation of conformers in the supervised model,
# we average over the time dimension before the readout layer, to
# collapse x from [bsz, sz, csz] to [bsz, csz]. But in this case
# we only average the *unmasked* frames if `classify_from_all` is off.
if classify_from_all:
mean = jnp.mean(x_interm, axis=1)
else:
# x_filtered_zeros has 0s in place of masked embeddings, while
# keeping only the unmasked embeddings intact. [bsz, sz, csz_].
mask_idc_exp = jnp.repeat(
jnp.expand_dims(mask_idc, 2), repeats=csz_, axis=2
)
x_filtered = jnp.where(mask_idc_exp, 0, x_interm)
mean = jnp.sum(x_filtered, axis=1) / jnp.sum(
mask_idc_exp == 0, axis=1
)
outputs[k].append(
nn.Dense(n, name="readout_{}_{}".format(k, i))(mean)
)
return outputs
def add_projected_quantizer(self, x, quantizers, train):
"""Adds a quantizer on top of features x."""
# Get the next quantizer module.
quant_index = len(quantizers)
quantizer = self.quantizer[quant_index]
nc = quantizer.get_num_centroids()
ns = quantizer.get_num_sections()
# Get the codes, quantization targets and quantizer loss.
quant_outputs = quantizer(x, train)
# codes: [ns, nc, csz / ns], where ns = 1 if not using PQ.
codes = quant_outputs.codebook
# quant_outputs.nn_idx: [ns, bsz, sz].
# targets: [ns, bsz, sz, nc].
nn_idx = quant_outputs.nn_idx
targets = jax.nn.one_hot(nn_idx, nc)
# Project the centroids.
# A list of ns many elements that have shape [nc, final_dim].
codes_pj = [
nn.Dense(
self.final_dim, name="codes_proj_{}_{}".format(quant_index, i)
)(codes[i])
for i in range(ns)
]
# [ns, nc, final_dim].
codes_pj = jnp.stack(codes_pj, axis=0)
quantizers.append(
QuantizerBundle(
quant_outputs.quantization_loss, targets, codes, codes_pj
)
)
return quantizers
def apply_final_projection(self, x, quantizers):
"""Apply projection layer(s) on the features.
A separate projection layer is used for each "section" (if using product
quantization) of each quantizer.
Args:
x: Embeddings from late feature extractor of shape [bsz, sz, csz].
quantizers: A list of QuantizerBundle's, one per quantizer.
Returns:
projected_x: A list whose length is the same as that of quantizers. Each
element is the projected features of shape [ns, bsz, sz, final_dim].
"""
projected_x = []
# Create a separate (set of) projection(s) for each quantizer.
for j in range(len(quantizers)):
# A list of ns many elements that have shape [bsz, sz, csz/ns].
x_sections = jnp.split(x, self.quantizer[j].get_num_sections(), axis=-1)
# A list of ns many elements that have shape [bsz, sz, final_dim].
x_proj = [
nn.Dense(
self.final_dim, name="final_proj_section_{}_quant_{}".format(i, j)
)(x_sec)
for (i, x_sec) in enumerate(x_sections)
]
# [ns, bsz, sz, final_dim].
projected_x.append(jnp.stack(x_proj, axis=0))
return projected_x
def get_logits(self, x_list, quantizers):
"""Compute the logits i.e.
similarity between projected features and codes.
Args:
x_list: A list whose length is the number of quantizers. Each element of
that list is an array of shape [ns, bsz, sz, final_dim], storing the
features that were projected with a layer specific to that quantizer.
quantizers: A list of the same length as x_list, storing the
        QuantizerBundle for each quantizer.
Returns:
The logits, as a list of [ns, bsz, sz, nc]-shaped jnp.arrays. The length
of this list is the number of quantizers.
"""
# Predict the code of each timestep using cosine similarity between the
# projected embeddings and the projected codes.
all_logits = []
for x, q_bundle, q_module in zip(x_list, quantizers, self.quantizer):
# First, l2-normalize the (projected) features and codes.
x /= jnp.linalg.norm(x, axis=-1, keepdims=True) + 1e-5
codes_pj = q_bundle.projected_feature_codes
codes_pj /= jnp.linalg.norm(codes_pj, axis=-1, keepdims=True) + 1e-5
# Then, compute the dot product between them.
ns = q_module.get_num_sections()
codes_pj = jnp.transpose(codes_pj, (0, 2, 1)) # [ns, final_dim, nc]
logits = jnp.dot(x, codes_pj) # [ns, bsz, sz, ns, nc]
# For each "section" of features, grab only the cluster assignments
# corresponding to that section.
logits = jnp.transpose(logits, (0, 3, 1, 2, 4)) # [ns, ns, bsz, sz, nc]
# Out of the first 2 dims want to keep the inds [(0,0), (1,1), (2,2)...]
inds = jnp.stack((jnp.arange(ns), jnp.arange(ns)), axis=1)
logits = logits[tuple(jnp.moveaxis(inds, -1, 0))] # [ns, bsz, sz, nc]
# TODO(etriantafillou): experiment with learnable temperature.
logits /= self.logit_temp
all_logits.append(logits)
return all_logits
@nn.compact
def __call__(
self,
inputs: jnp.ndarray,
train: bool,
train_mode_quantizer: bool,
mask_key: jnp.ndarray | None,
) -> HubertOutput:
"""Apply the HuBERT model.
The quantizer used may either be Product Quantizer (PQ) or a base quantizer.
In the former case, instead of making a single centroid prediction per
frame, a prediction is made for each "section" of the product quantizer.
There is also a corresponding target for each section if using PQ, and the
HuBERT loss becomes the average of the per-section losses.
bsz: batch size.
sz: number of frames (timesteps).
csz: number of channels.
nc: number of centroids.
ns: number of sections of the product quantizer (if applicable).
Args:
inputs: Audio of shape `(bsz, sz)`.
train: Whether we're in training mode (affects batch norm and dropout).
train_mode_quantizer: Whether the quantizer is in train mode (affects EMA
counter for cluster utilization).
mask_key: A jnp.array that serves as the key for sampling masks. It can be
None if `train` is False since no mask is applied in that case.
Returns:
Logits for which cluster each timestep belongs to (per section of the
product quantizer, if applicable).
"""
if len(self.quantizer) != len(self.quantizer_points):
raise ValueError(
"The lengths of `quantizer` and `quantizer_points` "
"should match, but are {} and {}.".format(
len(self.quantizer), len(self.quantizer_points)
)
)
if self.omit_classifier_stop_grads is not None:
for i in self.omit_classifier_stop_grads:
if i < 0 or i >= len(self.readout_points):
raise ValueError(
"Requested to omit the stop-grad from classifier "
f"with index {i} but there are only {len(self.readout_points)} "
"readout points / classifiers."
)
model_outputs = {}
quantizers = []
if self.use_raw_audio:
# Raw audio (no frontend) is fed through the early feature extractor.
early_fs_inputs = jnp.expand_dims(inputs, -1) # (bsz, sz, 1)
x_earlyfs = self.early_feature_extractor(early_fs_inputs, train=train)
if QuantizerPoints.FRONTEND.value in self.quantizer_points:
x_frontend = self.frontend(inputs) # (bsz, sz, csz)
if x_earlyfs.shape[-2] != x_frontend.shape[-2]:
raise ValueError(
f"Expected the number of frontend frames ({x_frontend.shape[-2]})"
" to match the number of frames from the early feature extractor "
f"({x_earlyfs.shape[-2]}) in order to have as many HuBERT "
"predictions as there are targets, since `quantizer_points` "
"includes quantizing on top of the frontend."
)
x = x_earlyfs
else:
# Process audio with a frontend before the early feature extractor.
x_frontend = self.frontend(inputs) # (bsz, sz, csz)
if self.early_feature_extractor is not None:
x_earlyfs = self.early_feature_extractor(x_frontend, train=train)
x = x_earlyfs
# Add quantizers on frontend and/or early fs, if requested.
if QuantizerPoints.FRONTEND.value in self.quantizer_points:
quantizers = self.add_projected_quantizer(
x_frontend, quantizers, train_mode_quantizer
)
if QuantizerPoints.EARLY_FS.value in self.quantizer_points:
# Add the first quantizer, directly on top of the "early features".
quantizers = self.add_projected_quantizer(
x_earlyfs, quantizers, train_mode_quantizer
)
bsz, sz, csz = x.shape
if self.add_positional_embeddings:
x = x + conformer.PositionalEmbedding(embedding_dims=csz)(seq_length=sz)
if self.stop_gradient_earlyfs:
# If no early feature extractor is used, this should have no effect.
# Otherwise, doing this will disallow HuBERT to train the early fs.
# Note that this leads to not training the early fs at all (the quantizer
# loss won't train it either, due to stopping gradients in quantizer.py).
# Quantizing on top of random early features is maybe an interesting
# baseline, if *consistency* of targets is what matters most.
x = jax.lax.stop_gradient(x)
# The learnable mask token.
mask_emb = self.param("mask_emb", nn.initializers.uniform(), (csz,))
# Get the corrupted x, where the features are replaced with the learnable
# masked embedding for the positions that are chosen to be masked, if we are
# in training mode.
mask_idc = jnp.zeros((bsz, sz))
if mask_key is not None:
mask_idc = compute_mask_indices(
mask_key, shape=(bsz, sz), **self.mask_config
)
model_outputs["mask_idc"] = mask_idc
mask_idc_exp = jnp.repeat(jnp.expand_dims(mask_idc, 2), repeats=csz, axis=2)
x = jnp.where(mask_idc_exp > 0, mask_emb, x)
# Pass the corrupted x through the "late" feature extractor. Returns a list
# of x's for the different "readout points".
x_list = self.late_feature_extractor(
x, train=train, return_intermediate_list=True
)
for block_ind in self.readout_points:
if block_ind < 0 or block_ind >= len(x_list):
raise ValueError(
"Each element of `readout_points` should be in the "
"range [0, len(x_list)) where x_list is the list that "
"the late feature extractor returns. Found element "
"{} and len(x_list) is {}".format(block_ind, len(x_list))
)
x = x_list[-1] # the final-layer "embeddings"
_, _, csz = x.shape
model_outputs["embedding"] = x_list
# Add additional quantizers on blocks of the late feature extractor.
for point in list(self.quantizer_points):
if (
point == QuantizerPoints.FRONTEND.value
or point == QuantizerPoints.EARLY_FS.value
):
# Quantizers on the frontend and the early feature extractor will have
# already been added, if requested. Nothing more to do here.
continue
elif point < 0:
raise ValueError(
"An element of `quantizer_points` can only be "
f"negative if it's -1 or -2, but found {point}."
)
elif point >= len(x_list):
raise ValueError(
"Each element of `quantizer_points` should be in the "
"range [0, len(x_list)) where x_list is the list that "
"the late feature extractor returns. Found element "
"{} and len(x_list) is {}".format(point, len(x_list))
)
quantizers = self.add_projected_quantizer(
x_list[point], quantizers, train_mode_quantizer
)
# Linear readouts for supervised classification on top of HuBERT embeddings.
classification_outputs = self.classify(
x_list, mask_idc=mask_idc, **self.classifier_config
)
model_outputs.update(classification_outputs)
# Final projection layer that projects embeddings to `final_dim`.
# A list with as many elements as the number of quantizers used, where each
# element has shape [ns, bsz, sz, final_dim].
x_proj_list = self.apply_final_projection(x, quantizers)
# Compute the logits via cosine similarity between the projected embeddings
# and the projected codes.
# A list of [ns, bsz, sz, nc]-shaped jnp.arrays with one item per quantizer.
logits = self.get_logits(x_proj_list, quantizers)
model_outputs["logits"] = logits
# The targets for each quantizer.
model_outputs["targets"] = [
quantizers[i].targets for i in range(len(quantizers))
]
# The quantization loss: the mean over the individual quantizer losses.
# [bsz, sz, nc].
quant_losses = [
quantizers[i].quantization_loss for i in range(len(quantizers))
]
model_outputs["quantization_loss"] = jnp.mean(
jnp.stack(quant_losses, axis=0), axis=0
)
return HubertOutput(**model_outputs)
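# Cosine-similarity sketch (illustrative, mirroring `get_logits` for a single
# quantizer section): logits are temperature-scaled dot products between
# l2-normalized projected features [..., sz, d] and codes [nc, d].
def _cosine_logits(
    x: jnp.ndarray, codes: jnp.ndarray, logit_temp: float = 0.1
) -> jnp.ndarray:
  x = x / (jnp.linalg.norm(x, axis=-1, keepdims=True) + 1e-5)
  codes = codes / (jnp.linalg.norm(codes, axis=-1, keepdims=True) + 1e-5)
  return jnp.einsum("...td,cd->...tc", x, codes) / logit_temp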
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Taxonomy model."""
import dataclasses
from chirp.models import conformer
from chirp.models import frontend
from chirp.models import layers
from chirp.models import output
from flax import linen as nn
from jax import numpy as jnp
class TaxonomyModel(nn.Module):
"""Taxonomy model for bird song classification.
This model classifies the species of bird songs. It predicts multiple labels:
whether a bird is detected, the species, genus, family, and order of the bird,
and the nature of the background noise.
Attributes:
num_classes: Number of classes for each output head.
encoder: A network (e.g., a 2D convolutional network) that takes
spectrograms and returns feature vectors.
taxonomy_loss_weight: Weight for taxonomic label losses.
frontend: The frontend to use to generate features.
hubert_feature_extractor: Optionally, a pre-trained frozen feature extractor
trained in a self-supervised way. This option is mutually exclusive with
frontend and is used for evaluation of self-supervised representations.
"""
num_classes: dict[str, int]
encoder: nn.Module
taxonomy_loss_weight: float
frontend: nn.Module | None = None
hubert_feature_extractor: nn.Module | None = None
@nn.compact
def __call__(
self,
inputs: jnp.ndarray,
train: bool,
use_running_average: bool | None = None,
mask: jnp.ndarray | None = None,
) -> output.ClassifierOutput | output.TaxonomyOutput:
"""Apply the taxonomy model.
Args:
inputs: Audio of shape `(batch size, time)`.
train: Whether this is training. This affects Dropout behavior, and also
affects BatchNorm behavior if 'use_running_average' is set to None.
use_running_average: Optional, used to decide whether to use running
statistics in BatchNorm (test mode), or the current batch's statistics
(train mode). If not specified (or specified to None), default to 'not
train'.
mask: An optional mask of the inputs.
    Raises:
      ValueError: If both `frontend` and `hubert_feature_extractor` are not
        None.
Returns:
Logits for each output head.
"""
if self.frontend is not None and self.hubert_feature_extractor is not None:
raise ValueError(
"`frontend` and `hubert_feature_extractor` are mutually exclusive."
)
if use_running_average is None:
use_running_average = not train
kwargs = {} if mask is None else {"mask": mask}
# Apply the frontend.
if isinstance(self.frontend, layers.EarlyFeatureExtractor):
# EarlyFeatureExtractor expects [B, T, C] inputs.
x = self.frontend(inputs[:, :, jnp.newaxis], train=train) # pylint: disable=not-callable
elif self.frontend is not None:
x = self.frontend(inputs, train=train) # pylint: disable=not-callable
if mask is not None:
# Go from time steps to frames
mask = frontend.frames_mask(mask, self.frontend.stride)
# Add axes for broadcasting over frequencies and channels
kwargs = {"mask": mask[..., jnp.newaxis, jnp.newaxis]}
elif self.hubert_feature_extractor is not None:
x = self.hubert_feature_extractor(inputs) # pylint: disable=not-callable
else:
x = inputs
# Apply the encoder.
while len(x.shape) < 4:
# We may have shape (B, T), (B, T, D), or (B, W, H, D)
x = x[..., jnp.newaxis]
# Treat the spectrogram as a gray-scale image
x = self.encoder(
x, train=train, use_running_average=use_running_average, **kwargs
)
# Classify the encoder outputs and assemble outputs.
model_outputs = {}
model_outputs["embedding"] = x
model_outputs["label"] = nn.Dense(self.num_classes["label"])(x)
if not self.taxonomy_loss_weight or set(self.num_classes) == set(["label"]):
return output.ClassifierOutput(**model_outputs)
for k, n in self.num_classes.items():
if k != "label":
model_outputs[k] = nn.Dense(n)(x)
return output.TaxonomyOutput(**model_outputs)
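# Shape sketch (illustrative): the encoder consumes image-like [B, W, H, D]
# arrays, so lower-rank features gain trailing singleton axes first.
def _as_image_like(x: jnp.ndarray) -> jnp.ndarray:
  while x.ndim < 4:  # (B, T) -> (B, T, 1) -> (B, T, 1, 1)
    x = x[..., jnp.newaxis]
  return x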
class ConformerModel(nn.Module):
"""Conformer model."""
num_conformer_blocks: int = 16
features: int = 144
num_heads: int = 4
kernel_size: int = 15
downsample: list[tuple[int, float]] = dataclasses.field(default_factory=list)
@nn.compact
def __call__(
self,
inputs: jnp.ndarray,
train: bool,
use_running_average: bool | None = None,
mask: jnp.ndarray | None = None,
) -> jnp.ndarray:
# Subsample from (x, 160) to (x // 4, 40)
x = conformer.ConvolutionalSubsampling(features=self.features)(
inputs, train=train
)
# Apply conformer blocks
x = conformer.Conformer(
model_dims=self.features,
atten_num_heads=self.num_heads,
num_blocks=self.num_conformer_blocks,
kernel_size=self.kernel_size,
downsample=self.downsample,
dropout_prob=0.1,
)(
x,
train=train,
use_running_average=use_running_average,
return_intermediate_list=False,
)
# To get a global embedding we now just pool
return jnp.mean(x, axis=-2)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flax layers for audio processing."""
import functools
from chirp import audio_utils
from flax import linen as nn
from jax import lax
from jax import numpy as jnp
from jax import random
class RandomLowPassFilter(nn.Module):
"""A random low-pass filter in the frequency-domain.
Attributes:
rate: The rate at which random low-pass filters are applied.
deterministic: If true, no low-pass filters are applied.
"""
rate: float
deterministic: bool | None = None
@nn.compact
def __call__(
self, inputs: jnp.ndarray, deterministic: bool | None = None
) -> jnp.ndarray:
"""Applies a random low-pass filter to a mel-spectrogram.
Args:
inputs: A (batch) of mel-spectrograms, assumed to have frequencies on the
last axis.
deterministic: If true, passes the input as is.
Returns:
      A spectrogram with the same size as the input, possibly with a random
low-pass filter applied.
"""
deterministic = nn.merge_param(
'deterministic', self.deterministic, deterministic
)
if self.rate == 0.0 or deterministic:
return inputs
rng = self.make_rng('low_pass')
rate_key, low_pass_key = random.split(rng)
x = lax.cond(
random.uniform(rate_key) < self.rate,
functools.partial(audio_utils.random_low_pass_filter, low_pass_key),
lambda x: x,
inputs,
)
return x
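# Usage sketch (illustrative): the module is parameter-free but draws
# randomness from an rng stream named 'low_pass' at apply time.
def _example_random_low_pass(melspec: jnp.ndarray) -> jnp.ndarray:
  return RandomLowPassFilter(rate=0.5).apply(
      {}, melspec, deterministic=False, rngs={'low_pass': random.PRNGKey(0)}
  )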
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model outputs."""
import dataclasses
from typing import Protocol, runtime_checkable
import flax
from jax import numpy as jnp
@flax.struct.dataclass
class EmbeddingOutput:
embedding: jnp.ndarray
@flax.struct.dataclass
class ClassifierOutput(EmbeddingOutput):
label: jnp.ndarray
@flax.struct.dataclass
class TaxonomyOutput(ClassifierOutput):
genus: jnp.ndarray
family: jnp.ndarray
order: jnp.ndarray
@runtime_checkable
class AnyOutput(Protocol):
"""Any output must be a dataclass."""
__dataclass_fields__: dict[str, dataclasses.Field] # pylint: disable=g-bare-generic
@runtime_checkable
@dataclasses.dataclass
class TaxonomicOutput(Protocol):
label: jnp.ndarray
genus: jnp.ndarray
family: jnp.ndarray
order: jnp.ndarray
def logits(output) -> dict[str, jnp.ndarray]:
return {
f'{key}_logits': getattr(output, key)
for key in ('label', 'genus', 'family', 'order')
if hasattr(output, key)
}
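# Usage sketch (illustrative): `logits` collects whichever taxonomy heads an
# output carries into a single dict, keyed by `<head>_logits`.
#   out = TaxonomyOutput(embedding=e, label=l, genus=g, family=f, order=o)
#   logits(out)  # {'label_logits': l, 'genus_logits': g, ...}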
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adaptation of the Soundstream architecture for sound separation."""
from typing import Sequence
from flax import linen as nn
from jax import numpy as jnp
PADDING = "SAME"
class SeparableResnetBlock(nn.Module):
"""Resnet Block using a separable convolution.
Attributes:
num_hidden_filters: Number of hidden filters. If <0, uses the number of
input filters.
kernel_width: Width of depthwise convolutions.
dilation: Convolution dilation.
groups: Number of feature groups.
residual_scalar: Scalar multiplier for residual connection.
padding: Padding style.
"""
num_hidden_filters: int
kernel_width: int = 3
dilation: int = 1
groups: int = 1
residual_scalar: float = 1.0
padding: str = "SAME"
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool) -> jnp.ndarray:
# First convolution is separable.
input_dim = inputs.shape[-1]
if self.num_hidden_filters < 0:
num_hiddens = input_dim
else:
num_hiddens = self.num_hidden_filters
x = nn.swish(inputs)
x = nn.Conv(
features=num_hiddens,
kernel_size=(self.kernel_width,),
strides=1,
kernel_dilation=self.dilation,
use_bias=True,
feature_group_count=input_dim,
padding=self.padding,
)(x)
x = nn.swish(x)
x = nn.Conv(
features=input_dim,
kernel_size=(1,),
strides=1,
use_bias=True,
feature_group_count=self.groups,
padding=self.padding,
)(x)
return x + self.residual_scalar * inputs
class SeparatorBlock(nn.Module):
"""Block of residual layers and down/up-sampling layer.
Attributes:
is_encoder: Whether this is an encoder (downsampling) or decoder
(upsampling) block.
stride: Down/Up sample rate for this block.
feature_mult: Multiplier/divisor for number of feature channels.
groups: Number of feature groups.
num_residual_layers: Number of dilated residual layers.
num_residual_filters: Number of hidden residual filters.
residual_kernel_width: Kernel width for residual layers.
residual_scalar: Scaling constant for residual connections.
padding: Padding style.
"""
is_encoder: bool
stride: int
feature_mult: int
groups: int = 1
num_residual_layers: int = 3
num_residual_filters: int = -1
residual_kernel_width: int = 3
residual_scalar: float = 1.0
padding: str = "SAME"
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool) -> jnp.ndarray:
input_dim = inputs.shape[-1]
x = inputs
if not self.is_encoder:
# Decoder blocks start with up-sampling, for overall model symmetry.
# Note that Linen's ConvTranspose doesn't support grouping, but it should
# be easy to add support by exposing the feature_group_count argument
# in lax's conv_general_dilated.
x = nn.ConvTranspose(
features=input_dim // self.feature_mult,
kernel_size=(self.stride * 2,),
strides=(self.stride,),
use_bias=True,
padding=self.padding,
)(x)
for idx in range(self.num_residual_layers):
x = SeparableResnetBlock(
num_hidden_filters=self.num_residual_filters,
kernel_width=self.residual_kernel_width,
dilation=self.residual_kernel_width**idx,
groups=self.groups,
residual_scalar=self.residual_scalar,
padding=self.padding,
)(x, train)
x = nn.normalization.LayerNorm(reduction_axes=(-2, -1))(x)
x = nn.swish(x)
if self.is_encoder:
x = nn.Conv(
features=input_dim * self.feature_mult,
kernel_size=(self.stride * 2,),
strides=(self.stride,),
use_bias=True,
feature_group_count=self.groups,
padding=self.padding,
)(x)
return x
class SoundstreamUNet(nn.Module):
"""Audio U-Net based on the Soundstream architecture.
Assumes 1D inputs with shape [B, T, D].
Attributes:
base_filters: Number of filters for the input / output layer.
bottleneck_filters: Number of filters in the inner bottleneck conv.
output_filters: Number of filters in final model output.
strides: Number of strides for each SeparatorBlock.
feature_mults: Multiplier for number of features for each SeparatorBlock.
groups: Number of feature groups for each SeparatorBlock.
input_kernel_width: Width of the input convolution.
bottleneck_kernel_width: Width of the bottleneck kernel.
output_kernel_width: Width of the output kernel.
num_residual_layers: Number of dilated residual layers per SeparatorBlock.
residual_scalar: Scalar multiplier for residual connections.
residual_hidden_filters: Number of hidden filters in residual blocks.
unet_scalar: Scalar multiplier for UNet skip connections.
padding: Padding style.
"""
base_filters: int
bottleneck_filters: int
output_filters: int
strides: Sequence[int]
feature_mults: Sequence[int]
groups: Sequence[int]
input_kernel_width: int = 3
bottleneck_kernel_width: int = 3
output_kernel_width: int = 3
num_residual_layers: int = 3
residual_kernel_width: int = 3
residual_scalar: float = 1.0
residual_hidden_filters: int = -1
# TODO(tomdenton): Experiment with a learnable scalar. See TDCN++.
unet_scalar: float = 1.0
padding: str = "SAME"
@nn.compact
def __call__(self, inputs: jnp.ndarray, train: bool) -> jnp.ndarray:
"""Generate separation masks."""
# Stem!
x = nn.Conv(
features=self.base_filters,
kernel_size=(self.input_kernel_width,),
strides=1,
use_bias=True,
padding=self.padding,
)(inputs)
x = nn.normalization.LayerNorm(reduction_axes=(-2, -1))(x)
x = nn.swish(x)
# Encoder!
encoder_outputs = []
for stride, mult, num_groups in zip(
self.strides, self.feature_mults, self.groups
):
x = SeparatorBlock(
is_encoder=True,
stride=stride,
feature_mult=mult,
groups=num_groups,
num_residual_layers=self.num_residual_layers,
num_residual_filters=self.residual_hidden_filters,
residual_kernel_width=self.residual_kernel_width,
residual_scalar=self.residual_scalar,
padding=self.padding,
)(x, train)
encoder_outputs.append(x)
# Bottleneck!
prebottleneck_filters = x.shape[-1]
x = nn.Conv(
features=self.bottleneck_filters,
kernel_size=(self.bottleneck_kernel_width,),
strides=1,
use_bias=True,
padding=self.padding,
)(x)
# Normally this is where one would apply quantization for a codec.
bottleneck_features = x
# Unbottleneck!
x = nn.Conv(
features=prebottleneck_filters,
kernel_size=(self.bottleneck_kernel_width,),
strides=1,
use_bias=True,
padding=self.padding,
)(x)
# Decode!
for stride, mult, num_groups, unet_features in zip(
self.strides[::-1],
self.feature_mults[::-1],
self.groups[::-1],
encoder_outputs[::-1],
):
x = self.unet_scalar * unet_features + x
x = SeparatorBlock(
is_encoder=False,
stride=stride,
feature_mult=mult,
groups=num_groups,
num_residual_layers=self.num_residual_layers,
num_residual_filters=self.residual_hidden_filters,
residual_kernel_width=self.residual_kernel_width,
residual_scalar=self.residual_scalar,
padding=self.padding,
)(x, train)
# Head!
x = nn.Conv(
features=self.output_filters,
kernel_size=(self.output_kernel_width,),
strides=1,
use_bias=True,
padding=self.padding,
)(x)
return x, bottleneck_features # pytype: disable=bad-return-type # jax-ndarray
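# Configuration sketch (illustrative): a small UNet whose two encoder blocks
# each downsample time by 2 and double the channel count, mirrored by the
# decoder on the way back up.
def _small_unet() -> SoundstreamUNet:
  return SoundstreamUNet(
      base_filters=32,
      bottleneck_filters=64,
      output_filters=32,
      strides=(2, 2),
      feature_mults=(2, 2),
      groups=(1, 1),
  )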
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scrapes the Xeno-Canto website for taxonomy and audio data."""
import concurrent.futures
import enum
import functools
import itertools
import json
import os.path
from typing import Any, Sequence
from absl import app
from absl import flags
from chirp.data import utils
import pandas as pd
import ratelimiter
import requests
import tensorflow as tf
import tqdm
_XC_API_RATE_LIMIT = 8
_XC_API_URL = 'http://www.xeno-canto.org/api/2/recordings'
_XC_SPECIES_URL = 'https://xeno-canto.org/collection/species/all'
class _Modes(enum.Enum):
COLLECT_INFO = 'collect_info'
_MODE = flags.DEFINE_enum(
'mode', 'collect_info', [mode.value for mode in _Modes], 'Operation mode.'
)
_OUTPUT_DIR = flags.DEFINE_string(
'output_dir',
'/tmp/xeno-canto',
'Where to output the taxonomy info DataFrame.',
)
_INFO_FILENAME = flags.DEFINE_string(
'info_filename', 'xeno_canto.jsonl', 'Xeno-Canto info filename.'
)
def collect_info(
output_dir: str, recordings_filename: str
) -> list[dict[str, Any]]:
"""Scrapes the Xeno-Canto website for audio file IDs.
Args:
output_dir: Directory in which to store the list of recordings.
recordings_filename: Filename to which to store recordings.
Returns:
The list of recordings.
"""
# Collect all species
(species,) = pd.read_html(io=_XC_SPECIES_URL, match='Scientific name')
# Query Xeno-Canto for all recordings for each species
session = requests.Session()
session.mount(
'http://',
requests.adapters.HTTPAdapter(
max_retries=requests.adapters.Retry(total=5, backoff_factor=0.1)
),
)
@ratelimiter.RateLimiter(max_calls=_XC_API_RATE_LIMIT, period=1)
def get_recordings(scientific_name: str, page: int = 1):
response = session.get(
url=_XC_API_URL,
params={
'query': f"{scientific_name} gen:{scientific_name.split(' ')[0]}",
'page': page,
},
)
response.raise_for_status()
results = response.json()['recordings']
# Get next page if there are more
if response.json()['numPages'] > page:
results.extend(get_recordings(scientific_name, page + 1))
return results
with concurrent.futures.ThreadPoolExecutor(
max_workers=_XC_API_RATE_LIMIT
) as executor:
species_recordings = executor.map(
get_recordings, species['Scientific name']
)
species_recordings = tqdm.tqdm(
species_recordings, total=len(species), desc='Collecting recordings'
)
recordings = list(itertools.chain.from_iterable(species_recordings))
# Store recordings as JSONL file
with tf.io.gfile.GFile(
os.path.join(output_dir, recordings_filename), 'w'
) as f:
for recording in recordings:
f.write(json.dumps(recording))
f.write('\n')
return recordings
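# Read-back sketch (illustrative): recordings are written as JSON Lines, one
# recording per line, so they can be reloaded directly with pandas.
def _load_recordings(path: str) -> pd.DataFrame:
  return pd.read_json(path, lines=True)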
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
modes = {
'collect_info': collect_info,
}
modes[_MODE.value](_OUTPUT_DIR.value, _INFO_FILENAME.value)
if __name__ == '__main__':
app.run(main)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenience utilities for handling class lists."""
from chirp.taxonomy import namespace_db
def get_class_lists(species_class_list_name: str, add_taxonomic_labels: bool):
"""Get the number of classes for the target class outputs."""
db = namespace_db.load_db()
species_classes = db.class_lists[species_class_list_name]
class_lists = {
"label": species_classes,
}
if add_taxonomic_labels:
for name in ["genus", "family", "order"]:
mapping_name = f"{species_classes.namespace}_to_{name}"
mapping = db.mappings[mapping_name]
taxa_class_list = species_classes.apply_namespace_mapping(mapping)
class_lists[name] = taxa_class_list
return class_lists
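def _example_get_class_lists() -> None:
  # Hedged usage sketch: assumes a class list named "ebird2021" exists in
  # the bundled taxonomy database (the name is an assumption).
  class_lists = get_class_lists("ebird2021", add_taxonomic_labels=True)
  for name, class_list in class_lists.items():
    print(name, len(class_list.classes))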
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database of bioacoustic label domains."""
import dataclasses
import functools
import json
import os
import typing
from chirp import path_utils
from chirp.taxonomy import namespace
from etils import epath
TAXONOMY_DATABASE_FILENAME = "taxonomy/taxonomy_database.json"
@dataclasses.dataclass
class TaxonomyDatabase:
namespaces: dict[str, namespace.Namespace]
class_lists: dict[str, namespace.ClassList]
mappings: dict[str, namespace.Mapping]
def validate_taxonomy_database(taxonomy_database: TaxonomyDatabase) -> None:
"""Validate the taxonomy database.
This ensures that all class lists, namespaces, and mappings are consistent.
Args:
taxonomy_database: A taxonomy database structure to validate.
Raises:
ValueError or KeyError when the database is invalid.
"""
namespaces = taxonomy_database.namespaces
for mapping_name, mapping in taxonomy_database.mappings.items():
if (
set(mapping.mapped_pairs.keys())
- namespaces[mapping.source_namespace].classes
):
raise ValueError(
f"Mapping {mapping_name} contains a source class not in "
f"the namespace ({mapping.source_namespace})."
)
if (
set(mapping.mapped_pairs.values())
- namespaces[mapping.target_namespace].classes
):
raise ValueError(
f"Mapping {mapping_name} contains a target class not in "
f"the namespace ({mapping.source_namespace})."
)
for class_name, class_list in taxonomy_database.class_lists.items():
classes = class_list.classes
    # Only the unknown label may appear outside the namespace; any other
    # out-of-namespace class is an error.
    if set(classes) - namespaces[class_list.namespace].classes - {
        namespace.UNKNOWN_LABEL
    }:
raise ValueError(
f"ClassList {class_name} contains a class not in "
f"the namespace ({class_list.namespace})."
)
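def _example_minimal_database() -> TaxonomyDatabase:
  # A hedged, in-memory sketch (all names made up) of a database that passes
  # validation: mapped classes belong to their namespaces, and class lists
  # may additionally contain the unknown label.
  db = TaxonomyDatabase(
      namespaces={"demo": namespace.Namespace(frozenset({"a", "b"}))},
      class_lists={
          "demo_list": namespace.ClassList(
              "demo", ("a", namespace.UNKNOWN_LABEL)
          )
      },
      mappings={"demo_map": namespace.Mapping("demo", "demo", {"a": "b"})},
  )
  validate_taxonomy_database(db)
  return db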
def load_taxonomy_database(
taxonomy_database: dict[str, typing.Any]
) -> TaxonomyDatabase:
"""Construct a taxonomy database from a dictionary.
Args:
taxonomy_database: The database as loaded from a JSON file.
Returns:
A taxonomy database.
Raises:
TypeError when the database contains unknown keys.
"""
namespaces = {
name: namespace.Namespace(
classes=frozenset(namespace_.pop("classes")), **namespace_
)
for name, namespace_ in taxonomy_database.pop("namespaces").items()
}
class_lists = {
name: namespace.ClassList(
classes=tuple(class_list.pop("classes")), **class_list
)
for name, class_list in taxonomy_database.pop("class_lists").items()
}
mappings = {
name: namespace.Mapping(**mapping)
for name, mapping in taxonomy_database.pop("mappings").items()
}
return TaxonomyDatabase(
namespaces=namespaces,
class_lists=class_lists,
mappings=mappings,
**taxonomy_database
)
class TaxonomyDatabaseEncoder(json.JSONEncoder):
  """JSON encoder that serializes frozensets as sorted lists."""
  def default(self, o):
    if isinstance(o, frozenset):
      return sorted(o)
    return super().default(o)
def dump_db(taxonomy_database: TaxonomyDatabase, validate: bool = True) -> str:
  """Serializes the taxonomy database to a JSON string."""
if validate:
validate_taxonomy_database(taxonomy_database)
return json.dumps(
dataclasses.asdict(taxonomy_database),
cls=TaxonomyDatabaseEncoder,
indent=2,
sort_keys=True,
)
@functools.cache
def load_db(
path: os.PathLike[str] | str = TAXONOMY_DATABASE_FILENAME,
validate: bool = True,
) -> TaxonomyDatabase:
"""Load the taxonomy database.
This loads the taxonomy database from the given JSON file. It converts the
database into Python data structures and optionally validates that the
database is consistent.
Args:
path: The JSON file to load.
validate: If true, it validates the database.
Returns:
The taxonomy database.
"""
with path_utils.open_file(path, "r") as f:
data = json.load(f)
taxonomy_database = load_taxonomy_database(data)
if validate:
validate_taxonomy_database(taxonomy_database)
return taxonomy_database
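def _example_load_and_inspect() -> None:
  # Hedged sketch: load the bundled database and inspect one class list. The
  # class list name "ebird2021" is an assumption for illustration.
  db = load_db()
  species = db.class_lists["ebird2021"]
  print(species.namespace, len(species.classes))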
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for handling namespaces of classes."""
from __future__ import annotations
import csv
import dataclasses
import io
from typing import Iterable
from jax import numpy as jnp
import tensorflow as tf
UNKNOWN_LABEL = "unknown"
@dataclasses.dataclass
class Namespace:
"""A namespace is simply a set of labels.
Note that unknown labels cannot be in a namespace.
Attributes:
classes: A frozenset of labels.
"""
classes: frozenset[str]
def __post_init__(self):
if UNKNOWN_LABEL in self.classes:
raise ValueError("unknown class")
@dataclasses.dataclass
class Mapping:
"""A mapping maps labels from one namespace to labels in another.
  Note that this is an n:1 mapping, i.e., multiple labels in the source namespace
can map to the same label in the target namespace.
The source and target namespace are referred to by their name. This name must
be resolved using the taxonomy database.
Note that labels (other than unknown) cannot be mapped to unknown. Instead,
these labels should be simply excluded from the mapping. The end-user is
responsible for deciding whether to map missing keys to unknown or whether to
raise an error, e.g., by using:
mapping.mapped_pairs.get(source_label, namespace.UNKNOWN_LABEL)
Attributes:
source_namespace: The name of the source namespace.
target_namespace: The name of the target namespace.
mapped_pairs: The mapping from labels in the source namespace to labels in
the target namespace.
"""
source_namespace: str
target_namespace: str
mapped_pairs: dict[str, str]
def __post_init__(self):
if UNKNOWN_LABEL in self.mapped_pairs.values():
raise ValueError("unknown target class")
@dataclasses.dataclass
class ClassList:
"""A list of labels.
A class list is a list of labels in a particular order, e.g., to reflect the
output of a model.
Class lists can contain the unknown label. All other labels must belong to a
namespace.
Class lists cannot contain duplicate entries.
Attributes:
namespace: The name of the namespace these class labels belong to.
classes: The list of classes.
"""
namespace: str
classes: tuple[str, ...]
def __post_init__(self):
if len(set(self.classes)) != len(self.classes):
raise ValueError("duplicate entries in class list")
@classmethod
def from_csv(cls, csv_data: Iterable[str]) -> "ClassList":
"""Parse a class list from a CSV file.
The file must contain the namespace in the first column of the first row.
    The first column of each remaining row is assumed to contain a class.
Args:
csv_data: Any iterable which can be passed on to `csv.reader`.
Returns:
The parsed class list.
"""
reader = csv.reader(csv_data)
namespace = next(reader)[0]
classes = tuple(row[0].strip() for row in reader if row)
return ClassList(namespace, classes)
def to_csv(self) -> str:
"""Write a class list to a CSV file.
See `from_csv` for a description of the file format.
It can be useful to write the class lists to disk so that the model can be
loaded correctly, even if class lists change. However, note that in this
case none of the mappings are guaranteed to still work.
Returns:
A string containing the namespace and the class labels as rows.
"""
buffer = io.StringIO(newline="")
writer = csv.writer(buffer)
writer.writerow([self.namespace])
for class_ in self.classes:
writer.writerow([class_])
return buffer.getvalue()
def get_class_map_tf_lookup(
self, target_class_list: ClassList
) -> tuple[tf.lookup.StaticHashTable, tf.Tensor]:
"""Create a static hash map for class indices.
    Create a lookup table for use in TF Datasets, e.g., for converting between
    the ClassList defined for a dataset and the ClassList used as model
    outputs.
Classes in the source ClassList which do not appear in the target_class_list
will be mapped to -1. It is recommended to drop these labels subsequently
with: tf.gather(x, tf.where(x >= 0)[:, 0])
Args:
target_class_list: Class list to target.
Returns:
A tensorflow StaticHashTable and an indicator vector for the image of
the classlist mapping.
"""
if self.namespace != target_class_list.namespace:
raise ValueError("namespaces must match when creating a class map.")
intersection = set(self.classes) & set(target_class_list.classes)
intersection = sorted(tuple(intersection))
keys = tuple(self.classes.index(c) for c in intersection)
values = tuple(target_class_list.classes.index(c) for c in intersection)
table = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(keys, values, tf.int64, tf.int64),
default_value=-1,
)
image_mask = tf.constant(
[k in self.classes for k in target_class_list.classes],
tf.int64,
)
return table, image_mask
def get_namespace_map_tf_lookup(
self, mapping: Mapping, keep_unknown: bool | None = None
) -> tf.lookup.StaticHashTable:
"""Create a tf.lookup.StaticHasTable for namespace mappings.
Args:
mapping: Mapping to apply.
      keep_unknown: How to handle unknowns. If true, unknown labels in the
        class list are maintained as unknown in the mapped values. If false,
        the unknown value is discarded. The default (`None`) raises an error
        if an unknown value is in the source class list.
    Returns:
      A TensorFlow StaticHashTable mapping source class indices to the
      corresponding class indices in the mapping's target namespace.
    Raises:
      KeyError: If a class is not in the mapping, or if the class list
        contains an unknown token and `keep_unknown` was not specified.
"""
target_class_list = self.apply_namespace_mapping(
mapping, keep_unknown=keep_unknown
)
target_class_indices = {
k: i for i, k in enumerate(target_class_list.classes)
}
mapped_pairs = mapping.mapped_pairs
if keep_unknown:
mapped_pairs = mapped_pairs | {UNKNOWN_LABEL: UNKNOWN_LABEL}
    # Skip the unknown label consistently in both keys and values; otherwise
    # dropping it from the values alone would misalign the lookup table.
    keep_mask = [
        k != UNKNOWN_LABEL or keep_unknown in (True, None)
        for k in self.classes
    ]
    keys = [i for i, m in enumerate(keep_mask) if m]
    values = [
        target_class_indices[mapped_pairs[k]]
        for k, m in zip(self.classes, keep_mask)
        if m
    ]
table = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(keys, values, tf.int64, tf.int64),
default_value=-1,
)
return table
def apply_namespace_mapping(
self, mapping: Mapping, keep_unknown: bool | None = None
) -> ClassList:
"""Apply a namespace mapping to this class list.
Args:
mapping: The mapping to apply.
      keep_unknown: How to handle unknowns. If true, unknown labels in the
        class list are maintained as unknown in the mapped values. If false,
        the unknown value is discarded. The default (`None`) raises an error
        if an unknown value is in the source class list.
    Returns:
      A class list which is the result of applying the given mapping to this
      class list.
    Raises:
      KeyError: If a class is not in the mapping, or if the class list
        contains an unknown token and `keep_unknown` was not specified.
"""
if mapping.source_namespace != self.namespace:
raise ValueError("mapping source namespace does not match class list's")
mapped_pairs = mapping.mapped_pairs
if keep_unknown:
mapped_pairs = mapped_pairs | {UNKNOWN_LABEL: UNKNOWN_LABEL}
return ClassList(
mapping.target_namespace,
tuple(
dict.fromkeys(
mapped_pairs[class_]
for class_ in self.classes
if class_ != UNKNOWN_LABEL or keep_unknown in (True, None)
)
),
)
def get_class_map_matrix(
self,
target_class_list: ClassList,
mapping: Mapping | None = None,
) -> tuple[jnp.ndarray, jnp.ndarray]:
"""Construct a binary matrix for mapping to another class list.
Args:
target_class_list: Class list to map into.
mapping: Namespace mapping, required if the source and target are in
different namespaces.
Returns:
A binary matrix mapping self to target_class_list and an indicator vector
for the image of the mapping.
"""
if self.namespace != target_class_list.namespace and mapping is None:
raise ValueError(
"If source and target classes are from different namespaces, a"
" namespace mapping must be provided."
)
elif self.namespace == target_class_list.namespace and mapping is not None:
raise ValueError(
"If source and target classes are the same, no mapping should be"
" provided."
)
matrix = jnp.zeros([len(self.classes), len(target_class_list.classes)])
target_idxs = {k: i for i, k in enumerate(target_class_list.classes)}
for i, class_ in enumerate(self.classes):
if mapping is not None:
class_ = mapping.mapped_pairs[class_]
if class_ in target_idxs:
j = target_idxs[class_]
matrix = matrix.at[i, j].set(1)
return matrix, jnp.any(matrix, axis=0)
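def _example_class_list_usage() -> None:
  # Hedged sketch with made-up names: round-trip a ClassList through CSV,
  # then build a class-map matrix onto another list in the same namespace.
  source = ClassList("demo", ("a", "b", "c"))
  restored = ClassList.from_csv(source.to_csv().splitlines())
  assert restored == source
  target = ClassList("demo", ("b", "c", "d"))
  matrix, image = source.get_class_map_matrix(target)
  # matrix has shape [3, 3]; image marks which target classes are reached.
  print(matrix.shape, image)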
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for manipulating annotations."""
import csv
import dataclasses
from typing import Callable, Sequence
from etils import epath
import pandas as pd
@dataclasses.dataclass
class TimeWindowAnnotation:
"""An annotation for a particular time window.
Attributes:
filename: Filename for the source audio.
start_time_s: Float representing the start of this annotation window.
end_time_s: Float representing the end of this annotation window.
namespace: The namespace of the classes in this annotation.
label: List of classes present in the audio segment.
"""
filename: str
start_time_s: float
end_time_s: float
namespace: str
label: Sequence[str]
def annotations_to_dataframe(
annotations: Sequence[TimeWindowAnnotation],
) -> pd.DataFrame:
return pd.DataFrame.from_records(
[dataclasses.asdict(anno) for anno in annotations]
)
def write_annotations_csv(filepath, annotations):
  """Writes annotations to a CSV file, dropping the namespace column."""
fieldnames = [f.name for f in dataclasses.fields(TimeWindowAnnotation)]
fieldnames.remove('namespace')
with epath.Path(filepath).open('w') as f:
dr = csv.DictWriter(f, fieldnames)
dr.writeheader()
for anno in annotations:
anno_dict = {f: getattr(anno, f) for f in fieldnames}
anno_dict['label'] = ' '.join(anno_dict['label'])
dr.writerow(anno_dict)
def read_dataset_annotations_csvs(
filepaths: Sequence[epath.Path],
filename_fn: Callable[[epath.Path, dict[str, str]], str],
namespace: str,
class_fn: Callable[[dict[str, str]], Sequence[str]],
start_time_fn: Callable[[dict[str, str]], float],
end_time_fn: Callable[[dict[str, str]], float],
filter_fn: Callable[[dict[str, str]], bool] | None = None,
delimiter: str = ',',
) -> Sequence[TimeWindowAnnotation]:
"""Create annotations from a random CSV.
Args:
filepaths: Path to the CSV files.
filename_fn: Function for extracting the audio filename. Maps
(annotations_filename, row) to the filename of the audio.
namespace: Namespace for the annotated classes.
    class_fn: Function for extracting the class names from a row.
    start_time_fn: Function for extracting the start timestamp. Currently
      assumes values are floats measured in seconds.
    end_time_fn: Function for extracting the end timestamp.
    filter_fn: Optional function for selecting rows of the annotation file to
      ignore. Rows where filter_fn returns True are skipped; all others are
      kept.
    delimiter: Field-separator character in the target file.
Returns:
List of TimeWindowAnnotations.
"""
annotations = []
for filepath in filepaths:
with filepath.open('r') as f:
reader = csv.DictReader(f, delimiter=delimiter)
for row in reader:
if filter_fn and filter_fn(row):
continue
filename = filename_fn(filepath, row)
start = start_time_fn(row)
end = end_time_fn(row)
classes = class_fn(row)
annotations.append(
TimeWindowAnnotation(filename, start, end, namespace, classes)
)
return annotations
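def _example_roundtrip(path: str) -> Sequence[TimeWindowAnnotation]:
  # Hedged sketch: write one annotation and read it back, using accessor
  # functions that match write_annotations_csv's column layout. The `path`
  # value is hypothetical, e.g. '/tmp/annotations.csv'.
  annos = [TimeWindowAnnotation('clip.wav', 0.0, 5.0, 'demo', ['a', 'b'])]
  write_annotations_csv(path, annos)
  return read_dataset_annotations_csvs(
      [epath.Path(path)],
      filename_fn=lambda _, row: row['filename'],
      namespace='demo',
      class_fn=lambda row: row['label'].split(' '),
      start_time_fn=lambda row: float(row['start_time_s']),
      end_time_fn=lambda row: float(row['end_time_s']),
  )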
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load eBird/Clements labels from source data."""
import typing
from absl import app
from absl import flags
from chirp.taxonomy import namespace
from chirp.taxonomy import namespace_db
import numpy as np
import pandas as pd
_SOURCE_FILE = flags.DEFINE_string(
'source_file', 'source_data/ebird_taxonomy_v2022.csv', 'CSV file to load.'
)
_PREFIX = flags.DEFINE_string(
'prefix',
'ebird2022',
'The prefix to attach to the generated namespaces, class lists, and'
' mappings.',
)
_OUTPUT_FILE = flags.DEFINE_string(
'output_file', 'taxonomy_database.json', 'Output file.'
)
SEABIRD_FAMILIES = {
'sulidae',
'fregatidae',
'stercorariidae',
'laridae',
'alcidae',
'scolopacidae',
}
SEABIRD_ORDERS = {
'sphenisciformes',
'procellariiformes',
}
def parse_ebird(
source_file: str | typing.TextIO, prefix: str
) -> namespace_db.TaxonomyDatabase:
"""Parse an eBird CSV source file.
This parses the CSV file and generates a taxonomy database containing
namespaces for all eBird codes and for all species codes. It also contains
namespaces for the scientific names of the species, genera, families, and
orders. A separate namespace is created containing all the identifiable
subspecific groups (ISSFs).
Mappings are created to map eBird codes to their species, genus, family, and
order.
Lastly, a class list is created that contains all seabird species.
Args:
source_file: The path or file-like object containing the source file.
prefix: The prefix to use for all the generated data (e.g., `ebird2021` or
`ebird2022`, to distinguish between versions).
Returns:
A `TaxonomyDatabase` containing all the generated data.
"""
# Load the CSV data
df = pd.read_csv(source_file)
# Lower-case the data
df = df.applymap(lambda x: x.lower() if isinstance(x, str) else x)
# Extract the genus from the scientific name
df['genus'] = df['SCI_NAME'].str.split(' ').str[0]
# Only keep the scientific family name (ignore the common name)
df['family'] = df['FAMILY'].str.split(' ').str[0]
# Correction to spuhs
df.loc[
(df['CATEGORY'] == 'spuh')
& ((df['genus'] == df['ORDER1']) | (df['genus'] == df['family'])),
'genus',
] = np.nan
# Report species as themselves
df.loc[df['CATEGORY'] == 'species', 'REPORT_AS'] = df.loc[
df['CATEGORY'] == 'species', 'SPECIES_CODE'
]
# Namespaces (dictionary key is the name of the namespace)
namespaces = {
'': df['SPECIES_CODE'],
'species': df.loc[df['CATEGORY'] == 'species', 'SPECIES_CODE'],
'issf': df.loc[df['CATEGORY'] == 'issf', 'SPECIES_CODE'],
'genera': df['genus'].drop_duplicates().dropna(),
'families': df['family'].drop_duplicates().dropna(),
'orders': df['ORDER1'].drop_duplicates().dropna(),
'clements': df.loc[df['CATEGORY'] == 'species', 'SCI_NAME'],
}
# The keys are (mapping name, source namespace, target namespace)
mappings = {
('to_species', '', 'species'): df[
# Only select rows which should be reported as a species
df.merge(
df, left_on='REPORT_AS', right_on='SPECIES_CODE', how='left'
)['CATEGORY_y']
== 'species'
][['SPECIES_CODE', 'REPORT_AS']],
}
for mask, suffix in (
(df['CATEGORY'] == 'species', 'species'),
(slice(None), ''),
):
prefix_ = suffix + '_' if suffix else ''
mappings |= {
(prefix_ + 'to_genus', suffix, 'genera'): df[mask][
['SPECIES_CODE', 'genus']
],
(prefix_ + 'to_family', suffix, 'families'): df[mask][
['SPECIES_CODE', 'family']
],
(prefix_ + 'to_order', suffix, 'orders'): df[mask][
['SPECIES_CODE', 'ORDER1']
],
}
if SEABIRD_FAMILIES - set(df['family']):
raise ValueError('seabird families not found in eBird data')
if SEABIRD_ORDERS - set(df['ORDER1']):
raise ValueError('seabird orders not found in eBird data')
seabirds = df[
df['family'].isin(SEABIRD_FAMILIES) | df['ORDER1'].isin(SEABIRD_ORDERS)
]
# The keys are class list name, namespace
class_lists = {
('global_seabirds', 'species'): seabirds.loc[
seabirds['CATEGORY'] == 'species', 'SPECIES_CODE'
],
}
# Add the prefixes and create the database
add_prefix = lambda name: (prefix + '_' + name).strip('_')
namespaces_ = {}
for name, classes in namespaces.items():
namespaces_[add_prefix(name)] = namespace.Namespace(frozenset(classes))
class_lists_ = {}
for (name, ns), classes in class_lists.items():
class_lists_[add_prefix(name)] = namespace.ClassList(
add_prefix(ns), tuple(sorted(classes))
)
mappings_ = {}
for (name, source_ns, target_ns), mapping in mappings.items():
# Some spuhs don't have a genus, and this was set to nan. Drop these from
# the mappings.
mapping = mapping.dropna()
mappings_[add_prefix(name)] = namespace.Mapping(
add_prefix(source_ns),
add_prefix(target_ns),
dict(zip(mapping.iloc[:, 0], mapping.iloc[:, 1])),
)
return namespace_db.TaxonomyDatabase(namespaces_, class_lists_, mappings_)
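def _example_mapping_lookup(db: namespace_db.TaxonomyDatabase) -> None:
  # Hedged sketch: with the default 'ebird2022' prefix, species codes can be
  # mapped to their order through the generated mappings. The species code
  # 'amecro' is used purely for illustration.
  to_order = db.mappings['ebird2022_to_order']
  print(to_order.mapped_pairs.get('amecro', namespace.UNKNOWN_LABEL))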
def main(argv: list[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
ebird_db = parse_ebird(_SOURCE_FILE.value, _PREFIX.value)
# Merge into existing database and write
db = namespace_db.load_db()
db.namespaces |= ebird_db.namespaces
db.mappings |= ebird_db.mappings
db.class_lists |= ebird_db.class_lists
with open(_OUTPUT_FILE.value, 'w') as f:
f.write(namespace_db.dump_db(db))
if __name__ == '__main__':
app.run(main)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run baseline separation model."""
from chirp import config_utils
from chirp.configs import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = presets.get_base_config(batch_size=8, num_train_steps=5_000_000)
config.train_dataset_config = presets.get_supervised_train_pipeline(
config,
mixin_prob=1.0,
train_dataset_dir='bird_taxonomy/slice_peaked:1.4.0',
)
config.train_dataset_config.split = 'train[:99%]'
eval_dataset_config = config_dict.ConfigDict()
eval_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=[
_c('pipeline.OnlyJaxTypes'),
_c(
'pipeline.ConvertBirdTaxonomyLabels',
source_namespace='ebird2021',
target_class_list=config.get_ref('target_class_list'),
add_taxonomic_labels=True,
),
_c('pipeline.MixAudio', mixin_prob=1.0),
_c(
'pipeline.Batch',
batch_size=config.batch_size,
split_across_devices=True,
),
_c(
'pipeline.Slice',
window_size=config.get_ref('eval_window_size_s'),
start=0.0,
),
_c('pipeline.NormalizeAudio', target_gain=0.45),
],
)
eval_dataset_config.split = 'train[99%:]'
eval_dataset_config.tfds_data_dir = config.tfds_data_dir
eval_dataset_config.dataset_directory = 'bird_taxonomy/slice_peaked:1.4.0'
config.eval_dataset_config = eval_dataset_config
# Experiment configuration
config.init_config = presets.get_base_init_config(config)
# Model Configuration
model_config = config_dict.ConfigDict()
model_config.num_mask_channels = 4
model_config.mask_kernel_size = 3
model_config.classify_bottleneck = True
model_config.classify_pool_width = 50
model_config.classify_stride = 50
model_config.classify_features = 512
config.init_config.model_config = model_config
# Mask generator model configuration
soundstream_config = config_dict.ConfigDict()
soundstream_config.base_filters = 128
# Bottleneck filters has minimal impact on quality.
soundstream_config.bottleneck_filters = 128
soundstream_config.output_filters = 1024
soundstream_config.num_residual_layers = 5
soundstream_config.strides = (5, 2, 2)
soundstream_config.feature_mults = (2, 2, 2)
soundstream_config.groups = (1, 1, 1)
soundstream_config.unet_scalar = 1.0
model_config.mask_generator = config_utils.callable_config(
'soundstream_unet.SoundstreamUNet', soundstream_config
)
# Frontend configuration
stride = config_dict.FieldReference(32)
frontend_config = config_dict.ConfigDict()
frontend_config.features = 128
frontend_config.stride = stride
inverse_frontend_config = config_dict.ConfigDict()
inverse_frontend_config.stride = stride
kernel_size = config_dict.FieldReference(128)
frontend_config.kernel_size = kernel_size
inverse_frontend_config.kernel_size = kernel_size
model_config.bank_transform = config_utils.callable_config(
'frontend.LearnedFrontend', frontend_config
)
model_config.unbank_transform = config_utils.callable_config(
'frontend.InverseLearnedFrontend', inverse_frontend_config
)
model_config.bank_is_real = True
# Training loop configuration
config.train_config = presets.get_base_train_config(config)
config.train_config.loss_max_snr = 30.0
config.train_config.classify_bottleneck_weight = 100.0
config.train_config.taxonomy_labels_weight = 1.0
config.eval_config = presets.get_base_eval_config(config)
config.eval_config.eval_steps_per_checkpoint = 100
config.eval_config.loss_max_snr = config.train_config.get_ref('loss_max_snr')
config.eval_config.taxonomy_labels_weight = config.train_config.get_ref(
'taxonomy_labels_weight'
)
  # Note: frame_size should be divisible by the product of all downsampling
  # strides in the model architecture (e.g., 32 * 5 * 2 * 2 * 50 = 32000 for
  # frontend_config.stride=32, soundstream_config.strides=(5, 2, 2), and
  # classify_stride=50).
config.export_config = config_dict.ConfigDict()
config.export_config.frame_size = 32000
config.export_config.num_train_steps = config.get_ref('num_train_steps')
return config
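def _check_frame_size(config: config_dict.ConfigDict) -> None:
  # Hedged helper illustrating the divisibility note above: the export frame
  # size must be a multiple of the combined downsampling stride, here
  # 32 * 5 * 2 * 2 * 50 = 32000.
  total_stride = 32 * 5 * 2 * 2 * 50
  assert config.export_config.frame_size % total_stride == 0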
def get_hyper(hyper):
return hyper.sweep(
'config.init_config.model_config.num_mask_channels',
hyper.discrete([6]),
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run HuBERT with Product Quantizers."""
from chirp import config_utils
from chirp.configs import hubert_base_pq
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = hubert_base_pq.get_config()
# Add an intermediate quantizer (here at layer 6) in addition to the
  # usual mel-spec quantizer (-2).
config.init_config.model_config.quantizer_points = (-2, 6)
return config
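def _describe_quantizer_points(points: tuple[int, ...]) -> list[str]:
  # Hedged helper reflecting the comment above: negative entries denote
  # frontend features (-2 is the mel-spectrogram) and non-negative entries
  # denote model layers (6 is an intermediate layer).
  return [f'frontend({p})' if p < 0 else f'layer({p})' for p in points]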
|