# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample evaluation protocol v1 configuration."""
from chirp import config_utils
from chirp.configs import baseline_attention
from chirp.configs import eval_protocol_v1_base
from ml_collections import config_dict
_c = config_utils.callable_config
_object_config = config_utils.object_config
def get_config() -> config_dict.ConfigDict:
"""Creates a configuration dictionary for the evaluation protocol v1."""
config = eval_protocol_v1_base.get_config()
baseline_attention_config = baseline_attention.get_config()
# The model_callback is expected to be a Callable[[np.ndarray], np.ndarray].
model_checkpoint_path = config_dict.FieldReference('')
config.model_checkpoint_path = model_checkpoint_path
config.model_callback = _c(
'eval_lib.TaxonomyModelCallback',
init_config=baseline_attention_config.init_config,
workdir=model_checkpoint_path,
)
# TODO(bringingjoy): extend create_species_query to support returning multiple
# queries for a given eval species.
config.create_species_query = _object_config('eval_lib.create_averaged_query')
config.score_search = _object_config('eval_lib.cosine_similarity')
# Determines the ordering of search results for use in average-precision based
# metrics. For similarity-based metrics, set sort_descending to True. For
# distance-based metrics, set this to False (for ascending ordering).
config.sort_descending = True
return config
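# --- Illustrative sketch (not part of the original config) ---
# A minimal NumPy sketch of what the query-building and scoring callables
# referenced above are expected to look like. The names, shapes, and
# signatures here are assumptions for illustration only; the real
# implementations live in chirp/eval/eval_lib.py.
import numpy as np


def _example_create_averaged_query(embeddings: np.ndarray) -> np.ndarray:
  # Average [num_representatives, embedding_dim] embeddings into one query.
  return np.mean(embeddings, axis=0)


def _example_cosine_similarity(query: np.ndarray, corpus: np.ndarray) -> np.ndarray:
  # Score a [num_examples, embedding_dim] corpus against the query; higher
  # scores mean more similar, hence sort_descending=True for cosine similarity.
  query = query / np.linalg.norm(query)
  corpus = corpus / np.linalg.norm(corpus, axis=-1, keepdims=True)
  return corpus @ query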
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base config for model evaluation using the v2 protocol.
This config sets up model evaluation using the generalization evaluation
framework over windowed and densely annotated examples without applying any
aggregation of scored search results at the recording or annotation level. The
evaluation is performed over the specified pre-trained model.
"""
import itertools
from typing import Dict, Sequence
from chirp import config_utils
from ml_collections import config_dict
_callable_config = config_utils.callable_config
_object_config = config_utils.object_config
_TFDS_DATA_DIR = None
_EVAL_REGIONS = (
'ssw',
'coffee_farms',
'hawaii',
'high_sierras',
'sierras_kahl', # Sierra Nevada region
'peru',
)
_CORPUS_TYPES = ('xc_fg', 'xc_bg', 'soundscapes')
_NUM_REPS = (1, 2, 4, 8, 16)
_SEEDS = (1, 2, 3, 4, 5)
def build_eval_set_specs() -> Dict[str, config_dict.ConfigDict]:
"""Build EvalSetSpecifications with which to construct all eval datasets.
In v2, a varying number of class representatives is used to produce the model
embeddings that form the basis for species queries during eval_lib.search().
The representatives are resampled len(_SEEDS) times so that confidence
intervals can be computed for the metrics.
Returns:
A mapping of eval set specifier to a ConfigDict containing the unparsed
EvalSetSpecification configs.
"""
eval_set_specifications = {}
for corpus_type, location in itertools.product(_CORPUS_TYPES, _EVAL_REGIONS):
for k, seed in itertools.product(_NUM_REPS, _SEEDS):
eval_set_specifications[f'{location}_{corpus_type}_{k}_seed{seed}'] = (
_callable_config(
'eval_lib.EvalSetSpecification.v2_specification',
location=location,
corpus_type=corpus_type,
num_representatives_per_class=k,
)
)
return eval_set_specifications
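# For example, the Cartesian product above (3 corpus types x 6 regions x 5
# representative counts x 5 seeds) yields 3 * 6 * 5 * 5 = 450 eval set
# specifications, with keys such as 'ssw_xc_fg_4_seed2'.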
def get_config(
data_ops: Sequence[config_dict.ConfigDict] | None = None,
) -> config_dict.ConfigDict:
"""Creates a base configuration dictionary for the v2 evaluation protocol.
The v2 protocol evaluates on species from the regions in _EVAL_REGIONS (SSW,
coffee farms, Hawaii, High Sierras, Sierra Nevada, and Peru) over the corpus
types in _CORPUS_TYPES.
Args:
data_ops: An optional sequence of additional pipeline preprocessing data ops
to add to the default configuration.
Returns:
The base configuration dictionary for the v2 evaluation protocol.
"""
# If no additional data pipeline ops are passed, default to an empty list so
# that it can be concatenated with the default pipeline ops below.
if not data_ops:
data_ops = []
config = config_dict.ConfigDict()
tfds_data_dir = config_dict.FieldReference(_TFDS_DATA_DIR)
config.tfds_data_dir = tfds_data_dir
# The PRNG seed controls the random subsampling of class representatives down
# to the desired number when forming eval sets.
config.rng_seed = 1234
config.write_results_dir = '/tmp/'
config.batch_size = 1024
# Xeno-Canto's slice_peaked variants contain 6-second audio segments that are
# randomly cropped to 5-second segments during training. At evaluation, we
# center-crop them down to 5-second segments. Soundscapes' audio segments are
# already 5-seconds long and do not need any cropping.
xc_window_size_seconds = 5
xc_slice_start = 0.5
# Hyperparameters for the v2 evaluation which uses strided windowing and
# dense annotation.
config.window_length_sec = 5
config.window_stride_sec = 2.5
config.overlap_threshold_sec = None
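# For example, with a 5 s window, a 2.5 s stride, and pad_end=True, a 10 s
# recording yields windows starting at roughly 0.0, 2.5, 5.0, and 7.5 s, with
# the final window zero-padded to the full window length (exact boundary
# handling is up to pipeline.ExtractStridedWindows).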
required_datasets = (
{
'dataset_name': 'xc_class_reps',
'tfds_name': 'bird_taxonomy/class_representatives_slice_peaked:2.*.*',
},
# The `xc_downstream` dataset includes feasible artificially rare species
# and downstream species with which to construct search corpora.
{
'dataset_name': 'xc_downstream',
'tfds_name': 'bird_taxonomy/downstream_full_length:2.*.*',
},
{
'dataset_name': 'soundscapes_ssw',
'tfds_name': 'soundscapes/ssw_full_length',
},
{
'dataset_name': 'soundscapes_coffee_farms',
'tfds_name': 'soundscapes/coffee_farms_full_length',
},
{
'dataset_name': 'soundscapes_hawaii',
'tfds_name': 'soundscapes/hawaii_full_length',
},
{
'dataset_name': 'soundscapes_peru',
'tfds_name': 'soundscapes/peru_full_length',
},
{
'dataset_name': 'soundscapes_high_sierras',
'tfds_name': 'soundscapes/high_sierras_full_length',
},
{
'dataset_name': 'soundscapes_sierras_kahl',
'tfds_name': 'soundscapes/sierras_kahl_full_length',
},
)
# Construct Pipelines to process slice-peaked and full-length datasets.
# Xeno-Canto class representative data needs to be cropped down to 5sec before
# normalizing the audio.
slice_peaked_pipeline_ops = [
_callable_config(
'pipeline.Slice',
window_size=xc_window_size_seconds,
start=xc_slice_start,
),
_callable_config(
'pipeline.OnlyKeep',
names=[
'audio',
'label',
'bg_labels',
'recording_id',
'segment_id',
'segment_start',
'segment_end',
],
),
_callable_config('pipeline.LabelsToString'),
]
# Full-length Xeno-Canto recordings are processed to extract strided windows.
# Each strided window receives the recording-level annotations. (Note that for
# this dataset, we do not have human segment-level annotations, so we do not
# follow the same process as with soundscapes downstream full-length
# recordings.)
full_length_xc_pipeline_ops = [
_callable_config(
'pipeline.ExtractStridedWindows',
window_length_sec=config.window_length_sec,
window_stride_sec=config.window_stride_sec,
pad_end=True,
),
_callable_config(
'pipeline.OnlyKeep',
names=[
'audio',
'label',
'bg_labels',
'recording_id',
'segment_id',
'segment_start',
'segment_end',
],
),
# NOTE: this pipeline operation should be applied after window extraction and
# the OnlyKeep operation. This op turns a sequence of labels into a single
# space-separated string of species codes; the previous ops assume that
# labels are sequences of integer IDs.
_callable_config('pipeline.LabelsToString'),
]
# Full-length recordings are used to construct the search corpora data for
# soundscapes. Slices are constructed using strided windowing and dense
# annotation.
full_length_soundscapes_pipeline_ops = [
_callable_config(
'pipeline.ExtractStridedWindows',
window_length_sec=config.window_length_sec,
window_stride_sec=config.window_stride_sec,
pad_end=True,
),
_callable_config(
'pipeline.DenselyAnnotateWindows',
overlap_threshold_sec=config.overlap_threshold_sec,
drop_annotation_bounds=True,
),
_callable_config(
'pipeline.OnlyKeep',
names=[
'audio',
'label',
'bg_labels',
'recording_id',
'segment_id',
'segment_start',
'segment_end',
],
),
# NOTE: this pipeline operation should be applied at the very end, as it
# turns a sequence of labels into a single space-separated string of
# species codes. Previous ops in the pipeline assume that labels are
# sequences of integer IDs.
_callable_config('pipeline.LabelsToString'),
]
dataset_configs = {}
for dataset_description in required_datasets:
dataset_config = config_dict.ConfigDict()
dataset_config.tfds_name = dataset_description['tfds_name']
dataset_config.tfds_data_dir = tfds_data_dir
if dataset_description['dataset_name'] == 'xc_class_reps':
ops = slice_peaked_pipeline_ops + data_ops
elif dataset_description['dataset_name'] == 'xc_downstream':
ops = full_length_xc_pipeline_ops + data_ops
else:
ops = full_length_soundscapes_pipeline_ops + data_ops
dataset_config.pipeline = _callable_config(
'pipeline.Pipeline', ops=ops, deterministic=True
)
dataset_config.split = 'train'
dataset_configs[dataset_description['dataset_name']] = dataset_config
config.dataset_configs = dataset_configs
# Build all eval set specifications.
config.eval_set_specifications = build_eval_set_specs()
config.debug = config_dict.ConfigDict()
# Path to the embedded dataset cache. If set, the embedded dataset will be
# cached at that path and used upon subsequent runs without recomputing the
# embeddings.
#
# **WARNING**: only use this to speed up debugging. When the path is set and a
# cache already exists, the model callback will be ignored. Updating the model
# without also updating the cache path will have no effect (i.e. metrics will
# be computed with respect to a previous model callback's embeddings).
config.debug.embedded_dataset_cache_path = ''
# The following two fields should be populated by the user in an eval config.
# Each should point to a local function, callable, or one of the provided
# functions in
# google-research/chirp/eval/eval_lib.py.
config.create_species_query = None
# Determines the ordering of search results for use in average-precision based
# metrics. For similarity-based metrics, set sort_descending to True. For
# distance-based metrics, set this to False (for ascending ordering).
config.sort_descending = None
return config
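# --- Illustrative sketch (not part of the original config) ---
# A concrete v2 eval config would typically start from this base and fill in
# the fields left as None above, mirroring the v1 sample configs elsewhere in
# this collection. The module name, callback, and field values below are
# assumptions for illustration only.
#
#   from chirp.configs import eval_protocol_v2_base
#
#   def get_config():
#     config = eval_protocol_v2_base.get_config()
#     config.model_callback = ...  # an eval_lib callback wrapping the model
#     config.create_species_query = _object_config(
#         'eval_lib.create_averaged_query')
#     config.score_search = _object_config('eval_lib.cosine_similarity')
#     config.sort_descending = True  # cosine similarity: higher is better
#     return config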
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run baseline model."""
from chirp import config_utils
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
sample_rate_hz = config_dict.FieldReference(32_000)
batch_size = config_dict.FieldReference(768)
target_class_list = config_dict.FieldReference("xenocanto")
add_taxonomic_labels = config_dict.FieldReference(False)
config = config_dict.ConfigDict()
config.sample_rate_hz = sample_rate_hz
config.batch_size = batch_size
# Configure the data
window_size_s = config_dict.FieldReference(5)
train_dataset_config = config_dict.ConfigDict()
train_dataset_config.pipeline = _c(
"pipeline.Pipeline",
ops=[
_c("pipeline.Shuffle", shuffle_buffer_size=512),
_c("pipeline.OnlyKeep", names=["audio", "label"]),
_c(
"pipeline.ConvertBirdTaxonomyLabels",
source_namespace="ebird2021",
target_class_list=target_class_list,
add_taxonomic_labels=add_taxonomic_labels,
),
_c(
"pipeline.Batch", batch_size=batch_size, split_across_devices=True
),
_c("pipeline.RandomSlice", window_size=window_size_s),
_c(
"pipeline.MelSpectrogram",
features=160,
stride=sample_rate_hz // 128,
kernel_size=2_048, # 0.064 * 32,000
sample_rate=sample_rate_hz,
freq_range=(60, 10_000),
scaling_config=_c("frontend.PCENScalingConfig", conv_width=256),
),
_c("pipeline.AddChannel"),
_c("pipeline.Repeat"),
],
)
train_dataset_config.split = "train"
config.train_dataset_config = train_dataset_config
eval_dataset_config = config_dict.ConfigDict()
eval_dataset_config.pipeline = _c(
"pipeline.Pipeline",
ops=[
_c("pipeline.OnlyKeep", names=["audio", "label"]),
_c(
"pipeline.ConvertBirdTaxonomyLabels",
source_namespace="ebird2021",
target_class_list=target_class_list,
add_taxonomic_labels=add_taxonomic_labels,
),
_c(
"pipeline.Batch", batch_size=batch_size, split_across_devices=True
),
_c("pipeline.Slice", window_size=window_size_s, start=0.0),
_c(
"pipeline.MelSpectrogram",
features=160,
stride=sample_rate_hz // 128,
kernel_size=2_048, # 0.064 * 32,000
sample_rate=sample_rate_hz,
freq_range=(60, 10_000),
scaling_config=_c("frontend.PCENScalingConfig", conv_width=256),
),
_c("pipeline.AddChannel"),
],
)
eval_dataset_config.split = "train"
config.eval_dataset_config = eval_dataset_config
# Configure the experiment setup
init_config = config_dict.ConfigDict()
init_config.learning_rate = 0.0001
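# Input shape: 5 s of audio at a 128 Hz frame rate (the MelSpectrogram stride
# above is 32_000 // 128 = 250 samples) gives 640 frames of 160 mel channels,
# plus a trailing singleton channel dimension.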
init_config.input_shape = (640, 160, 1)
init_config.rng_seed = 0
init_config.target_class_list = target_class_list
config.init_config = init_config
model_config = config_dict.ConfigDict()
init_config.model_config = model_config
mae_init_config = config_dict.ConfigDict()
mae_init_config.learning_rate = 0.0001
mae_init_config.input_shape = (640, 160, 1)
mae_init_config.rng_seed = 0
model_config.mae_init_config = mae_init_config
mae_model_config = config_dict.ConfigDict()
mae_init_config.model_config = mae_model_config
# Configure the training loop
num_train_steps = config_dict.FieldReference(1_000_000)
train_config = config_dict.ConfigDict()
train_config.num_train_steps = num_train_steps
train_config.log_every_steps = 250
train_config.checkpoint_every_steps = 25_000
config.train_config = train_config
eval_config = config_dict.ConfigDict()
eval_config.num_train_steps = num_train_steps
eval_config.eval_steps_per_checkpoint = 1000
eval_config.input_shape = (640, 160, 1)
config.eval_config = eval_config
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train only a Product Quantizer."""
from chirp import config_utils
from chirp.configs import hubert_base_pq
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = hubert_base_pq.get_config()
config.init_config.learning_rate_schedule = "cosine_decay"
config.init_config.quant_start_learning_rate = 0.1
config.init_config.model_config.readout_points = [0]
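# Zero out the readout and HuBERT losses so that, per the module docstring,
# only the quantizer loss (quant_loss_mult in the HuBERT presets) drives
# training.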
config.train_config.readout_loss_mult = 0
config.train_config.hubert_loss_mult = 0
return config
def get_hyper(hyper):
return hyper.sweep(
"config.init_config.quant_start_learning_rate", hyper.discrete([0.1])
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run HuBERT with Product Quantizers."""
from chirp import config_utils
from chirp.configs import hubert_base_pq
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = hubert_base_pq.get_config()
config.init_config.learning_rate_schedule = "cosine_decay"
config.init_config.start_learning_rate = 0.0001
config.init_config.reload_hubert_from = ""
# Decide which layer to add the supervision to.
# Note that index 6 is the last layer.
config.init_config.model_config.omit_classifier_stop_grads = (6,)
return config
def get_hyper(hyper):
return hyper.sweep(
"config.init_config.start_learning_rate", hyper.discrete([0.0001])
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run baseline model with conformer."""
from chirp import config_utils
from chirp.configs import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = presets.get_base_config(
frame_rate_hz=100,
num_channels=160,
)
config.train_dataset_config = presets.get_supervised_train_pipeline(
config,
mixin_prob=0.75,
train_dataset_dir='bird_taxonomy/slice_peaked:1.4.0',
)
config.eval_dataset_config = presets.get_supervised_eval_pipeline(
config, 'soundscapes/caples:1.1.0'
)
# Configure the experiment setup
input_shape = (
config.get_ref('train_window_size_s') * config.get_ref('sample_rate_hz'),
)
config.init_config = presets.get_base_init_config(
config, input_shape=input_shape
)
config.init_config.optimizer = _c(
'optax.adam', learning_rate=config.init_config.get_ref('learning_rate')
)
model_config = config_dict.ConfigDict()
model_config.frontend = presets.get_pcen_melspec_config(config)
# Aim to have output targets of 256, starting at 144
s = (256 / 144) ** (1 / 5)
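# s is presumably the per-stage feature scaling factor: five successive
# scalings by s take the feature dimension from 144 to 144 * s**5 == 256.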
model_config.encoder = _c(
'taxonomy_model.ConformerModel',
# Each downsample reduces time by a factor of 2.
# An additional downsample by 4 happens in the ConvolutionalSubsampling.
downsample=[(2, s), (5, s), (8, s), (11, s), (14, s)],
kernel_size=15,
)
model_config.taxonomy_loss_weight = 0.001
config.init_config.model_config = model_config
# Configure the training loop
config.train_config = presets.get_base_train_config(config)
config.eval_config = presets.get_base_eval_config(
config, input_shape=input_shape
)
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preset Configurations.
Philosophy:
* The base_config contains values which will be re-used throughout the main
configuration.
* Refer to values in the base_config using `get_ref` to ensure that the values
update correctly when performing hyperparameter sweeps or testing.
* The config_utils.parse_config resolves all references, so that downstream
code doesn't panic.
"""
from chirp import config_utils
from ml_collections import config_dict
_c = config_utils.callable_config
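# Illustrative sketch of the `get_ref` pattern described in the docstring
# (standard ml_collections behavior, not Chirp-specific): values read through
# `get_ref` stay linked to the config, so later overrides (e.g. from a
# hyperparameter sweep) propagate to every place the reference was used.
#
#   cfg = config_dict.ConfigDict()
#   cfg.sample_rate_hz = 32_000
#   window_samples = cfg.get_ref('sample_rate_hz') * 5  # lazy FieldReference
#   cfg.sample_rate_hz = 16_000
#   assert window_samples.get() == 80_000  # resolves with the updated value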
def get_base_config(**kwargs):
"""Create the base config object.
Contains common values and FieldReferences.
Args:
**kwargs: Values to add or override in the base config.
Returns:
Config dict containing common default values.
"""
config = config_dict.ConfigDict()
config.sample_rate_hz = 32000
config.train_window_size_s = 5
config.eval_window_size_s = 5
config.frame_rate_hz = 100
config.num_channels = 160
config.batch_size = 128
config.add_taxonomic_labels = True
config.target_class_list = 'xenocanto'
config.num_train_steps = 4_000_000
config.num_quantizer_pretrain_steps = 0
config.pad_mask = False
config.tfds_data_dir = ''
config.update(kwargs)
return config
def get_base_init_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
"""Default init config."""
init_config = config_dict.ConfigDict()
init_config.input_shape = (
config.get_ref('train_window_size_s') * config.get_ref('sample_rate_hz'),
)
init_config.learning_rate_schedule = 'piecewise_linear'
init_config.learning_rate = 0.0001
init_config.start_learning_rate = 0.000001
init_config.quant_start_learning_rate = 1e-5
init_config.rng_seed = 0
init_config.target_class_list = config.get_ref('target_class_list')
init_config.reload_quantizer_from = ''
init_config.reload_hubert_from = ''
init_config.reload_hubert_omit_quantizers = False
init_config.update(**kwargs)
return init_config
def get_base_train_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
"""Default train config."""
train_config = config_dict.ConfigDict()
train_config.num_train_steps = config.get_ref('num_train_steps')
train_config.num_quantizer_pretrain_steps = config.get_ref(
'num_quantizer_pretrain_steps'
)
train_config.log_every_steps = 250
train_config.checkpoint_every_steps = 25_000
train_config.readout_loss_mult = 100
train_config.hubert_loss_mult = 1
train_config.quant_loss_mult = 1
train_config.update(**kwargs)
return train_config
def get_base_eval_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
"""Default eval config."""
eval_config = config_dict.ConfigDict()
eval_config.num_train_steps = config.get_ref('num_train_steps')
eval_config.train_mode_at_eval = False
eval_config.mask_at_eval = False
eval_config.update(**kwargs)
return eval_config
def get_frontend_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
"""Get the frontend config."""
frontend_config = config_dict.ConfigDict()
frontend_stride = config.get_ref('sample_rate_hz') // config.get_ref(
'frame_rate_hz'
)
frontend_config.features = config.get_ref('num_channels')
frontend_config.stride = frontend_stride
# 2_048 = 0.064 * 32,000 -- note: in previous HuBERT configs, this was 2_560
# (i.e. 0.08 * 32,000).
frontend_config.kernel_size = 2_048
frontend_config.sample_rate = config.get_ref('sample_rate_hz')
frontend_config.freq_range = (60, 10_000)
frontend_config.scaling_config = config_utils.callable_config(
'frontend.PCENScalingConfig',
conv_width=256,
)
frontend_config.omit_frontend = False
frontend_config.update(**kwargs)
return frontend_config
def get_train_pipeline(
config: config_dict.ConfigDict, mixin_prob: float, train_dataset_dir: str
) -> config_dict.ConfigDict:
"""Create the supervised training data pipeline."""
train_dataset_config = config_dict.ConfigDict()
train_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=[
_c('pipeline.Shuffle', shuffle_buffer_size=512),
_c('pipeline.OnlyJaxTypes'),
_c(
'pipeline.ConvertBirdTaxonomyLabels',
source_namespace='ebird2021',
target_class_list=config.get_ref('target_class_list'),
add_taxonomic_labels=config.get_ref('add_taxonomic_labels'),
),
_c('pipeline.MixAudio', mixin_prob=mixin_prob),
_c(
'pipeline.Pad',
pad_size=config.get_ref('train_window_size_s'),
add_mask=config.get_ref('pad_mask'),
),
_c(
'pipeline.RandomSlice',
window_size=config.get_ref('train_window_size_s'),
),
_c(
'pipeline.Batch',
batch_size=config.get_ref('batch_size'),
split_across_devices=True,
),
_c('pipeline.RandomNormalizeAudio', min_gain=0.15, max_gain=0.25),
_c('pipeline.Repeat'),
],
)
train_dataset_config.split = 'train'
train_dataset_config.tfds_data_dir = config.get_ref('tfds_data_dir')
train_dataset_config.dataset_directory = train_dataset_dir
return train_dataset_config
def get_eval_pipeline(
config: config_dict.ConfigDict, eval_dataset_dir: str
) -> config_dict.ConfigDict:
"""Create Caples eval data pipeline."""
eval_dataset_config = config_dict.ConfigDict()
eval_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=[
_c('pipeline.OnlyJaxTypes'),
_c(
'pipeline.ConvertBirdTaxonomyLabels',
source_namespace='ebird2021',
target_class_list=config.get_ref('target_class_list'),
add_taxonomic_labels=config.get_ref('add_taxonomic_labels'),
),
_c(
'pipeline.Pad',
pad_size=config.get_ref('eval_window_size_s'),
random=False,
add_mask=config.get_ref('pad_mask'),
),
_c(
'pipeline.Slice',
window_size=config.get_ref('eval_window_size_s'),
start=0.0,
),
_c(
'pipeline.Batch',
batch_size=config.get_ref('batch_size'),
split_across_devices=True,
),
_c('pipeline.NormalizeAudio', target_gain=0.2),
],
)
eval_dataset_config.split = 'train'
eval_dataset_config.tfds_data_dir = config.get_ref('tfds_data_dir')
eval_dataset_config.dataset_directory = eval_dataset_dir
return eval_dataset_config
def get_conformer_config(**kwargs) -> config_dict.ConfigDict:
"""Default conformer config."""
conformer_config = config_dict.ConfigDict()
conformer_config.model_dims = 768
conformer_config.kernel_size = 32
conformer_config.ff_activation = config_utils.object_config('nn.swish')
conformer_config.ff_residual_weight = 0.5
conformer_config.ffn_dim_multiplier = 4
conformer_config.atten_num_heads = 8
conformer_config.layer_order = 'mhsa_before_conv'
conformer_config.dropout_prob = 0.0
conformer_config.conv_residual_dropout = None
conformer_config.atten_residual_dropout = None
conformer_config.ffn_residual_dropout = None
conformer_config.atten_dropout = None
conformer_config.ffn_relu_dropout = None
conformer_config.fflayer_weight_sharing = False
conformer_config.num_blocks = 12
conformer_config.skip_layer_norm = True
conformer_config.update(**kwargs)
return conformer_config
def get_early_fs_config(**kwargs) -> config_dict.ConfigDict:
"""Default early feature extractor config."""
early_fs_config = config_dict.ConfigDict()
early_fs_config.omit_earlyfs = False
early_fs_config.dropout_prob = 0.0
early_fs_config.activation = config_utils.object_config('nn.gelu')
early_fs_config.num_frames = 500
early_fs_config.deprecated_group_conv = False
early_fs_config.update(**kwargs)
return early_fs_config
def get_mask_config(**kwargs) -> config_dict.ConfigDict:
"""Default mask config."""
mask_config = config_dict.ConfigDict()
mask_config.mask_prob = 0.16
mask_config.mask_length = 10
mask_config.min_masks = 1
mask_config.update(**kwargs)
return mask_config
def get_classifier_config(**kwargs) -> config_dict.ConfigDict:
"""Default classifier config."""
classifier_config = config_dict.ConfigDict()
classifier_config.classify_from_all = True
classifier_config.per_frame_predictions = True
classifier_config.classify_pool_width = 3
classifier_config.classify_stride = 3
classifier_config.classify_features = 512
classifier_config.reduction_type = 'AVG'
classifier_config.update(**kwargs)
return classifier_config
def get_quantizer_config(
**kwargs,
) -> config_dict.ConfigDict:
"""Default quantizer config."""
quantizer_config = config_dict.ConfigDict()
quantizer_config.num_sections = 16
quantizer_config.strategy = 'product_quantization'
quantizer_config.use_entropy_quantizer = True
quantizer_config.update(**kwargs)
return quantizer_config
def get_base_quantizer_config(
**kwargs,
) -> config_dict.ConfigDict:
"""Default base quantizer config."""
base_quantizer_config = config_dict.ConfigDict()
base_quantizer_config.num_centroids = 64
base_quantizer_config.gamma = 2
base_quantizer_config.init_scale = 0.1
base_quantizer_config.update(**kwargs)
return base_quantizer_config
def get_model_config(**kwargs) -> config_dict.ConfigDict:
"""Default model config."""
model_config = config_dict.ConfigDict()
model_config.final_dim = 64 # the dim to project *each feature section* (PQ)
model_config.logit_temp = 0.1
model_config.alpha = 1.0
model_config.taxonomy_loss_weight = 0.0
model_config.readout_points = [3, 4, 5, 6, 7]
model_config.quantizer_points = (-2,)
model_config.stop_gradient_earlyfs = False
model_config.use_raw_audio = True
model_config.update(**kwargs)
return model_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run baseline model."""
from chirp import config_utils
from chirp.configs import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = presets.get_base_config(
train_window_size_s=30,
eval_window_size_s=5,
frame_rate_hz=100,
num_channels=160,
pad_mask=True,
)
config.train_dataset_config = presets.get_supervised_train_pipeline(
config,
mixin_prob=0.75,
train_dataset_dir='bird_taxonomy/full_length:1.4.0',
)
config.eval_dataset_config = presets.get_supervised_eval_pipeline(
config, 'soundscapes/caples:1.1.0'
)
# Configure the experiment setup
config.init_config = presets.get_base_init_config(config)
config.init_config.optimizer = _c(
'optax.adam', learning_rate=config.init_config.get_ref('learning_rate')
)
model_config = config_dict.ConfigDict()
dim = 512
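# The conv_layer_tuples below are assumed to be (features, kernel_size, stride)
# triples, wav2vec-style; the final layer projects down to 160 channels to
# match num_channels above.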
model_config.frontend = _c(
'layers.EarlyFeatureExtractor',
conv_layer_tuples=(
(dim, 20, 10),
(dim, 3, 2),
(dim, 3, 2),
(dim, 3, 2),
(dim, 3, 2),
(dim, 2, 2),
(160, 2, 2),
),
)
# Aim to have output targets of 256, starting at 144
s = (256 / 144) ** (1 / 5)
model_config.encoder = _c(
'taxonomy_model.ConformerModel',
downsample=[(2, s), (5, s), (8, s), (11, s), (14, s)],
kernel_size=15,
)
model_config.taxonomy_loss_weight = 0.0
config.init_config.model_config = model_config
# Configure the training loop
config.train_config = presets.get_base_train_config(config)
input_shape = (
config.get_ref('eval_window_size_s') * config.get_ref('sample_rate_hz'),
)
config.eval_config = presets.get_base_eval_config(
config, input_shape=input_shape
)
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run baseline model."""
from chirp import config_utils
from chirp.configs import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = presets.get_base_config()
# Configure the data
config.train_dataset_config = presets.get_supervised_train_pipeline(
config,
mixin_prob=0.75,
train_dataset_dir='bird_taxonomy/slice_peaked:1.4.0',
)
config.eval_dataset_config = presets.get_supervised_eval_pipeline(
config, 'soundscapes/powdermill:1.3.0'
)
# Configure the experiment setup
config.init_config = presets.get_base_init_config(config)
config.init_config.optimizer = _c(
'optax.adam', learning_rate=config.init_config.get_ref('learning_rate')
)
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
'efficientnet.EfficientNet',
model=_c(
'efficientnet.EfficientNetModel',
value='b1',
),
)
model_config.taxonomy_loss_weight = 0.001
model_config.frontend = presets.get_bio_pcen_melspec_config(config)
config.init_config.model_config = model_config
# Configure the training loop
config.train_config = presets.get_base_train_config(config)
config.eval_config = presets.get_base_eval_config(config)
config.export_config = config_dict.ConfigDict()
config.export_config.input_shape = (
config.get_ref('eval_window_size_s') * config.get_ref('sample_rate_hz'),
)
config.export_config.num_train_steps = config.get_ref('num_train_steps')
return config
def get_hyper(hyper):
return hyper.sweep('config.batch_size', hyper.discrete([256]))
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run baseline model."""
from chirp import config_utils
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
sample_rate_hz = config_dict.FieldReference(32_000)
batch_size = config_dict.FieldReference(768)
config = config_dict.ConfigDict()
config.sample_rate_hz = sample_rate_hz
config.batch_size = batch_size
# Configure the data
window_size_s = config_dict.FieldReference(5)
train_dataset_config = config_dict.ConfigDict()
train_dataset_config.pipeline = _c(
"pipeline.Pipeline",
ops=[
_c("pipeline.Shuffle", shuffle_buffer_size=512),
_c("pipeline.OnlyKeep", names=["audio"]),
_c(
"pipeline.Batch", batch_size=batch_size, split_across_devices=True
),
_c("pipeline.RandomSlice", window_size=window_size_s),
_c(
"pipeline.MelSpectrogram",
features=160,
stride=sample_rate_hz // 128,
kernel_size=2_048, # 0.064 * 32,000
sample_rate=sample_rate_hz,
freq_range=(60, 10_000),
scaling_config=_c("frontend.PCENScalingConfig", conv_width=256),
),
_c("pipeline.AddChannel"),
_c("pipeline.Repeat"),
],
)
train_dataset_config.split = "train"
config.train_dataset_config = train_dataset_config
# Configure the experiment setup
init_config = config_dict.ConfigDict()
init_config.learning_rate = 0.0001
init_config.input_shape = (640, 160, 1)
init_config.rng_seed = 0
config.init_config = init_config
model_config = config_dict.ConfigDict()
init_config.model_config = model_config
# Configure the training loop
num_train_steps = config_dict.FieldReference(1_000_000)
train_config = config_dict.ConfigDict()
train_config.num_train_steps = num_train_steps
train_config.log_every_steps = 250
train_config.checkpoint_every_steps = 25_000
config.train_config = train_config
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run HuBERT with Product Quantizers."""
from chirp import config_utils
from chirp.configs import hubert_base_pq
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = hubert_base_pq.get_config()
config.init_config.reload_quantizer_from = ""
# Freeze the quantizer.
config.train_config.quant_loss_mult = 0
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preset Configurations.
Philosophy:
* The base_config contains values which will be re-used throughout the main
configuration.
* Refer to values in the base_config using `get_ref` to ensure that the values
update correctly when performing hyperparameter sweeps or testing.
* The config_utils.parse_config resolves all references, so that downstream
code doesn't panic.
"""
from chirp import config_utils
from ml_collections import config_dict
_c = config_utils.callable_config
_o = config_utils.object_config
def get_base_config(**kwargs):
"""Create the base config object.
Contains common values and FieldReferences.
Args:
**kwargs: Values to add or override in the base config.
Returns:
Config dict containing common default values.
"""
config = config_dict.ConfigDict()
config.sample_rate_hz = 32000
config.train_window_size_s = 5
config.eval_window_size_s = 5
config.frame_rate_hz = 100
config.num_channels = 128
config.batch_size = 256
config.add_taxonomic_labels = True
config.target_class_list = 'xenocanto'
config.num_train_steps = 1_000_000
config.loss_fn = _o('optax.sigmoid_binary_cross_entropy')
config.pad_mask = False
config.tfds_data_dir = ''
config.update(kwargs)
return config
def get_base_init_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
"""Default init config."""
init_config = config_dict.ConfigDict()
init_config.input_shape = (
config.get_ref('train_window_size_s') * config.get_ref('sample_rate_hz'),
)
init_config.learning_rate = 0.001
init_config.rng_seed = 0
init_config.target_class_list = config.get_ref('target_class_list')
init_config.update(**kwargs)
return init_config
def get_base_train_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
train_config = config_dict.ConfigDict()
train_config.num_train_steps = config.get_ref('num_train_steps')
train_config.log_every_steps = 250
train_config.checkpoint_every_steps = 25_000
train_config.update(**kwargs)
return train_config
def get_base_eval_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
eval_config = config_dict.ConfigDict()
eval_config.num_train_steps = config.get_ref('num_train_steps')
eval_config.eval_steps_per_checkpoint = 1000
eval_config.update(**kwargs)
return eval_config
def get_pcen_melspec_config(
config: config_dict.ConfigDict,
) -> config_dict.ConfigDict:
"""Get a default PCEN Melspec configuration."""
frontend_stride = config.get_ref('sample_rate_hz') // config.get_ref(
'frame_rate_hz'
)
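# With the base config defaults (32 kHz sample rate, 100 Hz frame rate) this is
# a hop of 320 samples, i.e. one spectrogram frame every 10 ms.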
kernel_size, nfft = config_utils.get_melspec_defaults(config)
return _c(
'frontend.SimpleMelspec',
features=config.get_ref('num_channels'),
stride=frontend_stride,
kernel_size=kernel_size,
nfft=nfft,
sample_rate=config.get_ref('sample_rate_hz'),
freq_range=(60, 10_000),
scaling_config=_c('frontend.PCENScalingConfig', conv_width=256),
)
def get_new_pcen_melspec_config(
config: config_dict.ConfigDict,
) -> config_dict.ConfigDict:
"""Get a hand-rolled PCEN Melspec configuration."""
frontend_stride = config.get_ref('sample_rate_hz') // config.get_ref(
'frame_rate_hz'
)
kernel_size, nfft = config_utils.get_melspec_defaults(config)
return _c(
'frontend.SimpleMelspec',
features=config.get_ref('num_channels'),
stride=frontend_stride,
kernel_size=kernel_size,
nfft=nfft,
sample_rate=config.get_ref('sample_rate_hz'),
freq_range=(50, config.get_ref('sample_rate_hz') // 2),
power=1.0,
scaling_config=_c(
'frontend.PCENScalingConfig',
smoothing_coef=0.01,
gain=0.8,
bias=0.01,
root=4.0,
eps=1e-6,
spcen=False,
conv_width=256,
),
)
def get_bio_pcen_melspec_config(
config: config_dict.ConfigDict,
) -> config_dict.ConfigDict:
"""Get PCEN Melspec configuration as in 'PCEN: Why and How'.
https://www.justinsalamon.com/uploads/4/3/9/4/4394963/lostanlen_pcen_spl2018.pdf
Args:
config: Configuration config_dict.
Returns:
Callable config.
"""
frontend_stride = config.get_ref('sample_rate_hz') // config.get_ref(
'frame_rate_hz'
)
kernel_size, nfft = config_utils.get_melspec_defaults(config)
return _c(
'frontend.SimpleMelspec',
features=config.get_ref('num_channels'),
stride=frontend_stride,
kernel_size=kernel_size,
nfft=nfft,
sample_rate=config.get_ref('sample_rate_hz'),
freq_range=(50, config.get_ref('sample_rate_hz') // 2),
power=1.0,
scaling_config=_c(
'frontend.PCENScalingConfig',
smoothing_coef=0.145,
gain=0.8,
bias=10.0,
root=4.0,
eps=1e-6,
spcen=False,
conv_width=256,
),
)
def get_supervised_train_pipeline(
config: config_dict.ConfigDict, mixin_prob: float, train_dataset_dir: str
) -> config_dict.ConfigDict:
"""Create the supervised training data pipeline."""
train_dataset_config = config_dict.ConfigDict()
train_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=[
_c('pipeline.Shuffle', shuffle_buffer_size=512),
_c('pipeline.OnlyJaxTypes'),
_c(
'pipeline.ConvertBirdTaxonomyLabels',
source_namespace='ebird2021',
target_class_list=config.get_ref('target_class_list'),
add_taxonomic_labels=config.get_ref('add_taxonomic_labels'),
),
_c('pipeline.MixAudio', mixin_prob=mixin_prob),
_c(
'pipeline.Pad',
pad_size=config.get_ref('train_window_size_s'),
add_mask=config.get_ref('pad_mask'),
),
_c(
'pipeline.RandomSlice',
window_size=config.get_ref('train_window_size_s'),
),
_c(
'pipeline.Batch',
batch_size=config.get_ref('batch_size'),
split_across_devices=True,
),
_c('pipeline.RandomNormalizeAudio', min_gain=0.15, max_gain=0.25),
_c('pipeline.Repeat'),
],
)
train_dataset_config.split = 'train'
train_dataset_config.tfds_data_dir = config.get_ref('tfds_data_dir')
train_dataset_config.dataset_directory = train_dataset_dir
return train_dataset_config
def get_supervised_eval_pipeline(
config: config_dict.ConfigDict, eval_dataset_dir: str
) -> config_dict.ConfigDict:
"""Create Caples eval data pipeline."""
eval_dataset_config = config_dict.ConfigDict()
eval_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=[
_c('pipeline.OnlyJaxTypes'),
_c(
'pipeline.ConvertBirdTaxonomyLabels',
source_namespace='ebird2021',
target_class_list=config.get_ref('target_class_list'),
add_taxonomic_labels=config.get_ref('add_taxonomic_labels'),
),
_c(
'pipeline.Pad',
pad_size=config.get_ref('eval_window_size_s'),
random=False,
add_mask=config.get_ref('pad_mask'),
),
_c(
'pipeline.Slice',
window_size=config.get_ref('eval_window_size_s'),
start=0.0,
),
_c(
'pipeline.Batch',
batch_size=config.get_ref('batch_size'),
split_across_devices=True,
),
_c('pipeline.NormalizeAudio', target_gain=0.2),
],
)
eval_dataset_config.split = 'train'
eval_dataset_config.tfds_data_dir = config.get_ref('tfds_data_dir')
eval_dataset_config.dataset_directory = eval_dataset_dir
return eval_dataset_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base configuration for model evaluation using the v1 protocol."""
import itertools
from chirp import config_utils
from ml_collections import config_dict
_callable_config = config_utils.callable_config
_object_config = config_utils.object_config
_TFDS_DATA_DIR = None
def _crop_if_slice_peaked(to_crop: bool, **kwargs):
return [_callable_config('pipeline.Slice', **kwargs)] if to_crop else []
def _melspec_if_baseline(config_string: str, **kwargs):
return (
[_callable_config('pipeline.MelSpectrogram', **kwargs)]
if config_string == 'baseline'
else []
)
def get_config() -> config_dict.ConfigDict:
"""Creates a base configuration dictionary for the v1 evaluation protocol.
The v1 protocol evaluates on artificially rare Sapsucker Woods (SSW) species
and on held-out Colombia and Hawaii species.
Returns:
The base configuration dictionary for the v1 evaluation protocol.
"""
config = config_dict.ConfigDict()
tfds_data_dir = config_dict.FieldReference(_TFDS_DATA_DIR)
config.tfds_data_dir = tfds_data_dir
# The PRNG seed controls the random subsampling of class representatives down
# to the desired number when forming eval sets.
config.rng_seed = 1234
config.write_results_dir = '/tmp/'
config.batch_size = 16
# Xeno-Canto's slice_peaked variants contain 6-second audio segments that are
# randomly cropped to 5-second segments during training. At evaluation, we
# center-crop them down to 5-second segments. Soundscapes' audio segments are
# already 5-seconds long and do not need any cropping.
xc_window_size_seconds = 5
xc_slice_start = 0.5
# The audio is normalized to a target gain of 0.2.
target_gain = 0.2
required_datasets = (
{
'dataset_name': 'xc_artificially_rare',
'to_crop': True,
'tfds_name': 'bird_taxonomy/upstream_ar_only_slice_peaked:1.*.*',
},
{
'dataset_name': 'xc_downstream',
'to_crop': True,
'tfds_name': 'bird_taxonomy/downstream_slice_peaked:1.*.*',
},
{
'dataset_name': 'birdclef_ssw',
'to_crop': False,
'tfds_name': 'soundscapes/ssw',
},
{
'dataset_name': 'birdclef_colombia',
'to_crop': False,
'tfds_name': 'soundscapes/birdclef2019_colombia',
},
)
dataset_configs = {}
for dataset_description in required_datasets:
dataset_config = config_dict.ConfigDict()
dataset_config.tfds_name = dataset_description['tfds_name']
dataset_config.tfds_data_dir = tfds_data_dir
ops = [
_callable_config(
'pipeline.OnlyKeep',
names=[
'audio',
'label',
'bg_labels',
'recording_id',
'segment_id',
],
),
# Xeno-Canto data needs to be cropped before normalizing the audio.
_crop_if_slice_peaked(
dataset_description['to_crop'],
window_size=xc_window_size_seconds,
start=xc_slice_start,
),
_callable_config('pipeline.NormalizeAudio', target_gain=target_gain),
_callable_config('pipeline.LabelsToString'),
]
dataset_config.pipeline = _callable_config(
'pipeline.Pipeline', ops=ops, deterministic=True
)
dataset_config.split = 'train'
dataset_configs[dataset_description['dataset_name']] = dataset_config
config.dataset_configs = dataset_configs
# Build all eval set specifications.
config.eval_set_specifications = {}
for corpus_type, location in itertools.product(
('xc_fg', 'xc_bg', 'birdclef'), ('ssw', 'colombia', 'hawaii')
):
# SSW species are "artificially rare" (a limited number of examples were
# included during upstream training). If provided, we use the singular
# learned vector representation from upstream training during search.
# Otherwise, we use all available upstream recordings.
if location == 'ssw':
config.eval_set_specifications[f'artificially_rare_{corpus_type}'] = (
_callable_config(
'eval_lib.EvalSetSpecification.v1_specification',
location=location,
corpus_type=corpus_type,
num_representatives_per_class=-1,
)
)
# For downstream species, we sweep over {1, 2, 4, 8, 16} representatives
# per class, and in each case we resample the collection of class
# representatives 5 times to get confidence intervals on the metrics.
else:
for k, seed in itertools.product((1, 2, 4, 8, 16), range(1, 6)):
config.eval_set_specifications[
f'{location}_{corpus_type}_{k}_seed{seed}'
] = _callable_config(
'eval_lib.EvalSetSpecification.v1_specification',
location=location,
corpus_type=corpus_type,
num_representatives_per_class=k,
)
config.debug = config_dict.ConfigDict()
# Path to the embedded dataset cache. If set, the embedded dataset will be
# cached at that path and used upon subsequent runs without recomputing the
# embeddings.
#
# **WARNING**: only use this to speed up debugging. When the path is set and a
# cache already exists, the model callback will be ignored. Updating the model
# without also updating the cache path will have no effect (i.e. metrics will
# be computed with respect to a previous model callback's embeddings).
config.debug.embedded_dataset_cache_path = ''
# The following two fields should be populated by the user in an eval config,
# and each point to a local function, callable, or to one of the functions
# provided in
# google-research/chirp/eval/eval_lib.py.
config.create_species_query = None
# Determines the ordering of search results for use in average-precision based
# metrics. For similarity-based metrics, set sort_descending to True. For
# distance-based metrics, set this to False (for ascending ordering).
config.sort_descending = None
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample evaluation protocol v1 configuration."""
from chirp import config_utils
from chirp.configs import eval_protocol_v1_base
from ml_collections import config_dict
_c = config_utils.callable_config
_object_config = config_utils.object_config
SEP_PATH = ''
def get_config() -> config_dict.ConfigDict:
"""Creates a configuration dictionary for the evaluation protocol v1."""
config = eval_protocol_v1_base.get_config()
# The model_callback is expected to be a Callable[[np.ndarray], np.ndarray].
model_checkpoint_path = config_dict.FieldReference(SEP_PATH)
config.model_checkpoint_path = model_checkpoint_path
config.model_callback = _c(
'eval_lib.SeparatorTFCallback', model_path=model_checkpoint_path
)
# TODO(bringingjoy): extend create_species_query to support returning multiple
# queries for a given eval species.
config.create_species_query = _object_config('eval_lib.create_averaged_query')
config.score_search = _object_config('eval_lib.cosine_similarity')
config.sort_descending = True
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run HuBERT with Product Quantizers."""
from chirp import config_utils
from chirp.configs import hubert_base_pq
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = hubert_base_pq.get_config()
config.init_config.learning_rate_schedule = "cosine_decay"
config.init_config.start_learning_rate = 0.0001
config.init_config.reload_hubert_from = ""
# Decide which layer to add the supervision to.
# Note that index 6 is the last layer.
config.init_config.model_config.omit_classifier_stop_grads = (6,)
# Turn off the HuBERT and quantizer training.
config.train_config.hubert_loss_mult = 0
config.train_config.quant_loss_mult = 0
return config
def get_hyper(hyper):
return hyper.sweep(
"config.init_config.start_learning_rate", hyper.discrete([0.0001])
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run HuBERT with Product Quantizers."""
from chirp import config_utils
from chirp.configs import hubert_base_pq
from chirp.configs import hubert_presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = hubert_base_pq.get_config()
conformer_config = hubert_presets.get_conformer_config(
atten_num_heads=12, num_blocks=16
)
config.init_config.model_config.late_feature_extractor = _c(
"conformer.Conformer", conformer_config
)
config.init_config.model_config.readout_points = [0, 4, 8, 12, 15]
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the globals that can be used in configuration files."""
from typing import Any
from chirp import audio_utils
from chirp import config_utils
from chirp.eval import callbacks
from chirp.eval import eval_lib
from chirp.models import conformer
from chirp.models import efficientnet
from chirp.models import frontend
from chirp.models import handcrafted_features
from chirp.models import hubert
from chirp.models import layers
from chirp.models import quantizers
from chirp.models import soundstream_unet
from chirp.models import taxonomy_model
from chirp.preprocessing import pipeline
from flax import linen as nn
import optax
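# This mapping is what allows string references in configs (e.g. the
# 'pipeline.Pipeline' and 'efficientnet.EfficientNet' callable_config entries
# elsewhere in this collection) to be resolved to real modules and classes
# when config_utils.parse_config processes a config.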
def get_globals() -> dict[str, Any]:
return {
"audio_utils": audio_utils,
"callbacks": callbacks,
"config_utils": config_utils,
"conformer": conformer,
"efficientnet": efficientnet,
"eval_lib": eval_lib,
"hubert": hubert,
"quantizers": quantizers,
"frontend": frontend,
"layers": layers,
"nn": nn,
"optax": optax,
"pipeline": pipeline,
"handcrafted_features": handcrafted_features,
"soundstream_unet": soundstream_unet,
"taxonomy_model": taxonomy_model,
}
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run HuBERT with Product Quantizers."""
from chirp import config_utils
from chirp.configs import hubert_base_pq
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = hubert_base_pq.get_config()
config.init_config.base_quantizer_config.num_centroids = 1024
config.init_config.quantizer_config.num_sections = 2
config.init_config.quantizer_config.strategy = "residual_quantization"
config.init_config.quantizer_config.use_entropy_quantizer = False
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run HuBERT with Product Quantizers."""
from chirp import config_utils
from chirp.configs import hubert_base_pq
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = hubert_base_pq.get_config()
config.init_config.workdir = ""
return config
def get_hyper(hyper):
return hyper.product([
hyper.sweep(
"config.eval_config.train_mode_at_eval", hyper.discrete([True, False])
),
hyper.sweep(
"config.eval_config.mask_at_eval", hyper.discrete([True, False])
),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run HuBERT with Product Quantizers."""
from chirp import config_utils
from chirp.configs import hubert_base_pq
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = hubert_base_pq.get_config()
config.init_config.reload_hubert_omit_quantizers = True
config.init_config.reload_hubert_from = ""
config.init_config.model_config.quantizer_points = (6,)
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run HuBERT with Product Quantizers."""
from chirp import config_utils
from chirp.configs import hubert_presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = hubert_presets.get_base_config()
# Configure the data
config.train_dataset_config = hubert_presets.get_train_pipeline(
config,
mixin_prob=0.75,
train_dataset_dir="bird_taxonomy/slice_peaked:1.4.0",
)
config.eval_dataset_config = hubert_presets.get_eval_pipeline(
config, "soundscapes/caples:1.1.0"
)
# Configure the experiment setup
config.init_config = hubert_presets.get_base_init_config(config)
model_config = hubert_presets.get_model_config()
config.init_config.model_config = model_config
conformer_config = hubert_presets.get_conformer_config()
model_config.late_feature_extractor = _c(
"conformer.Conformer", conformer_config
)
early_fs_config = hubert_presets.get_early_fs_config()
config.init_config.early_fs_config = early_fs_config
mask_config = hubert_presets.get_mask_config()
model_config.mask_config = mask_config
classifier_config = hubert_presets.get_classifier_config()
model_config.classifier_config = classifier_config
quantizer_config = hubert_presets.get_quantizer_config()
base_quantizer_config = hubert_presets.get_base_quantizer_config()
config.init_config.quantizer_config = quantizer_config
config.init_config.base_quantizer_config = base_quantizer_config
frontend_config = hubert_presets.get_frontend_config(config)
config.init_config.frontend_config = frontend_config
config.train_config = hubert_presets.get_base_train_config(config)
config.eval_config = hubert_presets.get_base_eval_config(config)
config.export_config = config_dict.ConfigDict()
config.export_config.input_shape = (
config.get_ref("eval_window_size_s") * config.get_ref("sample_rate_hz"),
)
config.export_config.num_train_steps = config.get_ref("num_train_steps")
return config
def get_hyper(hyper):
return hyper.sweep(
"config.init_config.learning_rate", hyper.discrete([0.0001])
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HuBERT presets for the control experiments."""
from chirp.config_utils import callable_config as _c
from chirp.configs import hubert_presets as hubert_presets_default
from chirp.configs.debugging import presets as presets_debug
from ml_collections import config_dict
def get_base_config(**kwargs):
"""Create the base config object.
Contains common values and FieldReferences.
Args:
**kwargs: Values to add or override in the base config.
Returns:
Config dict containing common default values.
"""
config = presets_debug.get_base_config(**kwargs)
config.batch_size = 128
config.num_train_steps = 4_000_000
config.num_quantizer_pretrain_steps = 0
config.update(kwargs)
return config
def get_base_init_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
"""Default init config."""
init_config = presets_debug.get_base_init_config(config, **kwargs)
init_config.learning_rate_schedule = 'piecewise_linear'
init_config.learning_rate = 0.0001
init_config.start_learning_rate = 0.000001
init_config.quant_start_learning_rate = 1e-5
init_config.reload_quantizer_from = ''
init_config.reload_hubert_from = ''
init_config.reload_hubert_omit_quantizers = False
init_config.update(**kwargs)
return init_config
def get_base_train_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
"""Default train config."""
train_config = presets_debug.get_base_train_config(config, **kwargs)
train_config.num_quantizer_pretrain_steps = config.get_ref(
'num_quantizer_pretrain_steps'
)
train_config.readout_loss_mult = 100
train_config.hubert_loss_mult = 1
train_config.quant_loss_mult = 1
train_config.update(**kwargs)
return train_config
def get_base_eval_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
eval_config = presets_debug.get_base_eval_config(config, **kwargs)
eval_config.train_mode_at_eval = False
eval_config.mask_at_eval = False
eval_config.update(**kwargs)
return eval_config
def get_frontend_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
"""Get the frontend config."""
frontend_config = config_dict.ConfigDict()
frontend_stride = config.get_ref('sample_rate_hz') // config.get_ref(
'frame_rate_hz'
)
frontend_config.features = config.get_ref('num_channels')
frontend_config.stride = frontend_stride
  # 2_048 samples ~ 0.064 s at 32,000 Hz -- note: in previous HuBERT configs,
  # this was 2_560 (~0.08 s).
frontend_config.kernel_size = 2_048
frontend_config.sample_rate = config.get_ref('sample_rate_hz')
frontend_config.freq_range = (60, 10_000)
frontend_config.scaling_config = _c(
'frontend.PCENScalingConfig',
# Disable convolutional approximation
conv_width=0,
# Solution to 2*pi*tau/T = arccos(1 - s^2/(2 * (1 - s))) (prop III.1)
# for tau = 1.5 ms and T = 60 ms
smoothing_coef=0.145,
gain=0.8,
bias=10.0,
root=4.0,
)
frontend_config.omit_frontend = False
frontend_config.update(**kwargs)
return frontend_config
def get_train_pipeline(
config: config_dict.ConfigDict, train_dataset_dir: str
) -> config_dict.ConfigDict:
"""Create the supervised training data pipeline."""
return presets_debug.get_supervised_train_pipeline(config, train_dataset_dir)
def get_eval_pipeline(
config: config_dict.ConfigDict, eval_dataset_dir: str | dict[str, str]
) -> config_dict.ConfigDict:
"""Create Caples eval data pipeline."""
return presets_debug.get_supervised_eval_pipeline(config, eval_dataset_dir)
def get_conformer_config(**kwargs) -> config_dict.ConfigDict:
return hubert_presets_default.get_conformer_config(**kwargs)
def get_early_fs_config(**kwargs) -> config_dict.ConfigDict:
return hubert_presets_default.get_early_fs_config(**kwargs)
def get_mask_config(**kwargs) -> config_dict.ConfigDict:
return hubert_presets_default.get_mask_config(**kwargs)
def get_classifier_config(**kwargs) -> config_dict.ConfigDict:
return hubert_presets_default.get_classifier_config(**kwargs)
def get_quantizer_config(**kwargs) -> config_dict.ConfigDict:
return hubert_presets_default.get_quantizer_config(**kwargs)
def get_base_quantizer_config(**kwargs) -> config_dict.ConfigDict:
return hubert_presets_default.get_base_quantizer_config(**kwargs)
def get_model_config(**kwargs) -> config_dict.ConfigDict:
model_config = hubert_presets_default.get_model_config(**kwargs)
model_config.readout_points = [6]
return model_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run baseline model.
from chirp import config_utils
from chirp.configs.debugging import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config(config_string: str | None = None) -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
if config_string not in (
None,
'random_slice',
'random_gain',
'mixup',
'pcen',
'conformer',
):
raise ValueError('unexpected config string')
config = presets.get_base_config()
# Configure the data
config.train_dataset_config = presets.get_supervised_train_pipeline(
config,
# TODO(bartvm): Recreate datasets with new labels?
train_dataset_dir='bird_taxonomy/slice_peaked:1.4.0',
mixup=(config_string == 'mixup'),
random_slice=(config_string == 'random_slice'),
random_gain=(config_string == 'random_gain'),
)
# TODO(bartvm): Add XC validation split
config.eval_dataset_config = presets.get_supervised_eval_pipeline(
config,
{
'caples': 'soundscapes/caples:1.1.0',
'xc': 'bird_taxonomy/slice_peaked:1.4.0',
},
normalize=(config_string == 'random_gain'),
)
# Configure the experiment setup
config.init_config = presets.get_base_init_config(config)
config.init_config.optimizer = _c(
'optax.adam', learning_rate=config.init_config.get_ref('learning_rate')
)
model_config = config_dict.ConfigDict()
if config_string == 'conformer':
s = (256 / 144) ** (1 / 5)
model_config.encoder = _c(
'taxonomy_model.ConformerModel',
# Each downsample reduces time by a factor of 2.
# An additional downsample by 4 happens in the ConvolutionalSubsampling.
downsample=[(2, s), (5, s), (8, s), (11, s), (14, s)],
features=256,
kernel_size=32,
)
else:
model_config.encoder = _c(
'efficientnet.EfficientNet',
model=_c('efficientnet.EfficientNetModel', value='b5'),
)
model_config.taxonomy_loss_weight = 0.0
model_config.frontend = presets.get_pcen_melspec_config(config)
config.init_config.model_config = model_config
if config_string == 'pcen':
frontend_stride = config.get_ref('sample_rate_hz') // config.get_ref(
'frame_rate_hz'
)
config.init_config.model_config.frontend = _c(
'frontend.MelSpectrogram',
features=config.get_ref('num_channels'),
stride=frontend_stride,
        kernel_size=2_048,  # ~0.064 s * 32,000 Hz
sample_rate=config.get_ref('sample_rate_hz'),
freq_range=(60, 10_000),
scaling_config=_c(
'frontend.PCENScalingConfig',
conv_width=0,
smoothing_coef=0.1,
gain=0.5,
bias=2.0,
root=2.0,
),
)
# Configure the training loop
config.train_config = presets.get_base_train_config(config)
config.eval_config = presets.get_base_eval_config(
config,
input_shape=(
config.get_ref('eval_window_size_s')
* config.get_ref('sample_rate_hz'),
),
)
return config
def get_hyper(hyper):
return hyper.sweep(
'config.init_config.learning_rate', hyper.discrete([1e-3, 1e-2])
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Presets for the control experiments."""
from chirp.config_utils import callable_config as _c
from chirp.config_utils import object_config as _o
from ml_collections import config_dict
def get_base_config(**kwargs):
"""Create the base config object.
Contains common values and FieldReferences.
Args:
**kwargs: Values to add or override in the base config.
Returns:
Config dict containing common default values.
"""
config = config_dict.ConfigDict()
config.sample_rate_hz = 32_000
config.train_window_size_s = 5
config.eval_window_size_s = 5
config.frame_rate_hz = 100
config.num_channels = 160
config.batch_size = 256
config.target_class_list = 'xenocanto'
config.num_train_steps = 200_000
config.loss_fn = _o('optax.sigmoid_binary_cross_entropy')
config.tfds_data_dir = ''
config.update(kwargs)
return config
def get_base_init_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
"""Default init config."""
init_config = config_dict.ConfigDict()
init_config.input_shape = (
config.get_ref('train_window_size_s') * config.get_ref('sample_rate_hz'),
)
init_config.learning_rate = 0.0001
init_config.rng_seed = 0
init_config.target_class_list = config.get_ref('target_class_list')
init_config.update(**kwargs)
return init_config
def get_base_train_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
train_config = config_dict.ConfigDict()
train_config.num_train_steps = config.get_ref('num_train_steps')
train_config.log_every_steps = 1_250
train_config.checkpoint_every_steps = 5_000
train_config.update(**kwargs)
return train_config
def get_base_eval_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
eval_config = config_dict.ConfigDict()
eval_config.num_train_steps = config.get_ref('num_train_steps')
eval_config.tflite_export = False
eval_config.update(**kwargs)
return eval_config
def get_pcen_melspec_config(
config: config_dict.ConfigDict,
) -> config_dict.ConfigDict:
frontend_stride = config.get_ref('sample_rate_hz') // config.get_ref(
'frame_rate_hz'
)
return _c(
'frontend.MelSpectrogram',
features=config.get_ref('num_channels'),
stride=frontend_stride,
      kernel_size=2_048,  # ~0.064 s * 32,000 Hz
sample_rate=config.get_ref('sample_rate_hz'),
freq_range=(60, 10_000),
# Settings from PCEN: Why and how
scaling_config=_c(
'frontend.PCENScalingConfig',
# Disable convolutional approximation
conv_width=0,
# Solution to 2*pi*tau/T = arccos(1 - s^2/(2 * (1 - s))) (prop III.1)
# for tau = 1.5 ms and T = 60 ms
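          # Numerically: arccos(1 - 0.145**2 / (2 * (1 - 0.145))) ~ 0.157 rad,
          # which matches 2 * pi * 1.5 / 60 ~ 0.157, so s ~ 0.145.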
smoothing_coef=0.145,
gain=0.8,
bias=10.0,
root=4.0,
),
)
def get_supervised_train_pipeline(
config: config_dict.ConfigDict,
train_dataset_dir: str,
mixup=False,
random_slice=False,
random_gain=False,
) -> config_dict.ConfigDict:
"""Create the supervised training data pipeline."""
if train_dataset_dir != 'bird_taxonomy/slice_peaked:1.4.0':
raise ValueError('we assume training on XC')
train_dataset_config = config_dict.ConfigDict()
if random_slice:
slice_op = _c(
'pipeline.RandomSlice',
window_size=config.get_ref('train_window_size_s'),
)
else:
slice_op = _c(
'pipeline.Slice',
window_size=config.get_ref('train_window_size_s'),
start=0.5,
)
ops = [
_c('pipeline.Shuffle', shuffle_buffer_size=512),
_c('pipeline.OnlyJaxTypes'),
_c(
'pipeline.ConvertBirdTaxonomyLabels',
source_namespace='ebird2021',
target_class_list=config.get_ref('target_class_list'),
add_taxonomic_labels=False,
),
_c(
'pipeline.MixAudio',
target_dist=(1.0, 0.5, 0.25, 0.25) if mixup else (1.0,),
),
slice_op,
_c(
'pipeline.Batch',
batch_size=config.get_ref('batch_size'),
split_across_devices=True,
),
_c('pipeline.Repeat'),
]
if random_gain:
ops.append(
_c('pipeline.RandomNormalizeAudio', min_gain=0.15, max_gain=0.25)
)
train_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=ops,
)
train_dataset_config.split = 'train'
train_dataset_config.tfds_data_dir = config.get_ref('tfds_data_dir')
train_dataset_config.dataset_directory = train_dataset_dir
return train_dataset_config
def get_supervised_eval_pipeline(
config: config_dict.ConfigDict,
eval_dataset_dir: str | dict[str, str],
normalize=False,
) -> config_dict.ConfigDict:
"""Create Caples eval data pipeline."""
if isinstance(eval_dataset_dir, dict):
return config_dict.ConfigDict(
{
name: get_supervised_eval_pipeline(config, dataset_dir, normalize)
for name, dataset_dir in eval_dataset_dir.items()
}
)
eval_dataset_config = config_dict.ConfigDict()
ops = [
_c('pipeline.OnlyJaxTypes'),
_c(
'pipeline.ConvertBirdTaxonomyLabels',
source_namespace='ebird2021',
target_class_list=config.get_ref('target_class_list'),
add_taxonomic_labels=False,
),
_c(
'pipeline.Slice',
window_size=config.get_ref('eval_window_size_s'),
start=0.5,
),
_c(
'pipeline.Batch',
batch_size=config.get_ref('batch_size'),
split_across_devices=True,
),
]
if normalize:
ops.append(_c('pipeline.NormalizeAudio', target_gain=0.2))
eval_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=ops,
)
eval_dataset_config.split = 'train'
eval_dataset_config.tfds_data_dir = config.get_ref('tfds_data_dir')
eval_dataset_config.dataset_directory = eval_dataset_dir
return eval_dataset_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run HuBERT model control experiment."""
from chirp import config_utils
from chirp.configs.debugging import hubert_presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_config() -> config_dict.ConfigDict:
"""Create configuration dictionary for training."""
config = hubert_presets.get_base_config()
# Configure the data
# TODO(etriantafillou): Switch this to XC training split
config.train_dataset_config = hubert_presets.get_train_pipeline(
config,
train_dataset_dir='bird_taxonomy/slice_peaked:1.4.0',
)
# TODO(etriantafillou): Add XC validation split
config.eval_dataset_config = hubert_presets.get_eval_pipeline(
config,
{
'caples': 'soundscapes/caples:1.1.0',
'xc': 'bird_taxonomy/slice_peaked:1.4.0',
},
)
# Configure the experiment setup
config.init_config = hubert_presets.get_base_init_config(config)
model_config = hubert_presets.get_model_config()
config.init_config.model_config = model_config
conformer_config = hubert_presets.get_conformer_config()
model_config.late_feature_extractor = _c(
'conformer.Conformer', conformer_config
)
early_fs_config = hubert_presets.get_early_fs_config()
config.init_config.early_fs_config = early_fs_config
mask_config = hubert_presets.get_mask_config()
model_config.mask_config = mask_config
classifier_config = hubert_presets.get_classifier_config()
model_config.classifier_config = classifier_config
model_config.taxonomy_loss_weight = 0.0
quantizer_config = hubert_presets.get_quantizer_config()
base_quantizer_config = hubert_presets.get_base_quantizer_config()
config.init_config.quantizer_config = quantizer_config
config.init_config.base_quantizer_config = base_quantizer_config
frontend_config = hubert_presets.get_frontend_config(config)
config.init_config.frontend_config = frontend_config
config.train_config = hubert_presets.get_base_train_config(config)
config.eval_config = hubert_presets.get_base_eval_config(
config,
input_shape=(
config.get_ref('eval_window_size_s')
* config.get_ref('sample_rate_hz'),
),
)
return config
def get_hyper(hyper):
return hyper.sweep(
'config.init_config.learning_rate', hyper.discrete([0.0001])
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run the logistic regression baseline."""
from chirp import config_utils
from chirp.configs.debugging import presets
from ml_collections import config_dict
_c = config_utils.callable_config
_KERNEL_SIZE = 2_048  # ~0.064 s * 32,000 Hz
def get_pipeline_ops(
filtering_df_path: str | None,
filter_by_complement: bool,
shuffle: bool,
target_class_list: str,
mixup: bool,
random_slice: bool,
slice_window_size: int,
slice_start: float,
random_normalize: bool,
melspec_num_channels: int,
melspec_frame_rate: int,
melspec_kernel_size: int,
sample_rate: int,
batch_size: int,
repeat: bool,
) -> list[config_dict.ConfigDict]:
"""Creates the pipeline ops."""
filtering_op = shuffle_op = mixup_op = repeat_op = None
melspec_stride = sample_rate // melspec_frame_rate
if filtering_df_path:
filtering_op = _c(
'pipeline.FilterByFeature',
filtering_df_path=filtering_df_path,
complement=filter_by_complement,
)
if shuffle:
shuffle_op = _c('pipeline.Shuffle', shuffle_buffer_size=512)
if mixup:
mixup_op = _c('pipeline.MixAudio', target_dist=(1.0, 0.5, 0.25, 0.25))
if random_slice:
slice_op = _c('pipeline.RandomSlice', window_size=slice_window_size)
else:
slice_op = _c(
'pipeline.Slice',
window_size=slice_window_size,
start=slice_start,
)
if random_normalize:
normalize_op = _c(
'pipeline.RandomNormalizeAudio', min_gain=0.15, max_gain=0.25
)
else:
normalize_op = _c('pipeline.NormalizeAudio', target_gain=0.2)
if repeat:
repeat_op = _c('pipeline.Repeat')
ops = [
filtering_op,
shuffle_op,
_c('pipeline.OnlyJaxTypes'),
_c(
'pipeline.ConvertBirdTaxonomyLabels',
source_namespace='ebird2021',
target_class_list=target_class_list,
add_taxonomic_labels=False,
),
mixup_op,
slice_op,
normalize_op,
_c(
'pipeline.MelSpectrogram',
features=melspec_num_channels,
stride=melspec_stride,
kernel_size=melspec_kernel_size,
sample_rate=sample_rate,
freq_range=(60, 10_000),
# Settings from PCEN: Why and how
scaling_config=_c(
'frontend.PCENScalingConfig',
# Disable convolutional approximation
conv_width=0,
# Solution to 2*pi*tau/T = arccos(1 - s^2/(2 * (1 - s)))
# (prop III.1) for tau = 1.5 ms and T = 60 ms
smoothing_coef=0.145,
gain=0.8,
bias=10.0,
root=4.0,
),
),
_c(
'pipeline.Batch',
batch_size=batch_size,
split_across_devices=True,
),
repeat_op,
]
return [op for op in ops if op is not None]
def get_supervised_train_pipeline(
config: config_dict.ConfigDict,
filtering_df_path: str | None,
filter_by_complement: bool,
train_dataset_dir: str,
) -> config_dict.ConfigDict:
"""Creates the supervised training data pipeline."""
if train_dataset_dir != 'bird_taxonomy/slice_peaked:1.4.0':
raise ValueError('we assume training on XC')
train_dataset_config = config_dict.ConfigDict()
train_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=get_pipeline_ops(
filtering_df_path=filtering_df_path,
filter_by_complement=filter_by_complement,
shuffle=True,
target_class_list=config.get_ref('target_class_list'),
mixup=True,
random_slice=True,
slice_window_size=config.get_ref('train_window_size_s'),
slice_start=0.0, # Unused because random_slice = True.
random_normalize=True,
melspec_num_channels=config.get_ref('num_channels'),
melspec_frame_rate=config.get_ref('frame_rate_hz'),
melspec_kernel_size=_KERNEL_SIZE,
sample_rate=config.get_ref('sample_rate_hz'),
batch_size=config.get_ref('batch_size'),
repeat=True,
),
)
train_dataset_config.split = 'train'
train_dataset_config.tfds_data_dir = config.get_ref('tfds_data_dir')
train_dataset_config.dataset_directory = train_dataset_dir
return train_dataset_config
def get_supervised_eval_pipeline(
config: config_dict.ConfigDict,
filtering_df_path: str | None,
filter_by_complement: bool,
slice_start: float,
eval_dataset_dir: str,
) -> config_dict.ConfigDict:
"""Creates an eval data pipeline."""
eval_dataset_config = config_dict.ConfigDict()
eval_dataset_config.pipeline = _c(
'pipeline.Pipeline',
      ops=get_pipeline_ops(
          filtering_df_path=filtering_df_path,
          filter_by_complement=filter_by_complement,
          shuffle=False,
          target_class_list=config.get_ref('target_class_list'),
          mixup=False,
          random_slice=False,
          slice_window_size=config.get_ref('eval_window_size_s'),
          slice_start=slice_start,
          random_normalize=False,
          melspec_num_channels=config.get_ref('num_channels'),
          melspec_frame_rate=config.get_ref('frame_rate_hz'),
          melspec_kernel_size=_KERNEL_SIZE,
          sample_rate=config.get_ref('sample_rate_hz'),
          batch_size=config.get_ref('batch_size'),
          repeat=False,
      ),
  )
  eval_dataset_config.split = 'train'
  eval_dataset_config.tfds_data_dir = config.get_ref('tfds_data_dir')
  eval_dataset_config.dataset_directory = eval_dataset_dir
  return eval_dataset_config
def get_config() -> config_dict.ConfigDict:
  """Creates the configuration dictionary for training and evaluation."""
  config = presets.get_base_config()
  # Configure the data
  config.train_dataset_config = get_supervised_train_pipeline(
      config,
      filtering_df_path=None,
      filter_by_complement=True,
      train_dataset_dir='bird_taxonomy/slice_peaked:1.4.0',
  )
config.eval_dataset_config = {
'caples': get_supervised_eval_pipeline(
config,
filtering_df_path=None,
filter_by_complement=False,
slice_start=0.0,
eval_dataset_dir='soundscapes/caples:1.1.0',
),
'xc_train_subset': get_supervised_eval_pipeline(
config,
filtering_df_path=None,
filter_by_complement=False,
slice_start=0.5,
eval_dataset_dir='bird_taxonomy/slice_peaked:1.4.0',
),
'xc_test': get_supervised_eval_pipeline(
config,
filtering_df_path=None,
filter_by_complement=False,
slice_start=0.5,
eval_dataset_dir='bird_taxonomy/slice_peaked:1.4.0',
),
}
# Configure the experiment setup
config.init_config = presets.get_base_init_config(config)
config.init_config.optimizer = _c(
'optax.adam', learning_rate=config.init_config.get_ref('learning_rate')
)
encoder_config = config_dict.ConfigDict()
encoder_config.aggregation = 'beans'
encoder_config.compute_mfccs = True
encoder_config.num_mfccs = 20
config.encoder_config = encoder_config
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
'handcrafted_features.HandcraftedFeatures',
compute_mfccs=encoder_config.get_ref('compute_mfccs'),
num_mfccs=encoder_config.get_ref('num_mfccs'),
aggregation=encoder_config.get_ref('aggregation'),
window_size=10,
window_stride=10,
)
model_config.taxonomy_loss_weight = 0.0
model_config.frontend = None
config.init_config.model_config = model_config
# Configure the training loop
num_train = config.get_ref('train_window_size_s') * config.get_ref(
'sample_rate_hz'
)
num_eval = config.get_ref('eval_window_size_s') * config.get_ref(
'sample_rate_hz'
)
stride = config.get_ref('sample_rate_hz') // config.get_ref(
'frame_rate_hz'
)
  # As explained in chirp.models.frontend.STFT, the output of
# chirp.data.pipeline.MelSpectrogram has shape [num_frames, num_channels], and
# num_frames is computed as
#
# (num_samples + stride - (kernel_size % 2)) // stride - correction,
#
# where correction is 1 if kernel_size is even and 0 otherwise.
odd_kernel = _KERNEL_SIZE % 2
num_train_frames = (
(num_train + stride - odd_kernel) // stride + odd_kernel - 1
)
num_eval_frames = (num_eval + stride - odd_kernel) // stride + odd_kernel - 1
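  # Worked example with the defaults above: num_train = 5 * 32_000 = 160_000
  # samples, stride = 32_000 // 100 = 320, and kernel_size = 2_048 (even), so
  # num_train_frames = (160_000 + 320) // 320 - 1 = 500.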
config.init_config.input_shape = (
num_train_frames,
config.get_ref('num_channels'),
)
config.train_config = presets.get_base_train_config(config)
config.eval_config = presets.get_base_eval_config(
config,
input_shape=(num_eval_frames, config.get_ref('num_channels')),
)
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
encoder_hypers = hyper.zipit([
hyper.sweep(
'config.encoder_config.aggregation',
['beans', 'flatten', 'avg_pool'],
),
hyper.sweep(
'config.encoder_config.compute_mfccs',
[True, True, False],
),
])
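  # zipit pairs the two sweeps elementwise, so the encoder settings tried are
  # ('beans', True), ('flatten', True), ('avg_pool', False) for
  # (aggregation, compute_mfccs).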
optimizer_hypers = hyper.sweep(
'config.init_config.learning_rate',
hyper.discrete([1e-3, 1e-2, 1e-1, 1e0]),
)
return hyper.product([encoder_hypers, optimizer_hypers])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run the logistic regression baseline."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
_o = config_utils.object_config
def get_encoder_config() -> config_dict.ConfigDict:
encoder_config = config_dict.ConfigDict()
encoder_config.aggregation = 'avg_pool'
encoder_config.compute_mfccs = False
encoder_config.num_mfccs = 20 # Unused by default.
return encoder_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
'handcrafted_features.HandcraftedFeatures',
compute_mfccs=config.encoder_config.get_ref('compute_mfccs'),
num_mfccs=config.encoder_config.get_ref('num_mfccs'),
aggregation=config.encoder_config.get_ref('aggregation'),
window_size=10,
window_stride=10,
)
model_config.taxonomy_loss_weight = 0.0
model_config.frontend = presets.get_pcen_melspec_config(config)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(
batch_size=64,
melspec_in_pipeline=False,
random_augmentations=True,
cosine_alpha=0.0,
loss_fn=_o('layers.hinge_loss'),
)
config.encoder_config = get_encoder_config()
config.init_config = presets.get_base_init_config(config, learning_rate=10.0)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_ablation_train_dataset_config(
config
)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = presets.get_ablation_eval_dataset_config(config)
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep(
'config.init_config.rng_seed',
hyper.discrete([1239]),
),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Presets for the baseline experiments."""
from chirp import config_utils
from ml_collections import config_dict
_c = config_utils.callable_config
_o = config_utils.object_config
def get_pcen_melspec_config(
config: config_dict.ConfigDict,
) -> config_dict.ConfigDict:
"""Get a default PCEN Melspec configuration."""
frontend_stride = config.get_ref('sample_rate_hz') // config.get_ref(
'frame_rate_hz'
)
kernel_size, nfft = config_utils.get_melspec_defaults(config)
return _c(
'frontend.MelSpectrogram',
features=config.get_ref('num_channels'),
stride=frontend_stride,
kernel_size=kernel_size,
nfft=nfft,
sample_rate=config.get_ref('sample_rate_hz'),
freq_range=(60, 10_000),
scaling_config=_c('frontend.PCENScalingConfig', conv_width=256),
)
def get_base_config(**kwargs):
"""Creates the base config object.
Contains common values and FieldReferences.
Args:
**kwargs: Values to add or override in the base config.
Returns:
Config dict containing common default values.
"""
config = config_dict.ConfigDict()
config.sample_rate_hz = 32_000
config.train_window_size_s = 5
config.eval_window_size_s = 5
config.eval_window_stride_s = 2.5
config.frame_rate_hz = 100
config.num_channels = 160
config.kernel_size, config.nfft = config_utils.get_melspec_defaults(config)
config.batch_size = 256
config.target_class_list = 'xenocanto'
config.num_train_steps = 1_000_000
config.random_augmentations = True
config.melspec_in_pipeline = True
config.loss_fn = _o('optax.sigmoid_binary_cross_entropy')
# Set to 1.0 to turn off cosine decay. The default value for alpha is zero in
# optax.cosine_decay_schedule, so we want to try alpha \in {0, 1}.
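  # (In optax.cosine_decay_schedule, alpha is the final value of the learning
  # rate multiplier: alpha=1.0 keeps the rate constant, alpha=0.0 decays it
  # all the way to zero.)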
config.cosine_alpha = 1.0
config.tfds_data_dir = ''
config.export_config = config_dict.ConfigDict()
config.export_config.input_shape = (
config.get_ref('eval_window_size_s') * config.get_ref('sample_rate_hz'),
)
config.export_config.num_train_steps = config.get_ref('num_train_steps')
config.update(kwargs)
return config
def _compute_input_shape(
config: config_dict.ConfigDict, window_size_ref: config_dict.FieldReference
) -> config_dict.ConfigDict:
"""Computes the models's input shape."""
# As explained in chirp.models.frontent.STFT, the output of
# chirp.data.pipeline.MelSpectrogram has shape [num_frames, num_channels], and
# num_frames is computed as
#
# (num_samples + stride - (kernel_size % 2)) // stride - correction,
#
# where correction is 1 if kernel_size is even and 0 otherwise.
num_samples = window_size_ref * config.get_ref('sample_rate_hz')
stride = config.get_ref('sample_rate_hz') // config.get_ref('frame_rate_hz')
odd_kernel = config.get_ref('kernel_size') % 2
rval = (
(num_samples + stride - odd_kernel) // stride + (odd_kernel - 1),
config.get_ref('num_channels'),
)
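  # With the defaults (5 s windows, 32 kHz audio, 100 Hz frame rate) this
  # evaluates to 500 frames, regardless of kernel parity.
  # config_utils.either below returns the [num_frames, num_channels] shape when
  # the melspectrogram is computed in the data pipeline, and the raw
  # (num_samples,) shape when the model's frontend computes it instead.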
return _c(
'config_utils.either',
object_a=rval,
object_b=(num_samples,),
return_a=config.get_ref('melspec_in_pipeline'),
)
def get_base_init_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
"""Default init config."""
init_config = config_dict.ConfigDict()
init_config.input_shape = _compute_input_shape(
config, config.get_ref('train_window_size_s')
)
init_config.learning_rate = 0.0001
init_config.optimizer = _c(
'optax.adam',
learning_rate=_c(
'optax.cosine_decay_schedule',
init_value=init_config.get_ref('learning_rate'),
decay_steps=config.get_ref('num_train_steps'),
alpha=config.get_ref('cosine_alpha'),
),
)
init_config.rng_seed = 0
init_config.target_class_list = config.get_ref('target_class_list')
init_config.update(**kwargs)
return init_config
def get_base_train_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
train_config = config_dict.ConfigDict()
train_config.num_train_steps = config.get_ref('num_train_steps')
train_config.log_every_steps = 1_250
train_config.checkpoint_every_steps = 5_000
train_config.update(**kwargs)
return train_config
def get_base_train_dataset_config(
config: config_dict.ConfigDict,
) -> config_dict.ConfigDict:
return get_supervised_train_pipeline(
config,
filtering_df_paths=None,
      filter_by_complement=False,  # Unused because filtering_df_paths=None.
train_dataset_dir='bird_taxonomy/upstream_slice_peaked:2.0.0',
)
def get_ablation_train_dataset_config(
config: config_dict.ConfigDict,
) -> config_dict.ConfigDict:
return get_supervised_train_pipeline(
config,
filtering_df_paths=None,
filter_by_complement=True,
train_dataset_dir='bird_taxonomy/slice_peaked:2.0.0',
)
def get_base_eval_config(
config: config_dict.ConfigDict, **kwargs
) -> config_dict.ConfigDict:
eval_config = config_dict.ConfigDict()
eval_config.num_train_steps = config.get_ref('num_train_steps')
eval_config.update(**kwargs)
return eval_config
def get_base_eval_dataset_config(
config: config_dict.ConfigDict,
) -> dict[str, config_dict.ConfigDict]:
return {
'powdermill': get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
          filter_by_complement=False,  # Unused because filtering_df_paths=None.
slice_method='strided_windows',
slice_start=0.0,
eval_dataset_dir='soundscapes/powdermill_full_length:1.3.0',
),
}
def get_ablation_eval_dataset_config(
config: config_dict.ConfigDict,
) -> dict[str, config_dict.ConfigDict]:
return {
'powdermill': get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
          filter_by_complement=False,  # Unused because filtering_df_paths=None.
slice_method='fixed',
slice_start=0.0,
eval_dataset_dir='soundscapes/powdermill:1.3.0',
),
'train_iid_subset': get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
filter_by_complement=False,
slice_method='fixed',
slice_start=0.5,
eval_dataset_dir='bird_taxonomy/slice_peaked:2.0.0',
),
'test_iid': get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
filter_by_complement=False,
slice_method='fixed',
slice_start=0.5,
eval_dataset_dir='bird_taxonomy/slice_peaked:2.0.0',
),
'test_label_shifted': get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
filter_by_complement=False,
slice_method='fixed',
slice_start=0.5,
eval_dataset_dir='bird_taxonomy/slice_peaked:2.0.0',
),
}
def _get_pipeline_ops(
filtering_df_paths: list[str] | None,
filter_by_complement: bool,
shuffle: bool,
target_class_list: str,
mixup: bool | config_dict.FieldReference,
slice_method: str,
slice_window_size: int,
slice_window_stride: float,
slice_start: float,
random_normalize: bool | config_dict.FieldReference,
melspec_in_pipeline: bool | config_dict.FieldReference,
melspec_num_channels: int,
melspec_frame_rate: int,
melspec_kernel_size: int,
melspec_nfft: int,
sample_rate: int,
batch_size: int,
split_across_devices: bool,
drop_remainder: bool,
repeat: bool,
) -> list[config_dict.ConfigDict]:
"""Creates the pipeline ops."""
filtering_ops = []
shuffle_op = repeat_op = None
melspec_stride = sample_rate // melspec_frame_rate
if filtering_df_paths:
for filtering_df_path in filtering_df_paths:
filtering_ops.append(
_c(
'pipeline.FilterByFeature',
filtering_df_path=filtering_df_path,
complement=filter_by_complement,
)
)
if shuffle:
shuffle_op = _c('pipeline.Shuffle', shuffle_buffer_size=512)
mixup_op = _c(
'config_utils.either',
object_a=_c('pipeline.MixAudio', target_dist=(1.0, 0.5, 0.25, 0.25)),
object_b=_c('pipeline.DatasetPreprocessOp'),
return_a=mixup,
)
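  # When mixup is disabled, the bare pipeline.DatasetPreprocessOp is selected
  # instead, serving as a pass-through placeholder so the ops list keeps the
  # same structure either way.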
if slice_method == 'random':
slice_op = _c('pipeline.RandomSlice', window_size=slice_window_size)
annotate_op = None
elif slice_method == 'fixed':
slice_op = _c(
'pipeline.Slice',
window_size=slice_window_size,
start=slice_start,
)
annotate_op = None
elif slice_method == 'strided_windows':
slice_op = _c(
'pipeline.ExtractStridedWindows',
window_length_sec=slice_window_size,
window_stride_sec=slice_window_stride,
)
annotate_op = _c(
'pipeline.DenselyAnnotateWindows', drop_annotation_bounds=True
)
else:
raise ValueError(f'unrecognized slice method: {slice_method}')
normalize_op = _c(
'config_utils.either',
object_a=_c(
'pipeline.RandomNormalizeAudio', min_gain=0.15, max_gain=0.25
),
object_b=_c('pipeline.NormalizeAudio', target_gain=0.2),
return_a=random_normalize,
)
if repeat:
repeat_op = _c('pipeline.Repeat')
ops = filtering_ops + [
shuffle_op,
_c('pipeline.OnlyJaxTypes'),
slice_op,
annotate_op,
# NOTE: pipeline.ConvertBirdTaxonomyLabels comes *after* the slicing and
# annotation ops, as the pipeline.DenselyAnnotateWindows op used when
# slice_method == 'strided_windows' expects labels to be sequences of
# integers rather than multi-hot encoded vectors.
_c(
'pipeline.ConvertBirdTaxonomyLabels',
source_namespace='ebird2021',
target_class_list=target_class_list,
add_taxonomic_labels=True,
),
normalize_op,
mixup_op,
_c(
'config_utils.either',
object_a=_c(
'pipeline.MelSpectrogram',
features=melspec_num_channels,
stride=melspec_stride,
kernel_size=melspec_kernel_size,
nfft=melspec_nfft,
sample_rate=sample_rate,
freq_range=(60, 10_000),
scaling_config=_c('frontend.PCENScalingConfig', conv_width=256),
),
object_b=_c('pipeline.FeaturesPreprocessOp'),
return_a=melspec_in_pipeline,
),
_c(
'pipeline.Batch',
batch_size=batch_size,
split_across_devices=split_across_devices,
drop_remainder=drop_remainder,
),
repeat_op,
]
return [op for op in ops if op is not None]
def get_supervised_train_pipeline(
config: config_dict.ConfigDict,
filtering_df_paths: list[str] | None,
filter_by_complement: bool,
train_dataset_dir: str,
) -> config_dict.ConfigDict:
"""Creates the supervised training data pipeline."""
if train_dataset_dir not in (
'bird_taxonomy/upstream_slice_peaked:2.0.0',
'bird_taxonomy/slice_peaked:2.0.0',
):
raise ValueError('we assume training on XC')
train_dataset_config = config_dict.ConfigDict()
train_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=_get_pipeline_ops(
filtering_df_paths=filtering_df_paths,
filter_by_complement=filter_by_complement,
shuffle=True,
target_class_list=config.get_ref('target_class_list'),
mixup=config.get_ref('random_augmentations'),
slice_method='random',
slice_window_size=config.get_ref('train_window_size_s'),
          slice_window_stride=0.0,  # Unused because slice_method='random'.
slice_start=0.0, # Unused because slice_method='random'.
random_normalize=config.get_ref('random_augmentations'),
melspec_in_pipeline=config.get_ref('melspec_in_pipeline'),
melspec_num_channels=config.get_ref('num_channels'),
melspec_frame_rate=config.get_ref('frame_rate_hz'),
melspec_kernel_size=config.get_ref('kernel_size'),
melspec_nfft=config.get_ref('nfft'),
sample_rate=config.get_ref('sample_rate_hz'),
batch_size=config.get_ref('batch_size'),
split_across_devices=True,
drop_remainder=True,
repeat=True,
),
)
train_dataset_config.split = 'train'
train_dataset_config.tfds_data_dir = config.get_ref('tfds_data_dir')
train_dataset_config.dataset_directory = train_dataset_dir
return train_dataset_config
def get_supervised_eval_pipeline(
config: config_dict.ConfigDict,
filtering_df_paths: list[str] | None,
filter_by_complement: bool,
slice_method: str,
slice_start: float,
eval_dataset_dir: str,
) -> config_dict.ConfigDict:
"""Creates an eval data pipeline."""
eval_dataset_config = config_dict.ConfigDict()
eval_dataset_config.pipeline = _c(
'pipeline.Pipeline',
ops=_get_pipeline_ops(
filtering_df_paths=filtering_df_paths,
filter_by_complement=filter_by_complement,
shuffle=False,
target_class_list=config.get_ref('target_class_list'),
mixup=False,
slice_method=slice_method,
slice_window_size=config.get_ref('eval_window_size_s'),
slice_window_stride=config.get_ref('eval_window_stride_s'),
slice_start=slice_start,
random_normalize=False,
melspec_in_pipeline=config.get_ref('melspec_in_pipeline'),
melspec_num_channels=config.get_ref('num_channels'),
melspec_frame_rate=config.get_ref('frame_rate_hz'),
melspec_kernel_size=config.get_ref('kernel_size'),
melspec_nfft=config.get_ref('nfft'),
sample_rate=config.get_ref('sample_rate_hz'),
batch_size=config.get_ref('batch_size'),
split_across_devices=False,
drop_remainder=False,
repeat=False,
),
)
eval_dataset_config.split = 'train'
eval_dataset_config.tfds_data_dir = config.get_ref('tfds_data_dir')
eval_dataset_config.dataset_directory = eval_dataset_dir
return eval_dataset_config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to run the logistic regression baseline."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
_o = config_utils.object_config
def get_encoder_config() -> config_dict.ConfigDict:
encoder_config = config_dict.ConfigDict()
encoder_config.aggregation = 'avg_pool'
encoder_config.compute_mfccs = False
encoder_config.num_mfccs = 20 # Unused by default.
return encoder_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
'handcrafted_features.HandcraftedFeatures',
compute_mfccs=config.encoder_config.get_ref('compute_mfccs'),
num_mfccs=config.encoder_config.get_ref('num_mfccs'),
aggregation=config.encoder_config.get_ref('aggregation'),
window_size=10,
window_stride=10,
)
model_config.taxonomy_loss_weight = 0.0
model_config.frontend = presets.get_pcen_melspec_config(config)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(
batch_size=64,
melspec_in_pipeline=False,
random_augmentations=True,
cosine_alpha=0.0,
)
config.encoder_config = get_encoder_config()
config.init_config = presets.get_base_init_config(config, learning_rate=0.316)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_ablation_train_dataset_config(
config
)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = presets.get_ablation_eval_dataset_config(config)
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep('config.init_config.rng_seed', hyper.discrete([1236])),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train the (large) Conformer baseline ablation."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.taxonomy_loss_weight = 0.0
model_config.frontend = presets.get_pcen_melspec_config(config)
# Aim to have output targets of 256, starting at 144
s = (256 / 144) ** (1 / 5)
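  # i.e. s ~ 1.122, chosen so that five applications grow the 144 starting
  # features to 144 * s**5 = 256.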
model_config.encoder = _c(
'taxonomy_model.ConformerModel',
# Each downsample reduces time by a factor of 2.
# An additional downsample by 4 happens in the ConvolutionalSubsampling.
downsample=[(2, s), (5, s), (8, s), (11, s), (14, s)],
kernel_size=15,
)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(
melspec_in_pipeline=False, random_augmentations=True, cosine_alpha=0.0
)
config.init_config = presets.get_base_init_config(
config, learning_rate=3.16e-4
)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_ablation_train_dataset_config(
config
)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = presets.get_ablation_eval_dataset_config(config)
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep(
'config.init_config.rng_seed', hyper.discrete([1235])
),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train the logistic regression baseline."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
_o = config_utils.object_config
def get_encoder_config() -> config_dict.ConfigDict:
encoder_config = config_dict.ConfigDict()
encoder_config.aggregation = 'avg_pool'
encoder_config.compute_mfccs = False
encoder_config.num_mfccs = 20 # Unused by default.
return encoder_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
'handcrafted_features.HandcraftedFeatures',
compute_mfccs=config.encoder_config.get_ref('compute_mfccs'),
num_mfccs=config.encoder_config.get_ref('num_mfccs'),
aggregation=config.encoder_config.get_ref('aggregation'),
window_size=10,
window_stride=10,
)
model_config.taxonomy_loss_weight = 0.0
model_config.frontend = presets.get_pcen_melspec_config(config)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(batch_size=64, melspec_in_pipeline=False)
config.encoder_config = get_encoder_config()
config.init_config = presets.get_base_init_config(config)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_base_train_dataset_config(config)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = presets.get_base_eval_dataset_config(config)
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep(
'config.random_augmentations',
hyper.discrete([False, True]),
),
hyper.sweep(
'config.cosine_alpha',
# Without / with cosine decay for the learning rate.
hyper.discrete([1.0, 0.0]),
),
hyper.sweep(
'config.init_config.learning_rate',
# 10 ** np.linspace(-5, 1, 5)
hyper.discrete([1e-05, 3.16e-4, 1e-2, 3.16e-1, 1e1]),
),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train the (large) EfficientNet baseline ablation."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
'efficientnet.EfficientNet',
model=_c('efficientnet.EfficientNetModel', value='b5'),
)
model_config.taxonomy_loss_weight = 0.001
model_config.frontend = presets.get_pcen_melspec_config(config)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(
melspec_in_pipeline=False, random_augmentations=True, cosine_alpha=0.0
)
config.init_config = presets.get_base_init_config(config, learning_rate=1e-2)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_ablation_train_dataset_config(
config
)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = presets.get_ablation_eval_dataset_config(config)
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep('config.init_config.rng_seed', hyper.discrete([1234])),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train the (large) EfficientNet baseline."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
'efficientnet.EfficientNet',
model=_c('efficientnet.EfficientNetModel', value='b5'),
)
model_config.taxonomy_loss_weight = 1e-3
model_config.frontend = presets.get_pcen_melspec_config(config)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(melspec_in_pipeline=False)
config.init_config = presets.get_base_init_config(config)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_base_train_dataset_config(config)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = {
'powdermill': presets.get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
          filter_by_complement=False,  # Unused because filtering_df_paths=None.
slice_method='strided_windows',
slice_start=0.0,
eval_dataset_dir='soundscapes/powdermill_full_length:1.3.0',
),
'caples': presets.get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
          filter_by_complement=False,  # Unused because filtering_df_paths=None.
slice_method='fixed',
slice_start=0.0,
eval_dataset_dir='soundscapes/caples:1.3.0',
),
}
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep(
'config.random_augmentations',
hyper.discrete([False, True]),
),
hyper.sweep(
'config.init_config.model_config.taxonomy_loss_weight',
hyper.discrete([0, 1e-3]),
),
hyper.sweep(
'config.cosine_alpha',
# Without / with cosine decay for the learning rate.
hyper.discrete([1.0, 0.0]),
),
hyper.sweep(
'config.init_config.learning_rate',
# 10 ** np.linspace(-5, 1, 5)
hyper.discrete([1e-05, 3.16e-4, 1e-2, 3.16e-1, 1e1]),
),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train the SVM baseline."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
_o = config_utils.object_config
def get_encoder_config() -> config_dict.ConfigDict:
encoder_config = config_dict.ConfigDict()
encoder_config.aggregation = 'avg_pool'
encoder_config.compute_mfccs = False
encoder_config.num_mfccs = 20 # Unused by default.
return encoder_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
'handcrafted_features.HandcraftedFeatures',
compute_mfccs=config.encoder_config.get_ref('compute_mfccs'),
num_mfccs=config.encoder_config.get_ref('num_mfccs'),
aggregation=config.encoder_config.get_ref('aggregation'),
window_size=10,
window_stride=10,
)
model_config.taxonomy_loss_weight = 0.0
model_config.frontend = presets.get_pcen_melspec_config(config)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(
batch_size=64,
melspec_in_pipeline=False,
loss_fn=_o('layers.hinge_loss'),
)
config.encoder_config = get_encoder_config()
config.init_config = presets.get_base_init_config(config)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_base_train_dataset_config(config)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = presets.get_base_eval_dataset_config(config)
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep(
'config.random_augmentations',
hyper.discrete([False, True]),
),
hyper.sweep(
'config.cosine_alpha',
# Without / with cosine decay for the learning rate.
hyper.discrete([1.0, 0.0]),
),
hyper.sweep(
'config.init_config.learning_rate',
# 10 ** np.linspace(-5, 1, 5)
hyper.discrete([1e-05, 3.16e-4, 1e-2, 3.16e-1, 1e1]),
),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train the (small) EfficientNet baseline ablation."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
'efficientnet.EfficientNet',
model=_c('efficientnet.EfficientNetModel', value='b1'),
)
model_config.taxonomy_loss_weight = 0.0
model_config.frontend = presets.get_pcen_melspec_config(config)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(
melspec_in_pipeline=False, random_augmentations=True, cosine_alpha=0.0
)
config.init_config = presets.get_base_init_config(
config, learning_rate=3.16e-4
)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_ablation_train_dataset_config(
config
)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = presets.get_ablation_eval_dataset_config(config)
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep('config.init_config.rng_seed', hyper.discrete([1237])),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train the (small) EfficientNet baseline."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.encoder = _c(
'efficientnet.EfficientNet',
model=_c('efficientnet.EfficientNetModel', value='b1'),
)
model_config.taxonomy_loss_weight = 1e-3
model_config.frontend = presets.get_pcen_melspec_config(config)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(melspec_in_pipeline=False)
config.init_config = presets.get_base_init_config(config)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_base_train_dataset_config(config)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = {
'powdermill': presets.get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
          filter_by_complement=False,  # Unused because filtering_df_paths=None.
slice_method='strided_windows',
slice_start=0.0,
eval_dataset_dir='soundscapes/powdermill_full_length:1.3.0',
),
'caples': presets.get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
          filter_by_complement=False,  # Unused because filtering_df_paths=None.
slice_method='fixed',
slice_start=0.0,
eval_dataset_dir='soundscapes/caples:1.3.0',
),
}
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep(
'config.random_augmentations',
hyper.discrete([False, True]),
),
hyper.sweep(
'config.init_config.model_config.taxonomy_loss_weight',
hyper.discrete([0, 1e-3]),
),
hyper.sweep(
'config.cosine_alpha',
# Without / with cosine decay for the learning rate.
hyper.discrete([1.0, 0.0]),
),
hyper.sweep(
'config.init_config.learning_rate',
# 10 ** np.linspace(-5, 1, 5)
hyper.discrete([1e-05, 3.16e-4, 1e-2, 3.16e-1, 1e1]),
),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train the (small) Conformer baseline."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.taxonomy_loss_weight = 1e-3
model_config.frontend = presets.get_pcen_melspec_config(config)
# Aim to have output targets of 256, starting at 144
s = (256 / 144) ** (1 / 5)
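  # Illustrative arithmetic (not from the original comments): s =
  # (256 / 144) ** (1 / 5) ~= 1.122, so five equal multiplicative steps take
  # 144 up to 144 * s**5 = 256.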
model_config.encoder = _c(
'taxonomy_model.ConformerModel',
# Each downsample reduces time by a factor of 2.
# An additional downsample by 4 happens in the ConvolutionalSubsampling.
downsample=[(2, s), (5, s), (8, s), (11, s), (14, s)],
kernel_size=15,
num_conformer_blocks=4,
)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(melspec_in_pipeline=False)
config.init_config = presets.get_base_init_config(config)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_base_train_dataset_config(config)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = {
'powdermill': presets.get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
          filter_by_complement=False,  # Unused because filtering_df_paths=None.
slice_method='strided_windows',
slice_start=0.0,
eval_dataset_dir='soundscapes/powdermill_full_length:1.3.0',
),
'caples': presets.get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
          filter_by_complement=False,  # Unused because filtering_df_paths=None.
slice_method='fixed',
slice_start=0.0,
eval_dataset_dir='soundscapes/caples:1.3.0',
),
}
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep(
'config.random_augmentations',
hyper.discrete([False, True]),
),
hyper.sweep(
'config.init_config.model_config.taxonomy_loss_weight',
hyper.discrete([0, 1e-3]),
),
hyper.sweep(
'config.cosine_alpha',
# Without / with cosine decay for the learning rate.
hyper.discrete([1.0, 0.0]),
),
hyper.sweep(
'config.init_config.learning_rate',
# 10 ** np.linspace(-5, 1, 5)
hyper.discrete([1e-05, 3.16e-4, 1e-2, 3.16e-1, 1e1]),
),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train the (large) Conformer baseline."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.taxonomy_loss_weight = 1e-3
model_config.frontend = presets.get_pcen_melspec_config(config)
# Aim to have output targets of 256, starting at 144
s = (256 / 144) ** (1 / 5)
model_config.encoder = _c(
'taxonomy_model.ConformerModel',
# Each downsample reduces time by a factor of 2.
# An additional downsample by 4 happens in the ConvolutionalSubsampling.
downsample=[(2, s), (5, s), (8, s), (11, s), (14, s)],
kernel_size=15,
)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(melspec_in_pipeline=False)
config.init_config = presets.get_base_init_config(config)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_base_train_dataset_config(config)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = {
'powdermill': presets.get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
          filter_by_complement=False,  # Unused because filtering_df_paths=None.
slice_method='strided_windows',
slice_start=0.0,
eval_dataset_dir='soundscapes/powdermill_full_length:1.3.0',
),
'caples': presets.get_supervised_eval_pipeline(
config,
filtering_df_paths=None,
          filter_by_complement=False,  # Unused because filtering_df_paths=None.
slice_method='fixed',
slice_start=0.0,
eval_dataset_dir='soundscapes/caples:1.3.0',
),
}
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep(
'config.random_augmentations',
hyper.discrete([False, True]),
),
hyper.sweep(
'config.init_config.model_config.taxonomy_loss_weight',
hyper.discrete([0, 1e-3]),
),
hyper.sweep(
'config.cosine_alpha',
# Without / with cosine decay for the learning rate.
hyper.discrete([1.0, 0.0]),
),
hyper.sweep(
'config.init_config.learning_rate',
# 10 ** np.linspace(-5, 1, 5)
hyper.discrete([1e-05, 3.16e-4, 1e-2, 3.16e-1, 1e1]),
),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train the (small) Conformer baseline ablation."""
from chirp import config_utils
from chirp.configs.baselines import presets
from ml_collections import config_dict
_c = config_utils.callable_config
def get_model_config(config: config_dict.ConfigDict) -> config_dict.ConfigDict:
"""Returns the model config."""
model_config = config_dict.ConfigDict()
model_config.taxonomy_loss_weight = 0.0
model_config.frontend = presets.get_pcen_melspec_config(config)
# Aim to have output targets of 256, starting at 144
s = (256 / 144) ** (1 / 5)
model_config.encoder = _c(
'taxonomy_model.ConformerModel',
# Each downsample reduces time by a factor of 2.
# An additional downsample by 4 happens in the ConvolutionalSubsampling.
downsample=[(2, s), (5, s), (8, s), (11, s), (14, s)],
kernel_size=15,
num_conformer_blocks=4,
)
return model_config
def get_config() -> config_dict.ConfigDict:
"""Creates the configuration dictionary for training and evaluation."""
config = presets.get_base_config(
melspec_in_pipeline=False, cosine_alpha=1.0, random_augmentations=True
)
config.init_config = presets.get_base_init_config(
config, learning_rate=3.16e-4
)
config.init_config.model_config = get_model_config(config)
config.train_config = presets.get_base_train_config(config)
config.train_dataset_config = presets.get_ablation_train_dataset_config(
config
)
config.eval_config = presets.get_base_eval_config(config)
config.eval_dataset_config = presets.get_ablation_eval_dataset_config(config)
return config
def get_hyper(hyper):
"""Defines the hyperparameter sweep."""
return hyper.product([
hyper.sweep(
'config.init_config.rng_seed', hyper.discrete([1238])
),
])
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop for separation models."""
import functools
import io
from typing import Callable, Dict
from absl import logging
from chirp import export_utils
from chirp.data import utils as data_utils
from chirp.models import metrics
from chirp.models import output
from chirp.models import separation_model
from chirp.taxonomy import class_utils
from chirp.train import utils
from clu import checkpoint
from clu import metric_writers
from clu import metrics as clu_metrics
from clu import periodic_actions
import flax
import flax.jax_utils as flax_utils
import imageio as iio
import jax
from jax import numpy as jnp
from jax import random
import librosa
import librosa.display
import matplotlib.pyplot as plt
from ml_collections import config_dict
import numpy as np
import optax
import tensorflow as tf
EVAL_LOOP_SLEEP_S = 30
def p_log_snr_loss(
source: jnp.ndarray,
estimate: jnp.ndarray,
max_snr: float = 1e6,
**unused_kwargs,
):
return jnp.mean(metrics.negative_snr_loss(source, estimate, max_snr))
TRAIN_METRICS = {
'loss': clu_metrics.Average.from_output('loss'),
'taxo_loss': clu_metrics.Average.from_output('taxo_loss'),
'mixit_neg_snr': clu_metrics.Average.from_output('mixit_neg_snr'),
}
EVAL_METRICS = {
'rank_metrics': utils.CollectingMetrics.from_funs(
**{
'label_cmap': (('label_logits', 'label'), metrics.cmap),
'genus_cmap': (('genus_logits', 'genus'), metrics.cmap),
'family_cmap': (('family_logits', 'family'), metrics.cmap),
'order_cmap': (('order_logits', 'order'), metrics.cmap),
'label_roc_auc': (('label_logits', 'label'), metrics.roc_auc),
'genus_roc_auc': (('genus_logits', 'genus'), metrics.roc_auc),
'family_roc_auc': (('family_logits', 'family'), metrics.roc_auc),
'order_roc_auc': (('order_logits', 'order'), metrics.roc_auc),
}
)
}
def initialize_model(
input_shape: tuple[int, ...],
rng_seed: int,
learning_rate: float,
workdir: str,
model_config: config_dict.ConfigDict,
target_class_list: str,
):
"""Creates model for training, eval, or inference."""
# Initialize random number generator
key = random.PRNGKey(rng_seed)
# Load model
model_init_key, key = random.split(key)
class_lists = class_utils.get_class_lists(target_class_list, True)
model = separation_model.SeparationModel(
num_classes={k: len(v.classes) for (k, v) in class_lists.items()},
**model_config,
)
variables = model.init(
model_init_key, jnp.zeros((1,) + input_shape), train=False
)
model_state, params = flax.core.pop(variables, 'params')
# Initialize optimizer
optimizer = optax.adam(learning_rate=learning_rate)
opt_state = optimizer.init(params)
# Load checkpoint
ckpt = checkpoint.MultihostCheckpoint(workdir)
train_state = utils.TrainState(
step=0, params=params, opt_state=opt_state, model_state=model_state
)
train_state = ckpt.restore_or_initialize(train_state)
return (
utils.ModelBundle(
model=model,
key=key,
ckpt=ckpt,
optimizer=optimizer,
class_lists=class_lists,
),
train_state,
)
def fig_image(fig):
"""Returns an image summary from a matplotlib figure."""
buffer = io.BytesIO()
fig.savefig(buffer, format='png', bbox_inches='tight')
img = iio.imread(buffer.getvalue(), format='png')
plt.close(fig)
return img
def force_numpy(arr):
"""Ensures that arr is a numpy array."""
if isinstance(arr, np.ndarray):
return arr
if hasattr(arr, 'numpy'):
# Eager mode.
return arr.numpy()
else:
return tf.make_ndarray(arr)
def _audio_and_spectrogram_summaries(
writer: metric_writers.MetricWriter,
step: int,
title: str,
batch: Dict[str, jnp.ndarray],
separated_audio: jnp.ndarray,
sample_rate: int = 16000,
max_outputs: int = 5,
):
"""Makes audio and spectrogram summaries for MixIT models."""
def _plot_spec(data, ax, title_, do_db_scaling=True):
if data.ndim == 2:
spec = data
else:
spec = np.abs(librosa.stft(np.array(data)))
if do_db_scaling:
spec = librosa.amplitude_to_db(spec, ref=np.max)
librosa.display.specshow(
spec,
y_axis='mel',
sr=sample_rate,
x_axis='time',
ax=ax,
)
ax.set(title=title_)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel('')
ax.set_ylabel('')
ax.label_outer()
# For waveforms, we expect:
# separated_audio.shape = (tpus, batch // tpus, n_src, time)
# batch['source_audio'].shape = (tpus, batch // tpus, n_mix, time)
# batch['audio'].shape = (tpus, batch // tpus, time)
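  # Hypothetical example: with 4 devices, a global batch of 32, 2 mixes, 4
  # estimated sources, and 5 s of 16 kHz audio, these are (4, 8, 4, 80000),
  # (4, 8, 2, 80000), and (4, 8, 80000); the reshapes below fold the device
  # axis into the batch axis so each example can be summarized individually.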
mixes_of_mixes = np.reshape(batch['audio'], (-1, *batch['audio'].shape[-1:]))
mixes = np.reshape(
batch['source_audio'], (-1, *batch['source_audio'].shape[2:])
)
separated_audio = np.reshape(
separated_audio, (-1, *separated_audio.shape[-2:])
)
batch_size, n_src, *_ = separated_audio.shape
n_mixes = mixes.shape[1]
img_size = 4
n_rows = max(n_src, n_mixes + 1)
for i in range(min(batch_size, max_outputs)):
fig, axes = plt.subplots(
n_rows, 2, figsize=(2 * img_size, img_size * n_rows)
)
# Make summary for Mixture of Mixtures (MoM)
mom = mixes_of_mixes[i]
mom_title = 'MoM'
_plot_spec(mom, axes[0, 0], mom_title)
writer.write_audios(
step, {f'{title}/mom{i}': mom[None, :, None]}, sample_rate=sample_rate
)
# Make summaries for mixes
for m in range(n_mixes):
mix = mixes[i, m, ...]
mix_title = f'Mix {m+1}'
_plot_spec(mix, axes[m + 1, 0], mix_title)
writer.write_audios(
step,
{f'{title}/mix{i}_{m}': mix[None, :, None]},
sample_rate=sample_rate,
)
# Make summaries for estimated sources
for s in range(n_src):
src = separated_audio[i, s, ...]
src_title = f'Est. Source {s+1}'
_plot_spec(src, axes[s, 1], src_title)
writer.write_audios(
step,
{f'{title}/est{i}_src{s}': src[None, :, None]},
sample_rate=sample_rate,
)
# Clean up image and turn into a summary
for ax in axes.flatten():
ax.label_outer()
if not ax.has_data():
ax.set_visible(False)
plt.tight_layout()
img = fig_image(fig)
writer.write_images(step, {f'{title}/spectrograms_{i}': img[None, ...]})
def train(
model_bundle,
train_state,
train_dataset,
num_train_steps: int,
logdir: str,
log_every_steps: int,
checkpoint_every_steps: int,
loss_max_snr: float,
classify_bottleneck_weight: float,
taxonomy_labels_weight: float,
loss_fn: Callable[
[jnp.ndarray, jnp.ndarray], jnp.ndarray
] = optax.sigmoid_binary_cross_entropy,
) -> None:
"""Train a model."""
train_iterator = train_dataset.as_numpy_iterator()
train_metrics_collection = utils.NestedCollection.create(**TRAIN_METRICS)
initial_step = int(train_state.step)
train_state = flax.jax_utils.replicate(train_state)
# Logging
writer = metric_writers.create_default_writer(logdir)
reporter = periodic_actions.ReportProgress(
num_train_steps=num_train_steps, writer=writer
)
@functools.partial(jax.pmap, axis_name='batch')
def train_step(batch, train_state):
"""Training step for the separation model."""
def update_step(params, model_state):
variables = {'params': params, **model_state}
model_outputs, model_state = model_bundle.model.apply(
variables,
batch['audio'],
train=True,
mutable=list(model_state.keys()),
)
estimate, _ = metrics.least_squares_mixit(
reference=batch['source_audio'],
estimate=model_outputs.separated_audio,
)
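      # MixIT: the estimated sources are remixed via a least-squares mixing
      # matrix so that they best reconstruct the reference mixtures; the
      # negative SNR of that remix is the separation loss below.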
model_outputs = model_outputs.time_reduce_logits('MIDPOINT')
taxo_loss = utils.taxonomy_loss(
outputs=model_outputs,
taxonomy_loss_weight=taxonomy_labels_weight,
loss_fn=loss_fn,
**batch,
)['loss']
mixit_neg_snr = p_log_snr_loss(
batch['source_audio'], estimate, loss_max_snr
)
loss = mixit_neg_snr
if classify_bottleneck_weight > 0.0:
loss = mixit_neg_snr + classify_bottleneck_weight * jnp.mean(taxo_loss)
train_metrics = train_metrics_collection.gather_from_model_output(
taxo_loss=taxo_loss,
mixit_neg_snr=mixit_neg_snr,
loss=loss,
**batch,
**output.logits(model_outputs),
)
return loss, (train_metrics, model_state)
grads, (train_metrics, model_state) = jax.grad(update_step, has_aux=True)(
train_state.params, train_state.model_state
)
grads = jax.lax.pmean(grads, axis_name='batch')
updates, opt_state = model_bundle.optimizer.update(
grads, train_state.opt_state
)
params = optax.apply_updates(train_state.params, updates)
train_state = utils.TrainState(
step=train_state.step + 1,
params=params,
opt_state=opt_state,
model_state=model_state,
)
return train_metrics, train_state
for step in range(initial_step, num_train_steps + 1):
with jax.profiler.StepTraceAnnotation('train__', step_num=step):
batch = next(train_iterator)
train_metrics, train_state = train_step(batch, train_state)
if step % log_every_steps == 0:
train_metrics = flax_utils.unreplicate(train_metrics).compute(
prefix='train'
)
utils.write_metrics(writer, step, train_metrics)
reporter(step)
if step % checkpoint_every_steps == 0:
with reporter.timed('checkpoint'):
model_bundle.ckpt.save(flax_utils.unreplicate(train_state))
writer.close()
def evaluate(
model_bundle: utils.ModelBundle,
train_state: utils.TrainState,
valid_dataset: tf.data.Dataset,
workdir: str,
num_train_steps: int,
loss_max_snr: float,
taxonomy_labels_weight: float,
loss_fn: Callable[
[jnp.ndarray, jnp.ndarray], jnp.ndarray
] = optax.sigmoid_binary_cross_entropy,
eval_sleep_s: int = EVAL_LOOP_SLEEP_S,
eval_steps_per_checkpoint: int = -1,
sample_rate_hz: int = 32_000, # TODO(emanilow): pipe through sample rates.
):
"""Run evaluation."""
train_metrics = TRAIN_METRICS.copy()
del train_metrics['loss']
valid_metrics_collection = utils.NestedCollection.create(
**(train_metrics | EVAL_METRICS)
)
@functools.partial(jax.pmap, axis_name='batch')
def get_metrics(batch, train_state):
variables = {'params': train_state.params, **train_state.model_state}
model_outputs = model_bundle.model.apply(
variables, batch['audio'], train=False
)
model_outputs = model_outputs.time_reduce_logits('MIDPOINT')
estimate, _ = metrics.least_squares_mixit(
reference=batch['source_audio'], estimate=model_outputs.separated_audio
)
taxo_loss = utils.taxonomy_loss(
outputs=model_outputs,
taxonomy_loss_weight=taxonomy_labels_weight,
loss_fn=loss_fn,
**batch,
)['loss']
mixit_neg_snr = p_log_snr_loss(
batch['source_audio'], estimate, loss_max_snr
)
return model_outputs, valid_metrics_collection.gather_from_model_output(
taxo_loss=taxo_loss,
mixit_neg_snr=mixit_neg_snr,
**batch,
**output.logits(model_outputs),
)
writer = metric_writers.create_default_writer(workdir, asynchronous=False)
reporter = periodic_actions.ReportProgress(
num_train_steps=num_train_steps, writer=writer
)
for train_state in utils.checkpoint_iterator(
train_state, model_bundle.ckpt, workdir, num_train_steps, eval_sleep_s
):
cur_train_step = int(train_state.step)
with reporter.timed('eval'):
valid_metrics = valid_metrics_collection.empty()
for valid_step, batch in enumerate(valid_dataset.as_numpy_iterator()):
batch = jax.tree_map(np.asarray, batch)
model_outputs, new_valid_metrics = get_metrics(
batch, flax_utils.replicate(train_state)
)
valid_metrics = valid_metrics.merge(
flax_utils.unreplicate(new_valid_metrics)
)
_audio_and_spectrogram_summaries(
writer,
cur_train_step,
'eval',
batch,
model_outputs.separated_audio,
sample_rate=sample_rate_hz,
)
if (
eval_steps_per_checkpoint > 0
and valid_step >= eval_steps_per_checkpoint
):
break
# Log validation loss
utils.write_metrics(
writer, cur_train_step, valid_metrics.compute(prefix='valid')
)
writer.flush()
def export_tf_model(
model_bundle: utils.ModelBundle,
train_state: utils.TrainState,
workdir: str,
num_train_steps: int,
frame_size: int,
eval_sleep_s: int = EVAL_LOOP_SLEEP_S,
):
"""Write a TFLite flatbuffer.
Args:
model_bundle: The model bundle.
train_state: The train state.
workdir: Where to place the exported model.
num_train_steps: Number of training steps.
frame_size: Frame size for input audio. The exported model will take inputs
with shape [B, T//frame_size, frame_size]. This ensures that the time
dimension is divisible by the product of all model strides, which allows
us to set a polymorphic time dimension. Thus, the frame_size must be
divisible by the product of all strides in the model.
eval_sleep_s: Number of seconds to sleep when waiting for next checkpoint.
"""
for train_state in utils.checkpoint_iterator(
train_state, model_bundle.ckpt, workdir, num_train_steps, eval_sleep_s
):
variables = {'params': train_state.params, **train_state.model_state}
# CAUTION: If the infer_fn signature changes, then the SeparatorTFCallback
# in the eval benchmark code will also need to be changed.
def infer_fn(framed_audio_batch, variables):
flat_inputs = jnp.reshape(
framed_audio_batch, [framed_audio_batch.shape[0], -1]
)
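      # Collapse the frame dimension so the separation model sees a flat
      # [B, T] waveform again.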
model_outputs = model_bundle.model.apply(
variables, flat_inputs, train=False
)
return (
model_outputs.separated_audio,
model_outputs.label,
model_outputs.embedding,
)
logging.info('Creating converted_model...')
converted_model = export_utils.Jax2TfModelWrapper(
infer_fn, variables, [None, None, frame_size], False
)
logging.info('Exporting converted_model...')
converted_model.export_converted_model(
workdir, train_state.step, model_bundle.class_lists
)
def run(
mode: str,
config: config_dict.ConfigDict,
workdir: str,
tf_data_service_address: str,
) -> None:
"""Run the experiment."""
valid_dataset = None
train_dataset = None
if mode == 'train':
train_dataset, dataset_info = data_utils.get_dataset(
is_train=True,
tf_data_service_address=tf_data_service_address,
**config.train_dataset_config,
)
elif mode == 'eval':
valid_dataset, dataset_info = data_utils.get_dataset(
**config.eval_dataset_config
)
elif mode == 'export':
valid_dataset = None
dataset_info = None
else:
raise ValueError(f'Unknown run mode: "{mode}"!')
if (
dataset_info is not None
and dataset_info.features['audio'].sample_rate != config.sample_rate_hz
):
raise ValueError(
'Dataset sample rate must match config sample rate. To address this, '
        'set the sample rate in the config to {}.'.format(
dataset_info.features['audio'].sample_rate
)
)
model_bundle, train_state = initialize_model(
workdir=workdir, **config.init_config
)
if mode == 'train':
train_state = model_bundle.ckpt.restore_or_initialize(train_state)
train(
model_bundle,
train_state,
train_dataset,
loss_fn=config.loss_fn,
logdir=workdir,
**config.train_config,
)
elif mode == 'eval':
evaluate(
model_bundle,
train_state,
valid_dataset,
loss_fn=config.loss_fn,
workdir=workdir,
**config.eval_config,
)
elif mode == 'export':
export_tf_model(model_bundle, train_state, workdir, **config.export_config)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop."""
import functools
from typing import Callable, Sequence
from absl import logging
from chirp import export_utils
from chirp.data import utils as data_utils
from chirp.models import metrics
from chirp.models import output
from chirp.models import taxonomy_model
from chirp.taxonomy import class_utils
from chirp.train import utils
from clu import checkpoint
from clu import metric_writers
from clu import metrics as clu_metrics
from clu import periodic_actions
import flax
import flax.jax_utils as flax_utils
import jax
from jax import numpy as jnp
from jax import random
from ml_collections import config_dict
import numpy as np
import optax
import tensorflow as tf
EVAL_LOOP_SLEEP_S = 30
def get_keyed_map_fn(key):
def _map(**kwargs):
return metrics.average_precision(
scores=kwargs[f"{key}_logits"],
labels=kwargs[key],
label_mask=kwargs.get(f"{key}_mask", None),
)
return _map
def get_train_metrics(
keys: list[str], num_labels: dict[str, int]
) -> dict[str, type[clu_metrics.Metric]]:
"""Create a collection of metrics with cross-entropy and average precision."""
metrics_ = {"loss": clu_metrics.Average.from_output("loss")}
for key in keys:
metrics_[f"{key}_loss"] = utils.MultiAverage.create(
num_labels[key]
).from_output(f"{key}_loss")
metrics_[f"{key}_map"] = clu_metrics.Average.from_fun(get_keyed_map_fn(key))
return metrics_
def initialize_model(
model_config: config_dict.ConfigDict,
rng_seed: int,
input_shape: Sequence[int],
learning_rate: float,
workdir: str,
target_class_list: str,
optimizer: optax.GradientTransformation | None = None,
for_inference: bool = False,
) -> tuple[utils.ModelBundle, utils.TrainState]:
"""Creates model for training, eval, or inference.
Args:
model_config: A config dict of the model parameters.
    rng_seed: Used to seed the random number generator.
input_shape: Shape of the model inputs.
learning_rate: The learning rate to use for training.
workdir: The directory the checkpoint is stored in.
target_class_list: A list of target classes for the classifier output.
optimizer: The optimizer to use during training. Optional for when loading
pre-trained models for inference.
for_inference: Indicates whether the model is being initialized for
      inference (if false, initialized for training).
Note: learning_rate is unused (it's expected to be used in constructing the
`optimizer` argument), but it's left part of the function signature for
backwards compatibility with the config utils.
Returns:
A tuple of initialized ModelBundle and TrainState objects.
"""
del learning_rate
# Initialize random number generator
key = random.PRNGKey(rng_seed)
# Load model
model_init_key, key = random.split(key)
class_lists = class_utils.get_class_lists(target_class_list, True)
model = taxonomy_model.TaxonomyModel(
num_classes={k: len(v.classes) for (k, v) in class_lists.items()},
**model_config,
)
# Ensure input_shape is a tuple for concatenation.
input_shape = tuple(input_shape)
variables = model.init(
model_init_key, jnp.zeros((1,) + input_shape), train=False
)
model_state, params = flax.core.pop(variables, "params")
# NOTE: https://github.com/deepmind/optax/issues/160
params = flax.core.unfreeze(params)
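  # Unfreezing works around optax transformations that don't accept flax
  # FrozenDicts (see the issue linked above).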
# Initialize optimizer and handle constraints
if optimizer is None or for_inference:
opt_state = None
logging.info("No optimizer specified - loading model for inference.")
else:
opt_state = optimizer.init(params)
# Load checkpoint
ckpt = checkpoint.MultihostCheckpoint(workdir)
train_state = utils.TrainState(
step=0, params=params, opt_state=opt_state, model_state=model_state
)
return (
utils.ModelBundle(
model=model,
key=key,
ckpt=ckpt,
optimizer=optimizer,
class_lists=class_lists,
),
train_state,
)
def train(
model_bundle,
train_state,
train_dataset,
num_train_steps: int,
logdir: str,
log_every_steps: int,
checkpoint_every_steps: int,
loss_fn: Callable[
[jnp.ndarray, jnp.ndarray], jnp.ndarray
] = optax.sigmoid_binary_cross_entropy,
) -> None:
"""Train a model.
Args:
model_bundle: Static objects for conducting the experiment.
train_state: Initial utils.TrainState.
train_dataset: Training dataset.
num_train_steps: The number of training steps.
logdir: Directory to use for logging.
    log_every_steps: Interval (in steps) at which to write the training
      minibatch loss.
    checkpoint_every_steps: Interval (in steps) at which to checkpoint the
      model and training state.
loss_fn: Loss function used for training.
"""
train_iterator = train_dataset.as_numpy_iterator()
taxonomy_keys = ["label"]
taxonomy_loss_weight = model_bundle.model.taxonomy_loss_weight
if taxonomy_loss_weight != 0.0:
taxonomy_keys += utils.TAXONOMY_KEYS
train_metrics_collection = utils.NestedCollection.create(
**get_train_metrics(taxonomy_keys, model_bundle.model.num_classes)
)
# Forward pass and metrics
def forward(params, key, batch, model_state):
dropout_key, low_pass_key, patch_mask_key = random.split(key, num=3)
variables = {"params": params, **model_state}
model_outputs, model_state = model_bundle.model.apply(
variables,
batch["audio"],
train=True,
mutable=list(model_state.keys()),
rngs={
"dropout": dropout_key,
"low_pass": low_pass_key,
"patch_mask": patch_mask_key,
},
)
losses = utils.taxonomy_loss(
outputs=model_outputs,
taxonomy_loss_weight=taxonomy_loss_weight,
loss_fn=loss_fn,
**batch,
)
train_metrics = train_metrics_collection.gather_from_model_output(
**output.logits(model_outputs),
**losses,
**batch,
)
return jnp.mean(losses["loss"]), (train_metrics, model_state)
# Define update step
@functools.partial(jax.pmap, axis_name="batch")
def update_step(key, batch, train_state):
grads, (train_metrics, model_state) = jax.grad(forward, has_aux=True)(
train_state.params, key, batch, train_state.model_state
)
grads = jax.lax.pmean(grads, axis_name="batch")
updates, opt_state = model_bundle.optimizer.update(
grads, train_state.opt_state, train_state.params
)
params = optax.apply_updates(train_state.params, updates)
train_state = utils.TrainState(
step=train_state.step + 1,
params=params,
opt_state=opt_state,
model_state=model_state,
)
return train_metrics, train_state
initial_step = int(train_state.step)
train_state = flax_utils.replicate(train_state)
# Logging
writer = metric_writers.create_default_writer(logdir)
reporter = periodic_actions.ReportProgress(
num_train_steps=num_train_steps, writer=writer
)
# Training and evaluation loop
key = model_bundle.key
for step in range(initial_step, num_train_steps + 1):
with jax.profiler.StepTraceAnnotation("train", step_num=step):
batch = next(train_iterator)
step_key, key = random.split(key)
step_key = random.split(step_key, num=jax.local_device_count())
train_metrics, train_state = update_step(step_key, batch, train_state)
if step % log_every_steps == 0:
train_metrics = flax_utils.unreplicate(train_metrics).compute(
prefix="train"
)
utils.write_metrics(writer, step, train_metrics)
reporter(step)
if (step + 1) % checkpoint_every_steps == 0 or step == num_train_steps:
with reporter.timed("checkpoint"):
model_bundle.ckpt.save(flax_utils.unreplicate(train_state))
writer.close()
def evaluate(
model_bundle: utils.ModelBundle,
train_state: utils.TrainState,
valid_dataset: tf.data.Dataset,
workdir: str,
num_train_steps: int,
loss_fn: Callable[
[jnp.ndarray, jnp.ndarray], jnp.ndarray
] = optax.sigmoid_binary_cross_entropy,
eval_steps_per_checkpoint: int | None = None,
eval_sleep_s: int = EVAL_LOOP_SLEEP_S,
name: str = "valid",
):
"""Run evaluation."""
taxonomy_keys = ["label"]
taxonomy_loss_weight = model_bundle.model.taxonomy_loss_weight
if taxonomy_loss_weight != 0.0:
taxonomy_keys += utils.TAXONOMY_KEYS
# The metrics are the same as for training, but with rank-based metrics added.
metrics_ = get_train_metrics(taxonomy_keys, model_bundle.model.num_classes)
valid_metrics = {}
for key in taxonomy_keys:
valid_metrics[f"{key}_cmap"] = ((f"{key}_logits", key), metrics.cmap)
valid_metrics[f"{key}_roc_auc"] = ((f"{key}_logits", key), metrics.roc_auc)
metrics_["rank_metrics"] = utils.CollectingMetrics.from_funs(**valid_metrics)
valid_metrics_collection = utils.NestedCollection.create(**metrics_)
@functools.partial(jax.pmap, axis_name="batch")
def get_metrics(batch, train_state):
variables = {"params": train_state.params, **train_state.model_state}
kwargs = {"mask": batch["audio_mask"]} if "audio_mask" in batch else {}
model_outputs = model_bundle.model.apply(
variables, batch["audio"], train=False, **kwargs
)
losses = utils.taxonomy_loss(
outputs=model_outputs,
taxonomy_loss_weight=taxonomy_loss_weight,
loss_fn=loss_fn,
**batch,
)
return valid_metrics_collection.gather_from_model_output(
**output.logits(model_outputs),
**batch,
**losses,
)
@jax.jit
def split_batch(batch):
batch_size = batch["audio"].shape[0]
num_devices = jax.local_device_count()
device_batch_size = batch_size // num_devices
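    # Illustrative example: with a batch of 10 examples and 4 local devices,
    # device_batch_size is 2, so the first 8 examples become a (4, 2, ...)
    # device batch and the remaining 2 become a (1, 2, ...) remainder batch.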
def device_batch_fn(x):
return jnp.reshape(
x[: device_batch_size * num_devices],
(num_devices, device_batch_size) + x.shape[1:],
)
def remainder_batch_fn(x):
return x[device_batch_size * num_devices :][None]
return (
jax.tree_map(device_batch_fn, batch),
jax.tree_map(remainder_batch_fn, batch),
)
writer = metric_writers.create_default_writer(workdir)
reporter = periodic_actions.ReportProgress(
num_train_steps=num_train_steps, writer=writer
)
for train_state in utils.checkpoint_iterator(
train_state, model_bundle.ckpt, workdir, num_train_steps, eval_sleep_s
):
step = int(train_state.step)
replicated_train_state = flax_utils.replicate(train_state)
with reporter.timed("eval"):
valid_metrics = valid_metrics_collection.empty()
for s, batch in enumerate(valid_dataset.as_numpy_iterator()):
batch = jax.tree_map(np.asarray, batch)
        # Handle device batching if it hasn't been handled by the data pipeline
# already.
if batch["label"].ndim == 2:
even_batch, remainder_batch = split_batch(batch)
# It's possible for `even_batch` to be empty if the batch size is
# smaller than the local device count (in which case all examples in
# the batch are found in `remainder_batch`).
if even_batch["label"].shape[1] > 0:
new_valid_metrics = get_metrics(even_batch, replicated_train_state)
valid_metrics = valid_metrics.merge(
flax_utils.unreplicate(new_valid_metrics)
)
# It's also possible for `remainder_batch` to be empty if the batch
# size is an exact multiple of the local device count (in which case
# all examples in the batch are found in `even_batch`).
if remainder_batch["label"].shape[1] > 0:
new_valid_metrics = get_metrics(
remainder_batch,
# The remainder batch has shape [1, ...] rather than
# [jax.local_device_count(), ...].
jax.tree_map(lambda x: x[:1], replicated_train_state),
)
valid_metrics = valid_metrics.merge(
flax_utils.unreplicate(new_valid_metrics)
)
else:
new_valid_metrics = get_metrics(batch, replicated_train_state)
valid_metrics = valid_metrics.merge(
flax_utils.unreplicate(new_valid_metrics)
)
if (
eval_steps_per_checkpoint is not None
and s >= eval_steps_per_checkpoint
):
break
# Log validation loss
utils.write_metrics(writer, step, valid_metrics.compute(prefix=name))
writer.flush()
def export_tf_model(
model_bundle: utils.ModelBundle,
train_state: utils.TrainState,
workdir: str,
input_shape: tuple[int, ...],
num_train_steps: int,
eval_sleep_s: int = EVAL_LOOP_SLEEP_S,
polymorphic_batch: bool = True,
):
"""Export SavedModel and TFLite."""
for train_state in utils.checkpoint_iterator(
train_state, model_bundle.ckpt, workdir, num_train_steps, eval_sleep_s
):
variables = {"params": train_state.params, **train_state.model_state}
def infer_fn(audio_batch, variables):
model_outputs = model_bundle.model.apply(
variables, audio_batch, train=False
)
return model_outputs.label, model_outputs.embedding
if polymorphic_batch:
shape = (None,) + input_shape
else:
shape = (1,) + input_shape
converted_model = export_utils.Jax2TfModelWrapper(
infer_fn, variables, shape, False
)
converted_model.export_converted_model(
workdir, train_state.step, model_bundle.class_lists
)
def run(
mode: str,
config: config_dict.ConfigDict,
workdir: str,
tf_data_service_address: str,
) -> None:
"""Run the experiment."""
if mode.startswith("eval_"):
mode, name = mode.split("_", maxsplit=1)
config.eval_dataset_config = getattr(config.eval_dataset_config, name)
else:
name = "valid"
if mode == "train":
train_dataset, dataset_info = data_utils.get_dataset(
is_train=True,
tf_data_service_address=tf_data_service_address,
**config.train_dataset_config,
)
valid_dataset = None
elif mode == "eval":
valid_dataset, dataset_info = data_utils.get_dataset(
**config.eval_dataset_config
)
train_dataset = None
elif mode == "export":
train_dataset, valid_dataset, dataset_info = None, None, None
else:
raise ValueError(f"unknown mode ({mode})")
if (
dataset_info is not None
and dataset_info.features["audio"].sample_rate != config.sample_rate_hz
):
raise ValueError(
"Dataset sample rate must match config sample rate. To address this, "
"need to set the sample rate in the config to {}.".format(
dataset_info.features["audio"].sample_rate
)
)
model_bundle, train_state = initialize_model(
workdir=workdir, **config.init_config
)
if mode == "train":
train_state = model_bundle.ckpt.restore_or_initialize(train_state)
train(
model_bundle,
train_state,
train_dataset,
loss_fn=config.loss_fn,
logdir=workdir,
**config.train_config,
)
elif mode == "eval":
evaluate(
model_bundle,
train_state,
valid_dataset,
loss_fn=config.loss_fn,
workdir=workdir,
name=name,
**config.eval_config,
)
elif mode == "export":
export_tf_model(
model_bundle,
train_state,
workdir=workdir,
**config.export_config,
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop for MAE."""
import functools
from chirp.data import utils as data_utils
from chirp.models import mae
from chirp.models import taxonomy_model
from chirp.taxonomy import class_utils
from chirp.train import classifier
from chirp.train import utils
from clu import checkpoint
from clu import metric_writers
from clu import periodic_actions
import flax.jax_utils as flax_utils
import jax
from jax import numpy as jnp
from jax import random
from ml_collections import config_dict
import optax
def initialize_model(
model_config: config_dict.ConfigDict,
rng_seed: int,
input_shape: tuple[int, ...],
learning_rate: float,
workdir: str,
) -> tuple[utils.ModelBundle, utils.TrainState]:
"""Creates model for training, eval, or inference."""
del model_config
# Initialize random number generator
key = random.PRNGKey(rng_seed)
# Handle lazy computation
input_shape = tuple(s.get() if hasattr(s, "get") else s for s in input_shape)
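  # (entries may be ml_collections FieldReferences, whose concrete value is
  # obtained with .get()).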
# Load model
model_init_key, key = random.split(key)
model = mae.MaskedAutoencoder(
encoder=mae.Encoder(), decoder=mae.Decoder(output_size=input_shape)
)
variables = model.init(
model_init_key, jnp.zeros((1,) + input_shape), train=False
)
model_state, params = variables.pop("params")
# NOTE: https://github.com/deepmind/optax/issues/160
params = params.unfreeze()
# Initialize optimizer and handle constraints
optimizer = optax.adamw(
learning_rate=optax.cosine_decay_schedule(
init_value=2 * learning_rate,
# Assume 50 epochs with batches of 64
decay_steps=2_914_000 * 50 // 64,
alpha=1e-2,
),
b2=0.95,
)
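  # The decay_steps above works out to roughly 2.28M updates, i.e. presumably
  # num_examples * epochs / batch_size with ~2.914M training examples.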
opt_state = optimizer.init(params)
# Load checkpoint
ckpt = checkpoint.MultihostCheckpoint(workdir)
train_state = utils.TrainState(
step=0, params=params, opt_state=opt_state, model_state=model_state
)
return (
utils.ModelBundle(model=model, key=key, ckpt=ckpt, optimizer=optimizer),
train_state,
)
def initialize_finetune_model(
model_config: config_dict.ConfigDict,
rng_seed: int,
input_shape: tuple[int, ...],
learning_rate: float,
workdir: str,
target_class_list: str,
) -> tuple[classifier.utils.ModelBundle, classifier.utils.TrainState]:
"""Creates model for training, eval, or inference."""
# Initialize random number generator
key = random.PRNGKey(rng_seed)
# Handle lazy computation
input_shape = tuple(s.get() if hasattr(s, "get") else s for s in input_shape)
class_lists = class_utils.get_class_lists(target_class_list, True)
# Load model
model_init_key, key = random.split(key)
model = taxonomy_model.TaxonomyModel(
num_classes={k: len(v.classes) for (k, v) in class_lists.items()},
encoder=mae.Embedder(encoder=mae.Encoder(mask_rate=0.75)),
taxonomy_loss_weight=0.0,
)
variables = model.init(
model_init_key, jnp.zeros((1,) + input_shape), train=False
)
model_state, params = variables.pop("params")
# NOTE: https://github.com/deepmind/optax/issues/160
params = params.unfreeze()
# Load checkpoint
mae_model_bundle, mae_train_state = initialize_model(
**model_config.mae_init_config
)
mae_train_state = mae_model_bundle.ckpt.restore(mae_train_state)
params["encoder"]["encoder"] = mae_train_state.params["encoder"]
if mae_train_state.model_state:
raise ValueError(
"currently only models without model state "
"(such as batch statistics) are handled"
)
# Initialize optimizer and handle constraints
optimizer = optax.adam(learning_rate=learning_rate)
opt_state = optimizer.init(params)
# Load checkpoint
ckpt = checkpoint.MultihostCheckpoint(workdir)
train_state = classifier.utils.TrainState(
step=0, params=params, opt_state=opt_state, model_state=model_state
)
return (
classifier.utils.ModelBundle(
model=model,
key=key,
ckpt=ckpt,
optimizer=optimizer,
class_lists=class_lists,
),
train_state,
)
def train(
model_bundle,
train_state,
train_dataset,
num_train_steps: int,
logdir: str,
log_every_steps: int,
checkpoint_every_steps: int,
) -> None:
"""Train a model.
Args:
model_bundle: Static objects for conducting the experiment.
train_state: Initial utils.TrainState.
train_dataset: Training dataset.
num_train_steps: The number of training steps.
logdir: Directory to use for logging.
    log_every_steps: Interval (in steps) at which to write the training
      minibatch loss.
    checkpoint_every_steps: Interval (in steps) at which to checkpoint the
      model and training state.
"""
train_iterator = train_dataset.as_numpy_iterator()
# Forward pass and metrics
def forward(params, key, batch, model_state):
dropout_key, patch_mask_key = random.split(key)
variables = {"params": params, **model_state}
model_outputs, model_state = model_bundle.model.apply(
variables,
batch["audio"],
train=True,
mutable=list(model_state.keys()),
rngs={"dropout": dropout_key, "patch_mask": patch_mask_key},
)
# The decoded patches, the original patches, and indices of the ones that
# were masked
decoded_patches, patches, masked = model_outputs
loss = (
jnp.mean(
jnp.sum(
(
jnp.take_along_axis(
patches, masked[..., jnp.newaxis], axis=1
)
- jnp.take_along_axis(
decoded_patches, masked[..., jnp.newaxis], axis=1
)
)
** 2,
axis=-1,
)
)
/ model_bundle.model.encoder.mask_rate
)
b, h, w, c = batch["audio"].shape
ph, pw = model_bundle.model.encoder.patch_size
reconstructed = jnp.reshape(
decoded_patches, (b, h // ph, w // pw, ph, pw, c)
)
reconstructed = jnp.reshape(
jnp.swapaxes(reconstructed, -3, -4), (b, h, w, c)
)
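    # Un-patchify: (b, h//ph, w//pw, ph, pw, c) -> (b, h, w, c) so the
    # reconstruction can be visualized next to the original input.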
images = {
"original": batch["audio"][:1],
"reconstructed": reconstructed[:1],
}
return loss, ({"loss": loss, "images": images}, model_state)
# Define update step
@functools.partial(jax.pmap, axis_name="batch")
def update_step(key, batch, train_state):
grads, (train_metrics, model_state) = jax.grad(forward, has_aux=True)(
train_state.params, key, batch, train_state.model_state
)
grads = jax.lax.pmean(grads, axis_name="batch")
updates, opt_state = model_bundle.optimizer.update(
grads, train_state.opt_state, train_state.params
)
params = optax.apply_updates(train_state.params, updates)
train_state = utils.TrainState(
step=train_state.step + 1,
params=params,
opt_state=opt_state,
model_state=model_state,
)
return train_metrics, train_state
initial_step = int(train_state.step)
train_state = flax_utils.replicate(train_state)
# Logging
writer = metric_writers.create_default_writer(logdir)
reporter = periodic_actions.ReportProgress(
num_train_steps=num_train_steps, writer=writer
)
# Training and evaluation loop
key = model_bundle.key
for step in range(initial_step, num_train_steps + 1):
with jax.profiler.StepTraceAnnotation("train", step_num=step):
batch = next(train_iterator)
step_key, key = random.split(key)
step_key = random.split(step_key, num=jax.local_device_count())
train_metrics, train_state = update_step(step_key, batch, train_state)
train_metrics = flax_utils.unreplicate(train_metrics)
if step % log_every_steps == 0:
images = train_metrics.pop("images")
writer.write_scalars(step, train_metrics)
writer.write_summaries(step, images)
reporter(step)
if (step + 1) % checkpoint_every_steps == 0 or step == num_train_steps:
with reporter.timed("checkpoint"):
model_bundle.ckpt.save(flax_utils.unreplicate(train_state))
writer.close()
def run(
mode: str,
config: config_dict.ConfigDict,
workdir: str,
tf_data_service_address: str,
) -> None:
"""Run the experiment."""
if mode in ("train", "finetune"):
train_dataset, dataset_info = data_utils.get_dataset(
is_train=True,
tf_data_service_address=tf_data_service_address,
**config.train_dataset_config,
)
elif mode == "eval":
valid_dataset, dataset_info = data_utils.get_dataset(
**config.eval_dataset_config
)
elif mode == "export":
valid_dataset, dataset_info = None, None
if (
dataset_info is not None
and dataset_info.features["audio"].sample_rate != config.sample_rate_hz
):
raise ValueError(
"Dataset sample rate must match config sample rate. To address this, "
"need to set the sample rate in the config to {}.".format(
dataset_info.features["audio"].sample_rate
)
)
if mode == "train":
model_bundle, train_state = initialize_model(
workdir=workdir, **config.init_config
)
else:
model_bundle, train_state = initialize_finetune_model(
workdir=workdir, **config.init_config
)
if mode == "train":
train_state = model_bundle.ckpt.restore_or_initialize(train_state)
train(
model_bundle,
train_state,
train_dataset,
logdir=workdir,
**config.train_config,
)
if mode == "finetune":
train_state = model_bundle.ckpt.restore_or_initialize(train_state)
classifier.train(
model_bundle,
train_state,
train_dataset,
logdir=workdir,
**config.train_config,
)
elif mode == "eval":
classifier.evaluate(
model_bundle,
train_state,
valid_dataset,
loss_fn=config.loss_fn,
workdir=workdir,
**config.eval_config,
)
elif mode == "export":
classifier.export_tf_model(
model_bundle,
train_state,
workdir=workdir,
**config.export_config,
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utilities for training scripts."""
import itertools
import os
import time
from typing import Callable
from absl import logging
from chirp import path_utils
from chirp.models import output
from chirp.taxonomy import namespace
from clu import checkpoint
from clu import metrics as clu_metrics
import flax
from flax import linen as nn
import jax
from jax import numpy as jnp
import numpy as np
import optax
import tensorflow as tf
TAXONOMY_KEYS = ['genus', 'family', 'order']
@flax.struct.dataclass
class TrainState:
step: int
params: flax.core.scope.VariableDict
opt_state: optax.OptState
model_state: flax.core.scope.FrozenVariableDict
@flax.struct.dataclass
class ModelBundle:
model: nn.Module
key: jnp.ndarray
ckpt: checkpoint.Checkpoint
optimizer: optax.GradientTransformation | None = None
class_lists: dict[str, namespace.ClassList] | None = None
@flax.struct.dataclass
class MultiAverage(clu_metrics.Average):
"""Computes the average of all values on the last dimension."""
total: jnp.ndarray
count: jnp.ndarray
@classmethod
def create(cls, n: int):
return flax.struct.dataclass(
type('_InlineMultiAverage', (MultiAverage,), {'_n': n})
)
@classmethod
def empty(cls) -> clu_metrics.Metric:
# pytype: disable=attribute-error
return cls(
total=jnp.zeros(cls._n, jnp.float32), count=jnp.zeros(cls._n, jnp.int32)
)
# pytype: enable=attribute-error
@classmethod
def from_model_output(
cls, values: jnp.ndarray, mask: jnp.ndarray | None = None, **_
) -> clu_metrics.Metric:
if values.ndim == 0:
raise ValueError('expected a vector')
if mask is None:
mask = jnp.ones_like(values)
# Leading dimensions of mask and values must match.
if mask.shape[0] != values.shape[0]:
raise ValueError(
'Argument `mask` must have the same leading dimension as `values`. '
f'Received mask of dimension {mask.shape} '
f'and values of dimension {values.shape}.'
)
# Broadcast mask to the same number of dimensions as values.
if mask.ndim < values.ndim:
mask = jnp.expand_dims(
mask, axis=tuple(np.arange(mask.ndim, values.ndim))
)
mask = mask.astype(bool)
axes = tuple(np.arange(values.ndim - 1))
return cls(
total=jnp.where(mask, values, jnp.zeros_like(values)).sum(axis=axes),
count=jnp.where(
mask,
jnp.ones_like(values, dtype=jnp.int32),
jnp.zeros_like(values, dtype=jnp.int32),
).sum(axis=axes),
)
def compute(self):
return {
'mean': jnp.sum(self.total) / jnp.sum(self.count),
'individual': self.total / self.count,
}
class CollectingMetrics(clu_metrics.Metric):
"""Metrics that must be calculated on collected values.
To avoid having multiple metrics collect the same values (which could require
  lots of memory), this metric collects all values once and then applies
several functions to the collected values to compute metrics.
"""
@classmethod
def from_funs(cls, **funs):
"""Construct from a set of functions.
Args:
**funs: A mapping from metric names to 2-tuples, where the first element
is a list of model outputs that need to be collected, and the second
element is a function which will be applied to the collected model
outputs in order to calculate the final metric value.
Returns:
A metric class that computes metrics using collected values.
"""
names = list(
set(
itertools.chain.from_iterable(metric[0] for metric in funs.values())
)
)
@flax.struct.dataclass
class FromFuns(clu_metrics.CollectingMetric.from_outputs(names)):
"""Collecting metric which applies functions to collected values."""
def compute(self):
"""Compute metrics by applying functions to collected values.
Note that this deviates from the standard `compute` signature, which
normally returns a scalar or array.
Returns:
A dictionary mapping metric names to compute values, which can either
be scalars/arrays or another dictionary of computed metrics.
"""
with jax.default_device(jax.devices('cpu')[0]):
values = super().compute()
return {
metric_name: metric[1](*(values[name] for name in metric[0]))
for metric_name, metric in funs.items()
}
compute_value = None
return FromFuns
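# Illustrative sketch added by the editor (not part of the original module):
# builds a CollectingMetrics class that gathers two model outputs across
# batches and applies a simple stand-in function once at compute time. The
# output names and the accuracy function are made up for illustration only.
def _collecting_metrics_example():
  def _binary_accuracy(logits, labels):
    return jnp.mean((logits > 0.0) == (labels > 0.5))
  metric_cls = CollectingMetrics.from_funs(
      accuracy=(('label_logits', 'label'), _binary_accuracy)
  )
  metric = metric_cls.from_model_output(
      label_logits=jnp.array([[1.0, -1.0]]), label=jnp.array([[1.0, 0.0]])
  )
  # Expected: {'accuracy': 1.0}.
  return metric.compute()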
def flatten(dict_, parent_key='', sep='_'):
"""Recursively flatten dictionaries with string keys.
Args:
dict_: The dictionary to flatten.
parent_key: The name of the parent key.
sep: The separator used to combine keys.
Returns:
A flat dictionary.
"""
flattened_dict = {}
for k, v in dict_.items():
child_key = parent_key + sep + k if parent_key else k
if isinstance(v, dict):
flattened_dict |= flatten(v, child_key, sep=sep)
else:
flattened_dict[child_key] = v
return flattened_dict
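# Illustrative sketch added by the editor (not part of the original module):
# `flatten` turns nested metric dictionaries into the flat keys consumed by the
# metric writer; the sample values below are made up for illustration only.
def _flatten_example():
  # Expected: {'rank_metrics_label_cmap': 0.9, 'loss': 0.1}.
  return flatten({'rank_metrics': {'label_cmap': 0.9}, 'loss': 0.1})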
class NestedCollection(clu_metrics.Collection):
"""Collection that handles metrics which return multiple values."""
@classmethod
def create(cls, **metrics):
# TODO(bartvm): This should be fixed in parent class
return flax.struct.dataclass(
type('_InlineCollection', (cls,), {'__annotations__': metrics})
)
def compute(self, prefix: str = ''):
return flatten(super().compute(), parent_key=prefix)
def compute_values(self, prefix: str = ''):
return flatten(super().compute_values(), parent_key=prefix)
def write_metrics(writer, step, metrics):
"""Helper function for logging both scalars and arrays."""
scalars = {k: v for k, v in metrics.items() if v.ndim == 0}
summaries = {k: v for k, v in metrics.items() if v.ndim != 0}
writer.write_scalars(step, scalars)
writer.write_summaries(step, summaries)
def wait_for_next_checkpoint(
train_state, ckpt, last_ckpt_path, workdir, sleep_s: int = 5
):
"""Wait for the next checkpoint to arrive and load train_state."""
while True:
next_ckpt_path = ckpt.get_latest_checkpoint_to_restore_from()
if next_ckpt_path is None:
logging.warning('No checkpoint found; sleeping.')
time.sleep(sleep_s)
continue
elif next_ckpt_path == last_ckpt_path:
logging.warning('No new checkpoint found; sleeping.')
time.sleep(sleep_s)
continue
try:
new_train_state = ckpt.restore(train_state, next_ckpt_path)
break
except tf.errors.NotFoundError:
logging.warning(
'Checkpoint %s not found in workdir %s',
ckpt.latest_checkpoint,
workdir,
)
time.sleep(sleep_s)
continue
return new_train_state, next_ckpt_path
def checkpoint_iterator(
train_state: TrainState,
ckpt: checkpoint.Checkpoint,
workdir: str,
num_train_steps: int,
sleep_s: int = 5,
):
"""Iterate over checkpoints produced by the train job."""
last_step = -1
last_ckpt_path = ''
elapsed = -1
st = time.time()
while last_step < num_train_steps:
if elapsed is None:
elapsed = time.time() - st
logging.info(
'Finished processing checkpoint %d in %8.2f s', last_step, elapsed
)
new_ckpt_path = ckpt.get_latest_checkpoint_to_restore_from()
if new_ckpt_path is None:
logging.warning('No checkpoint found; sleeping.')
time.sleep(sleep_s)
continue
elif new_ckpt_path == last_ckpt_path:
logging.warning('No new checkpoint found; sleeping.')
time.sleep(sleep_s)
continue
try:
new_train_state = ckpt.restore(train_state, new_ckpt_path)
except tf.errors.NotFoundError:
logging.warning(
'Checkpoint %s not found in workdir %s',
ckpt.latest_checkpoint,
workdir,
)
time.sleep(sleep_s)
continue
except Exception as error:
logging.warning(
          'Unhandled exception while restoring checkpoint %s from workdir %s.',
ckpt.latest_checkpoint,
workdir,
)
logging.error(error)
time.sleep(sleep_s)
continue
last_ckpt_path = new_ckpt_path
train_state = new_train_state
last_step = int(train_state.step)
elapsed = None
st = time.time()
logging.info('Loaded checkpoint at step %d', int(train_state.step))
yield train_state
def taxonomy_loss(
outputs: output.TaxonomicOutput,
taxonomy_loss_weight: float,
loss_fn: Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray],
**kwargs,
) -> jnp.ndarray:
"""Computes the mean loss across taxonomic labels."""
losses = {'label_loss': loss_fn(getattr(outputs, 'label'), kwargs['label'])}
losses['loss'] = jnp.mean(losses['label_loss'], axis=-1)
if taxonomy_loss_weight != 0:
losses.update(
{
f'{key}_loss': loss_fn(getattr(outputs, key), kwargs[key])
for key in TAXONOMY_KEYS
if key in kwargs
}
)
losses['loss'] = losses['loss'] + sum(
taxonomy_loss_weight * jnp.mean(losses[f'{key}_loss'], axis=-1)
for key in TAXONOMY_KEYS
)
return losses # pytype: disable=bad-return-type # jax-ndarray
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop."""
import enum
import functools
import os
import time
from typing import Any, Callable
from absl import logging
from chirp.data import utils as data_utils
from chirp.models import frontend as frontend_models
from chirp.models import hubert
from chirp.models import layers
from chirp.models import metrics
from chirp.models import output
from chirp.models import quantizers
from chirp.taxonomy import class_utils
from chirp.train import utils
from clu import checkpoint
from clu import metric_writers
from clu import metrics as clu_metrics
from clu import periodic_actions
import flax
from flax import traverse_util
import flax.jax_utils as flax_utils
import jax
from jax import numpy as jnp
from jax import random
from jax import tree_util
from jax.experimental import jax2tf
from ml_collections import config_dict
import numpy as np
import optax
import tensorflow as tf
EVAL_LOOP_SLEEP_S = 30
def filter_loss(loss, keep_inds):
"""Filters `loss` based on `keep_inds`.
Args:
loss: [ns, bsz, sz]. The loss for each frame (sz) in each batch sample (bsz)
for each quantizer section (ns) with ns > 1 if using product quantization.
keep_inds: [bsz, sz]. A mask that determines which frames to consider.
Returns:
loss_filtered: [ns, bsz, sz]. A jnp.array that is such that averaging over
it yields the same result as averaging over loss[keep_inds], which we can't
compute directly due to a concretization error.
"""
# First, compute the mean of the entries to keep, as per `keep_inds`.
loss_filtered_zeros = jnp.where(jnp.squeeze(keep_inds), loss, 0)
mean_of_kept = jnp.sum(loss_filtered_zeros) / jnp.sum(keep_inds)
# Now replace the entries of `loss` that we don't want to keep by this mean.
loss_filtered = jnp.where(keep_inds, loss, mean_of_kept)
return loss_filtered
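# Illustrative sketch added by the editor (not part of the original module): a
# tiny numeric check of the `filter_loss` trick. Replacing dropped entries with
# the mean of the kept entries keeps the overall average equal to the average
# over only the kept entries, without boolean indexing (which is not allowed
# under jit). The values below are made up for illustration only.
def _filter_loss_example():
  loss = jnp.array([[[1.0, 3.0, 100.0]]])  # [ns=1, bsz=1, sz=3].
  keep_inds = jnp.array([[True, True, False]])  # [bsz=1, sz=3].
  # jnp.mean(filter_loss(loss, keep_inds)) == mean([1.0, 3.0]) == 2.0.
  return jnp.mean(filter_loss(loss, keep_inds))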
def filtered_hubert_loss_from_outputs(
outputs: hubert.HubertOutput, keep_inds: jnp.ndarray, **unused_kwargs
) -> jnp.ndarray:
"""Cross entropy from model outputs for the given subset of `keep_inds`."""
logits = outputs.logits
targets = outputs.targets
# `logits` and `targets` are lists whose length will be the number of
# quantizers `nq`.
losses = []
  # Each logit and target has shape [ns, bsz, sz, nc].
for logit, target in zip(logits, targets):
# [ns, bsz, sz].
loss = optax.softmax_cross_entropy(logit, target)
# [ns, bsz, sz].
loss_filtered = filter_loss(loss, keep_inds)
losses.append(loss_filtered)
# [nq, ns, bsz, sz].
losses = jnp.stack(losses, axis=0)
return losses
def hubert_loss_from_outputs(
outputs: hubert.HubertOutput,
alpha: float,
hubert_loss_mult: float,
**unused_kwargs,
) -> jnp.ndarray:
"""Cross entropy computed from model outputs."""
mask_idc = outputs.mask_idc
# Compute the loss on the unmasked and masked frames separately.
loss_u = filtered_hubert_loss_from_outputs(
outputs, jnp.where(mask_idc, False, True)
)
loss_m = filtered_hubert_loss_from_outputs(
outputs, jnp.where(mask_idc, True, False)
)
return hubert_loss_mult * (alpha * loss_m + (1 - alpha) * loss_u)
def quantizer_loss(
outputs: hubert.HubertOutput, quant_loss_mult: float, **unused_kwargs
) -> jnp.ndarray:
"""Get quantization loss from model outputs."""
del unused_kwargs
# [bsz, sz, csz] or [bsz, sz, 1] (depending on the quantizer).
quant_loss = outputs.quantization_loss
quant_loss = jnp.squeeze(jnp.mean(quant_loss, -1))
# [bsz, sz].
return quant_loss * quant_loss_mult
def taxonomy_cross_entropy(
outputs: hubert.HubertOutput,
taxonomy_loss_weight: float,
label: jnp.ndarray,
genus: jnp.ndarray | None = None,
family: jnp.ndarray | None = None,
order: jnp.ndarray | None = None,
**unused_kwargs,
) -> jnp.ndarray:
"""Computes mean cross entropy across taxonomic labels."""
def aggregate_losses(preds, target):
# Iterate over the label made from different readout points.
losses = []
for l in preds:
losses.append(
jnp.mean(optax.sigmoid_binary_cross_entropy(l, target), axis=-1)
)
return jnp.sum(jnp.stack(losses, axis=0), axis=0)
mean = aggregate_losses(outputs.label, label)
if taxonomy_loss_weight != 0:
mean += taxonomy_loss_weight * aggregate_losses(outputs.genus, genus)
mean += taxonomy_loss_weight * aggregate_losses(outputs.family, family)
mean += taxonomy_loss_weight * aggregate_losses(outputs.order, order)
return mean
def supervised_loss(
outputs: hubert.HubertOutput,
taxonomy_loss_weight: float,
readout_loss_mult: float,
label: jnp.ndarray,
genus: jnp.ndarray | None = None,
family: jnp.ndarray | None = None,
order: jnp.ndarray | None = None,
**unused_kwargs,
) -> jnp.ndarray:
"""Compute classification loss for all taxonomy heads."""
del unused_kwargs
if not readout_loss_mult:
# Avoid computing the loss if not needed.
# [bsz, sz].
return jnp.zeros(outputs.logits[0].shape[:-1])
loss = taxonomy_cross_entropy(
outputs, taxonomy_loss_weight, label, genus, family, order
) # [bsz].
# Make it [bsz, sz] so that it can be element-wise added to other losses.
sz = outputs.logits[0].shape[-2]
loss = jnp.repeat(jnp.expand_dims(loss, axis=-1), axis=-1, repeats=sz)
return loss * readout_loss_mult
def keyed_cross_entropy(
key: str,
outputs: hubert.HubertOutput,
readout_index: int = 0,
**kwargs,
) -> jnp.ndarray | None:
"""Cross entropy for the specified taxonomic label set."""
outputs = getattr(outputs, key)
outputs = outputs[readout_index]
ce = optax.sigmoid_binary_cross_entropy(outputs, kwargs[key])
return ce
def keyed_map(
key: str, outputs: hubert.HubertOutput, readout_index: int = 0, **kwargs
) -> jnp.ndarray | None:
outputs = getattr(outputs, key)
outputs = outputs[readout_index]
return metrics.average_precision(scores=outputs, labels=kwargs[key])
def final_loss(
outputs: hubert.HubertOutput,
alpha: float,
quant_loss_mult: float,
readout_loss_mult: float,
hubert_loss_mult: float,
**kwargs_for_supervised,
) -> jnp.ndarray | None:
"""Get the final loss to use for training."""
# [bsz, sz].
quant_loss = quantizer_loss(outputs, quant_loss_mult)
if not hubert_loss_mult and not readout_loss_mult:
return quant_loss
# [bsz, sz].
readout_loss = supervised_loss(
outputs, readout_loss_mult=readout_loss_mult, **kwargs_for_supervised
)
# [nq, ns, bsz, sz].
hubert_loss = hubert_loss_from_outputs(
outputs, alpha, hubert_loss_mult=hubert_loss_mult
)
# Make the shapes match so that these losses can be added elementwise.
nq, ns, _, _ = hubert_loss.shape
quant_loss = jnp.repeat(jnp.expand_dims(quant_loss, 0), ns, axis=0)
quant_loss = jnp.repeat(jnp.expand_dims(quant_loss, 0), nq, axis=0)
readout_loss = jnp.repeat(jnp.expand_dims(readout_loss, 0), ns, axis=0)
readout_loss = jnp.repeat(jnp.expand_dims(readout_loss, 0), nq, axis=0)
return quant_loss + hubert_loss + readout_loss
def cluster_targets_metrics(
outputs: hubert.HubertOutput, key: str, **unused_kwargs
) -> jnp.ndarray | None:
"""Get the final loss to use for training."""
del unused_kwargs
assert key.startswith((
"n_masked_per_sample",
"n_per_cluster",
"max_per_cluster",
"min_per_cluster",
"h_diversity",
))
# A list of [ns, bsz, sz, nc].
all_targets = outputs.targets
mask_idc = outputs.mask_idc
n_masked_per_sample = jnp.sum(mask_idc, axis=1) # [bsz].
ret = {"n_masked_per_sample": n_masked_per_sample}
for i, targets in enumerate(all_targets):
nc = targets.shape[-1]
targets = jnp.reshape(targets, (-1, nc)) # [ns * bsz * sz, nc].
n_per_cluster = jnp.sum(targets, axis=0) # [nc].
max_per_cluster = jnp.max(n_per_cluster)
min_per_cluster = jnp.min(n_per_cluster)
diversity = jnp.mean(targets, axis=0) # [nc]
h_diversity = -jnp.sum(diversity * jnp.log2(diversity + 1e-8))
ret.update({
"n_per_cluster_{}".format(i): n_per_cluster,
"max_per_cluster_{}".format(i): max_per_cluster,
"min_per_cluster_{}".format(i): min_per_cluster,
"h_diversity_{}".format(i): h_diversity,
})
return ret[key]
def get_train_metrics(
keys: list[str],
num_labels: dict[str, int],
alpha: float,
readout_loss_mult: float,
hubert_loss_mult: float,
quantizer_points: list[int],
readout_points: list[int],
) -> dict[str, type[clu_metrics.Metric]]:
"""Create a collection of metrics with cross-entropy and average precision."""
metrics_ = {
"loss": clu_metrics.Average.from_output("loss"),
"learning_rate": clu_metrics.LastValue.from_output("learning_rate"),
"hubert_loss": clu_metrics.Average.from_fun(
functools.partial(
hubert_loss_from_outputs,
alpha=alpha,
hubert_loss_mult=hubert_loss_mult,
)
),
"quantizer_loss": clu_metrics.Average.from_output("quantizer_loss"),
"supervised_loss": clu_metrics.Average.from_fun(
functools.partial(
supervised_loss, readout_loss_mult=readout_loss_mult
)
),
}
for i, block_ind in enumerate(quantizer_points):
block_name = "late_fs_{}".format(block_ind) if block_ind >= 0 else "earlyfs"
metrics_.update({
"n_per_cluster_{}".format(block_name): clu_metrics.Average.from_fun(
functools.partial(
cluster_targets_metrics, key="n_per_cluster_{}".format(i)
)
),
"max_per_cluster_{}".format(block_name): clu_metrics.Average.from_fun(
functools.partial(
cluster_targets_metrics, key="max_per_cluster_{}".format(i)
)
),
"min_per_cluster_{}".format(block_name): clu_metrics.Average.from_fun(
functools.partial(
cluster_targets_metrics, key="min_per_cluster_{}".format(i)
)
),
"h_diversity_{}".format(block_name): clu_metrics.Average.from_fun(
functools.partial(
cluster_targets_metrics, key="h_diversity_{}".format(i)
)
),
})
for i, block_ind in enumerate(readout_points):
for key in keys:
metrics_.update({
f"{key}_{block_ind}_xentropy": utils.MultiAverage.create(
num_labels[key]
).from_fun(
functools.partial(keyed_cross_entropy, key=key, readout_index=i)
),
f"{key}_{block_ind}_map": clu_metrics.Average.from_fun(
functools.partial(keyed_map, key=key, readout_index=i)
),
})
return metrics_
class LearningRateSchedule(enum.Enum):
"""A point in the architecture to add a quantizer."""
PIECEWISE_LINEAR = "piecewise_linear"
PIECEWISE_COSINE = "piecewise_cosine"
COSINE_DECAY = "cosine_decay"
# Projected gradient descent utilities
def mask_by_name(name, pytree):
"""Create a mask which is only true for leaves with the given name."""
flat_tree = traverse_util.flatten_dict(pytree)
mask = {k: k[-1] == name for k in flat_tree}
return traverse_util.unflatten_dict(mask)
def project(min_value: float, max_value: float) -> optax.GradientTransformation:
"""Optax gradient transformation that projects values within a range."""
def clip_value(updates, params):
return tree_util.tree_map(
lambda p, u: jnp.clip(p + u, min_value, max_value) - p, params, updates
)
return optax.stateless(clip_value)
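# Illustrative sketch added by the editor (not part of the original module):
# the usual projected-gradient pattern built from `mask_by_name` and `project`.
# Only parameters whose leaf name matches 'smoothing_coef' (a made-up name
# here) are clipped back into [0, 1] after each Adam update.
def _projected_adam_example(params):
  return optax.chain(
      optax.adam(learning_rate=1e-3),
      optax.masked(project(0.0, 1.0), mask_by_name('smoothing_coef', params)),
  )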
def initialize_model(
model_config: config_dict.ConfigDict,
rng_seed: int,
input_shape: tuple[int, ...],
learning_rate: float,
start_learning_rate: float,
workdir: str,
learning_rate_schedule: LearningRateSchedule,
num_train_steps: int,
quantizer_config: config_dict.ConfigDict,
base_quantizer_config: config_dict.ConfigDict,
frontend_config: config_dict.ConfigDict,
early_fs_config: config_dict.ConfigDict,
reload_quantizer_from: str,
reload_hubert_from: str,
reload_hubert_omit_quantizers: bool,
target_class_list: str,
early_fs_class: Callable[..., Any] | None = layers.EarlyFeatureExtractor,
**unused_kwargs,
):
"""Creates model for training, eval, or inference."""
del unused_kwargs
# Initialize random number generator
key = random.PRNGKey(rng_seed)
# Load model
model_init_key, mask_key = random.split(key)
class_lists = class_utils.get_class_lists(target_class_list, True)
num_classes = {k: len(v.classes) for (k, v) in class_lists.items()}
# Initialize the quantizer.
if quantizer_config.use_entropy_quantizer:
kwargs = {
"num_centroids": base_quantizer_config.num_centroids,
"gamma": base_quantizer_config.gamma,
}
quantizer_class = quantizers.VectorQuantizerEnt
else:
kwargs = {
"num_centroids": base_quantizer_config.num_centroids,
"demean": True,
"rescale": True,
}
quantizer_class = quantizers.VectorQuantizer
quantizer_list = []
for _ in range(len(model_config.quantizer_points)):
if (
quantizer_config.strategy
== quantizers.QuantizationStrategy.PRODUCT_QUANTIZATION.value
):
base_quantizers = [
quantizer_class(**kwargs)
for _ in range(quantizer_config.num_sections)
]
quantizer = quantizers.ProductQuantizer(base_quantizers=base_quantizers)
elif (
quantizer_config.strategy
== quantizers.QuantizationStrategy.RESIDUAL_QUANTIZATION.value
):
base_quantizers = [
quantizer_class(**kwargs)
for _ in range(quantizer_config.num_sections)
]
quantizer = quantizers.ResidualQuantizer(quantizers=base_quantizers)
quantizer_list.append(quantizer)
# Initialize the frontend.
frontend = None
if not frontend_config.omit_frontend:
frontend = frontend_models.MelSpectrogram(
features=frontend_config.features,
stride=frontend_config.stride,
kernel_size=frontend_config.kernel_size,
sample_rate=frontend_config.sample_rate,
freq_range=frontend_config.freq_range,
scaling_config=frontend_config.scaling_config,
)
# Initialize the early feature extractor.
if model_config.use_raw_audio:
if early_fs_config.omit_earlyfs:
raise ValueError(
"Expected the early feature extractor to be provided if "
"using raw audio."
)
if (
hubert.QuantizerPoints.FRONTEND.value in model_config.quantizer_points
and frontend is None
):
raise ValueError(
"Expected frontend to be provided in order to "
"perform quantization on the frontend outputs."
)
# The original architecture, from wav2vec, which leads to 500 frames.
conv_layer_tuples = tuple([
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
])
early_fs = early_fs_class(
dropout_prob=early_fs_config.dropout_prob,
activation=early_fs_config.activation,
conv_layer_tuples=conv_layer_tuples,
deprecated_group_conv=early_fs_config.deprecated_group_conv,
)
else:
if early_fs_config.omit_earlyfs:
early_fs = None
else:
if early_fs_config.num_frames not in [125, 63, 32, 16]:
raise ValueError(
"Expected early_fs_config.num_frames to be 125, 63, 32 or 16."
)
if frontend is None:
# Their original architecture led to 500 frames which caused OOM.
# Added 2 additional conv layers with stride 2 which makes it 125.
# Still was getting OOM with this with batch size 128, so reduced to 64.
conv_layer_tuples = tuple([
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
(512, 2, 2),
(512, 2, 2),
])
else:
nf = 512
if early_fs_config.num_frames == 125:
# With this configuration, the number of frames is reduced from 500 to
# 125 and the framerate is reduced from 100Hz (which the frontend
          # outputs) to 25Hz.
conv_layer_tuples = tuple([
(nf, 10, 2),
(nf, 3, 2),
(nf, 3, 1),
(nf, 3, 1),
(nf, 3, 1),
(nf, 2, 1),
(nf, 2, 1),
])
elif early_fs_config.num_frames == 63:
conv_layer_tuples = tuple([
(nf, 10, 2),
(nf, 3, 2),
(nf, 3, 2),
(nf, 3, 1),
(nf, 3, 1),
(nf, 2, 1),
(nf, 2, 1),
])
elif early_fs_config.num_frames == 32:
conv_layer_tuples = tuple([
(nf, 10, 2),
(nf, 3, 2),
(nf, 3, 2),
(nf, 3, 2),
(nf, 3, 1),
(nf, 2, 1),
(nf, 2, 1),
])
elif early_fs_config.num_frames == 16:
conv_layer_tuples = tuple([
(nf, 10, 2),
(nf, 3, 2),
(nf, 3, 2),
(nf, 3, 2),
(nf, 3, 2),
(nf, 2, 1),
(nf, 2, 1),
])
early_fs = early_fs_class(
dropout_prob=early_fs_config.dropout_prob,
activation=early_fs_config.activation,
conv_layer_tuples=conv_layer_tuples,
)
# Now set up the HuBERT model.
model = hubert.HuBERTModel(
num_classes=num_classes,
quantizer=quantizer_list,
frontend=frontend,
early_feature_extractor=early_fs,
**model_config,
)
variables = model.init(
model_init_key,
jnp.zeros((1,) + input_shape),
train=False,
mask_key=mask_key,
train_mode_quantizer=False,
)
model_state, params = flax.core.pop(variables, "params")
# NOTE: https://github.com/deepmind/optax/issues/160
params = flax.core.unfreeze(params)
# Define the learning rate schedule for HuBERT.
learning_rate_schedule = LearningRateSchedule(learning_rate_schedule)
if learning_rate_schedule is LearningRateSchedule.PIECEWISE_LINEAR:
# peak_scaling factor is such that if we multiply the initial learning rate
# with it, we get the intended peak learning rate.
peak_scaling_factor = learning_rate / start_learning_rate
learning_rate = optax.piecewise_interpolate_schedule(
"linear",
init_value=start_learning_rate,
boundaries_and_scales={
int(0.08 * num_train_steps): peak_scaling_factor,
num_train_steps: start_learning_rate,
},
)
elif learning_rate_schedule is LearningRateSchedule.COSINE_DECAY:
# only `start_learning_rate` and `num_train_steps` are used in this case.
learning_rate = optax.cosine_decay_schedule(
init_value=start_learning_rate,
decay_steps=num_train_steps,
)
else:
raise ValueError("unknown learning rate schedule")
# Initialize optimizer and handle constraints
optimizer = optax.adam(learning_rate=learning_rate)
opt_state = optimizer.init(params)
# Load checkpoint
ckpt = checkpoint.MultihostCheckpoint(workdir)
train_state = utils.TrainState(
step=0, params=params, opt_state=opt_state, model_state=model_state
)
did_reload = False
num_attempts = 0
while not did_reload and num_attempts < 5:
try:
train_state = ckpt.restore_or_initialize(train_state)
did_reload = True
break
except tf.errors.NotFoundError:
logging.warning(
"Reloading from %s failed. Taking a nap and will try again.", workdir
)
time.sleep(5)
except: # pylint: disable=bare-except
logging.warning(
(
"Reloading from %s failed for some unexpected reason. Taking a"
" nap and will try again."
),
workdir,
)
time.sleep(5)
num_attempts += 1
if reload_quantizer_from:
ckpt_to_reload = checkpoint.MultihostCheckpoint(reload_quantizer_from)
did_reload = False
num_attempts = 0
while not did_reload and num_attempts < 5:
try:
reloaded_quantizer = ckpt_to_reload.restore(None)
did_reload = True
break
except tf.errors.NotFoundError:
logging.warning(
"Reloading from %s failed. Taking a nap and will try again.",
reload_quantizer_from,
)
time.sleep(5)
num_attempts += 1
if "quantizer" in reloaded_quantizer["params"].keys():
quantizer_key = "quantizer"
elif "quantizer_0" in reloaded_quantizer["params"].keys():
quantizer_key = "quantizer_0"
else:
raise RuntimeError(
"Unsure which parameters correspond to the quantizer, "
"so unable to reload it. The reloaded params do not contain a key "
"'quantizer' nor 'quantizer_0'."
)
train_state.params[quantizer_key] = reloaded_quantizer["params"][ # pytype: disable=unsupported-operands # py310-upgrade
quantizer_key
]
if reload_hubert_from:
ckpt_to_reload = checkpoint.MultihostCheckpoint(reload_hubert_from)
did_reload = False
num_attempts = 0
while not did_reload and num_attempts < 5:
try:
reloaded_hubert = ckpt_to_reload.restore(None)
did_reload = True
break
except tf.errors.NotFoundError:
logging.warning(
"Reloading from %s failed. Taking a nap and will try again.",
reload_hubert_from,
)
time.sleep(5)
num_attempts += 1
logging.info(
"Reloaded HuBERT params with keys %s", reloaded_hubert["params"].keys()
)
for k, v in reloaded_hubert["params"].items():
# Since this reloading is done for continuing to train HuBERT with a new
# quantizer (in a different space), we assume it's best to re-initialize
# the projections between the features and these new codes.
if reload_hubert_omit_quantizers and (
k.startswith("codes_proj")
or k.startswith("final_proj")
or k.startswith("quantizer")
):
logging.info("Ignoring HuBERT parameters for key %s.", k)
continue
train_state.params[k] = (
v # pytype: disable=unsupported-operands # py310-upgrade
)
logging.info("Assigned reloaded HuBERT parameters for key %s.", k)
return (
utils.ModelBundle(model=model, key=key, ckpt=ckpt, optimizer=optimizer),
train_state,
learning_rate,
)
def train(
model_bundle,
train_state,
learning_rate_schedule,
train_dataset,
num_train_steps: int,
logdir: str,
log_every_steps: int,
checkpoint_every_steps: int,
num_quantizer_pretrain_steps: int,
quant_loss_mult: float,
readout_loss_mult: float,
hubert_loss_mult: float,
reload_quantizer=False,
) -> None:
"""Train a model.
Args:
model_bundle: Static objects for conducting the experiment.
train_state: Initial utils.TrainState.
learning_rate_schedule: The schedule for the learning rate.
train_dataset: Training dataset.
num_train_steps: The number of training steps.
logdir: Directory to use for logging.
log_every_steps: Write the training minibatch loss.
checkpoint_every_steps: Checkpoint the model and training state.
num_quantizer_pretrain_steps: The number of steps to train the quantizer
      only before beginning to train all parameters end-to-end.
quant_loss_mult: The multiplier for the quantizer loss in the combined loss
used for training.
readout_loss_mult: The multiplier for the readout loss in the combined loss
used for training.
hubert_loss_mult: The multiplier for the HuBERT loss in the combined loss
used for training.
reload_quantizer: Whether to reload a pre-trained quantizer. If this is the
case, it is kept frozen.
"""
if reload_quantizer and num_quantizer_pretrain_steps:
raise ValueError(
"Cannot have both num_quantizer_steps being nonzero and "
"reload_quantizer being True."
)
train_iterator = train_dataset.as_numpy_iterator()
taxonomy_keys = ["label"]
taxonomy_loss_weight = model_bundle.model.taxonomy_loss_weight
if taxonomy_loss_weight != 0.0:
taxonomy_keys += utils.TAXONOMY_KEYS
train_metrics_collection = utils.NestedCollection.create(
**get_train_metrics(
taxonomy_keys,
model_bundle.model.num_classes,
alpha=model_bundle.model.alpha,
readout_loss_mult=readout_loss_mult,
hubert_loss_mult=hubert_loss_mult,
quantizer_points=model_bundle.model.quantizer_points,
readout_points=model_bundle.model.readout_points,
)
)
@functools.partial(jax.pmap, axis_name="batch", static_broadcasted_argnums=0)
def update_step(quantizer_pretrain, key, batch, train_state, mask_key):
dropout_key, low_pass_key = random.split(key)
def step(params, model_state):
variables = {"params": params, **model_state}
x = jnp.squeeze(batch["audio"])
model_outputs, model_state = model_bundle.model.apply(
variables,
x,
train=True,
mask_key=mask_key,
train_mode_quantizer=True,
mutable=list(model_state.keys()),
rngs={
"dropout": dropout_key,
"low_pass": low_pass_key,
},
)
quantizer_loss_ = quantizer_loss(
model_outputs, quant_loss_mult=quant_loss_mult
)
final_loss_ = final_loss(
model_outputs,
taxonomy_loss_weight=taxonomy_loss_weight,
alpha=model_bundle.model.alpha,
quant_loss_mult=quant_loss_mult,
readout_loss_mult=readout_loss_mult,
hubert_loss_mult=hubert_loss_mult,
**batch,
)
train_metrics = train_metrics_collection.gather_from_model_output(
outputs=model_outputs,
loss=final_loss_,
quantizer_loss=quantizer_loss_,
learning_rate=learning_rate_schedule(train_state.step),
taxonomy_loss_weight=taxonomy_loss_weight,
**batch,
# CmAP expects logits to be passed as dict instead of dataclass
**output.logits(model_outputs),
)
loss = quantizer_loss_ if quantizer_pretrain else final_loss_
return jnp.mean(loss), (train_metrics, model_state)
# model_state has only the batch_norm stats which only appear in the
# late feature extractor (conformer).
grads, (train_metrics, model_state) = jax.grad(step, has_aux=True)(
train_state.params, train_state.model_state
)
grads = jax.lax.pmean(grads, axis_name="batch")
updates, opt_state = model_bundle.optimizer.update(
grads, train_state.opt_state, train_state.params
)
params = optax.apply_updates(train_state.params, updates)
train_state = utils.TrainState(
step=train_state.step + 1,
params=params,
opt_state=opt_state,
model_state=model_state,
)
return train_metrics, train_state
initial_step = int(train_state.step)
train_state = flax_utils.replicate(train_state)
# Logging
writer = metric_writers.create_default_writer(logdir)
reporter = periodic_actions.ReportProgress(
num_train_steps=num_train_steps, writer=writer
)
# Training and evaluation loop
key = model_bundle.key
for step in range(initial_step, num_train_steps + 1):
with jax.profiler.StepTraceAnnotation("train", step_num=step):
batch = next(train_iterator)
step_key, mask_key, key = random.split(key, num=3)
mask_key = random.split(mask_key, num=jax.local_device_count())
step_key = random.split(step_key, num=jax.local_device_count())
quantizer_pretrain = step < num_quantizer_pretrain_steps
train_metrics, train_state = update_step(
quantizer_pretrain, step_key, batch, train_state, mask_key
)
if step % log_every_steps == 0:
utils.write_metrics(
writer,
step,
flax_utils.unreplicate(train_metrics).compute(prefix="train"),
)
reporter(step)
if (step + 1) % checkpoint_every_steps == 0 or step == num_train_steps:
with reporter.timed("checkpoint"):
model_bundle.ckpt.save(flax_utils.unreplicate(train_state))
writer.close()
def evaluate(
model_bundle: utils.ModelBundle,
train_state: utils.TrainState,
learning_rate_schedule: optax.Schedule,
valid_dataset: tf.data.Dataset,
workdir: str,
num_train_steps: int,
eval_steps_per_checkpoint: int | None = None,
train_mode_at_eval: bool | None = False,
mask_at_eval: bool | None = False,
name: str = "valid",
eval_sleep_s: int = EVAL_LOOP_SLEEP_S,
):
"""Run evaluation."""
quant_loss_mult, readout_loss_mult, hubert_loss_mult = 1, 1, 1
taxonomy_keys = ["label"]
taxonomy_loss_weight = model_bundle.model.taxonomy_loss_weight
if taxonomy_loss_weight != 0.0:
taxonomy_keys += utils.TAXONOMY_KEYS
metrics_ = get_train_metrics(
taxonomy_keys,
model_bundle.model.num_classes,
alpha=model_bundle.model.alpha,
readout_loss_mult=readout_loss_mult,
hubert_loss_mult=hubert_loss_mult,
quantizer_points=model_bundle.model.quantizer_points,
readout_points=model_bundle.model.readout_points,
)
rank_metrics = {}
for key in taxonomy_keys:
rank_metrics[f"{key}_cmap"] = (
(f"{key}_logits", key),
metrics.cmap,
)
rank_metrics[f"{key}_roc_auc"] = (
(f"{key}_logits", key),
metrics.roc_auc,
)
metrics_["rank_metrics"] = utils.CollectingMetrics.from_funs(**rank_metrics)
valid_metrics_collection = utils.NestedCollection.create(**metrics_)
@functools.partial(jax.pmap, axis_name="batch")
def get_metrics(batch, train_state, mask_key):
variables = {"params": train_state.params, **train_state.model_state}
mutable = (
list(train_state.model_state.keys()) if train_mode_at_eval else False
)
model_outputs = model_bundle.model.apply(
variables,
batch["audio"],
train=train_mode_at_eval,
mask_key=mask_key,
train_mode_quantizer=False,
mutable=mutable,
)
if mutable:
# Both model outputs and state are returned if `mutable` was given.
model_outputs = model_outputs[0]
loss = final_loss(
model_outputs,
taxonomy_loss_weight=taxonomy_loss_weight,
alpha=model_bundle.model.alpha,
quant_loss_mult=quant_loss_mult,
readout_loss_mult=readout_loss_mult,
hubert_loss_mult=hubert_loss_mult,
**batch,
)
return valid_metrics_collection.gather_from_model_output(
outputs=model_outputs,
loss=loss,
quantizer_loss=quantizer_loss(model_outputs, quant_loss_mult),
learning_rate=learning_rate_schedule(train_state.step),
taxonomy_loss_weight=taxonomy_loss_weight,
# TODO(bartvm): This only calculates CmAP over the first readout layer
label_logits=model_outputs.label[0],
**batch,
)
writer = metric_writers.create_default_writer(workdir)
reporter = periodic_actions.ReportProgress(
num_train_steps=num_train_steps, writer=writer
)
for train_state in utils.checkpoint_iterator(
train_state, model_bundle.ckpt, workdir, num_train_steps, eval_sleep_s
):
step = int(train_state.step)
key = model_bundle.key
with reporter.timed("eval"):
valid_metrics = valid_metrics_collection.empty()
for s, batch in enumerate(valid_dataset.as_numpy_iterator()):
batch = jax.tree_map(np.asarray, batch)
mask_key = None
if mask_at_eval:
mask_key, key = random.split(key)
mask_key = random.split(mask_key, num=jax.local_device_count())
new_valid_metrics = get_metrics(
batch, flax_utils.replicate(train_state), mask_key
)
valid_metrics = valid_metrics.merge(
flax_utils.unreplicate(new_valid_metrics)
)
if (
eval_steps_per_checkpoint is not None
and s >= eval_steps_per_checkpoint
):
break
# Log validation loss
utils.write_metrics(writer, step, valid_metrics.compute(prefix=name))
writer.flush()
def export_tf_model(
model_bundle: utils.ModelBundle,
train_state: utils.TrainState,
workdir: str,
input_shape: tuple[int, ...],
num_train_steps,
eval_sleep_s=EVAL_LOOP_SLEEP_S,
):
"""Write a TF SavedModel."""
for train_state in utils.checkpoint_iterator(
train_state, model_bundle.ckpt, workdir, num_train_steps, eval_sleep_s
):
variables = {"params": train_state.params, **train_state.model_state}
def infer_fn(audio_batch):
model_outputs = model_bundle.model.apply(
variables, audio_batch, train=False # pylint: disable=cell-var-from-loop
)
return model_outputs.label
tf_predict = tf.function(
jax2tf.convert(infer_fn, enable_xla=False),
input_signature=[
tf.TensorSpec(
shape=(1,) + input_shape, dtype=tf.float32, name="input"
)
],
autograph=False,
)
converter = tf.lite.TFLiteConverter.from_concrete_functions(
[tf_predict.get_concrete_function()], tf_predict
)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
tf.lite.OpsSet.SELECT_TF_OPS, # enable TensorFlow ops.
]
tflite_float_model = converter.convert()
if not tf.io.gfile.exists(workdir):
tf.io.gfile.makedirs(workdir)
with tf.io.gfile.GFile(os.path.join(workdir, "model.tflite"), "wb") as f:
f.write(tflite_float_model)
def run(
mode: str,
config: config_dict.ConfigDict,
workdir: str,
tf_data_service_address: str,
) -> None:
"""Run the experiment."""
if mode.startswith("eval_"):
mode, name = mode.split("_", maxsplit=1)
config.eval_dataset_config = getattr(config.eval_dataset_config, name)
else:
name = "valid"
if mode == "train":
train_dataset, dataset_info = data_utils.get_dataset(
is_train=True,
tf_data_service_address=tf_data_service_address,
**config.train_dataset_config,
)
elif mode in ["eval", "tune_eval_hypers"]:
valid_dataset, dataset_info = data_utils.get_dataset(
**config.eval_dataset_config
)
elif mode == "export":
valid_dataset, dataset_info = None, None
if (
dataset_info is not None
and dataset_info.features["audio"].sample_rate != config.sample_rate_hz
):
raise ValueError(
"Dataset sample rate must match config sample rate. To address this, "
"need to set the sample rate in the config to {}.".format(
dataset_info.features["audio"].sample_rate
)
)
reload_quantizer = False
if config.init_config.reload_quantizer_from:
reload_quantizer = True
# Adjust the multiplier of the quantizer loss such that the quantizer gets the
# intended starting learning rate.
quant_start_lr = config.init_config.quant_start_learning_rate
start_lr = config.init_config.start_learning_rate
quant_loss_mult = quant_start_lr / start_lr
quant_loss_mult *= config.train_config.quant_loss_mult
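  # Hedged numeric illustration (not from the original source): with, say,
  # quant_start_learning_rate=1e-4, start_learning_rate=1e-5 and
  # train_config.quant_loss_mult=1.0, the effective quant_loss_mult becomes
  # (1e-4 / 1e-5) * 1.0 = 10.0, i.e. the quantizer's gradients are scaled as if
  # it had its own 10x larger starting learning rate.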
# Initialize.
if mode == "tune_eval_hypers":
# Here, workdir is provided in the init config.
model_bundle, train_state, learning_rate_schedule = initialize_model(
num_train_steps=config.train_config.num_train_steps,
**config.init_config,
)
else:
model_bundle, train_state, learning_rate_schedule = initialize_model(
workdir=workdir,
num_train_steps=config.train_config.num_train_steps,
**config.init_config,
)
if mode == "train":
train(
model_bundle,
train_state,
learning_rate_schedule,
train_dataset,
reload_quantizer=reload_quantizer,
logdir=workdir,
num_train_steps=config.train_config.num_train_steps,
log_every_steps=config.train_config.log_every_steps,
checkpoint_every_steps=config.train_config.checkpoint_every_steps,
num_quantizer_pretrain_steps=config.train_config.num_quantizer_pretrain_steps,
quant_loss_mult=quant_loss_mult,
readout_loss_mult=config.train_config.readout_loss_mult,
hubert_loss_mult=config.train_config.hubert_loss_mult,
)
elif mode == "tune_eval_hypers":
# Running a single round of evaluation (as opposed to running eval in a
# loop whenever a new checkpoint is produced).
# This is used to tune HuBERT's evaluation hypers once.
train_state = model_bundle.ckpt.restore(train_state)
evaluate(
model_bundle,
flax_utils.replicate(train_state),
learning_rate_schedule,
valid_dataset,
workdir=workdir,
train_mode_at_eval=config.eval_config.train_mode_at_eval,
mask_at_eval=config.eval_config.mask_at_eval,
name=name,
# Setting num_train_steps=0 will run eval exactly once.
num_train_steps=0,
)
elif mode == "eval":
evaluate(
model_bundle,
train_state,
learning_rate_schedule,
valid_dataset,
workdir=workdir,
name=name,
**config.eval_config,
)
elif mode == "export":
export_tf_model(
model_bundle,
train_state,
workdir=workdir,
**config.export_config,
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model utils and colab helpers."""
import collections
import glob
import json
import os
from absl import logging
from chirp.birb_sep_paper import audio_ops
from etils import epath
from ml_collections import config_dict
import numpy as np
import tensorflow
tf = tensorflow.compat.v1
tf2 = tensorflow.compat.v2
ClassifierState = collections.namedtuple(
'ClassifierState',
[
'session',
'audio_placeholder',
'melspec_placeholder',
'hints_placeholder',
'melspec_output',
'logits',
'all_outputs',
],
)
SeparatorState = collections.namedtuple(
'SeparatorState', ['session', 'audio_placeholder', 'output_tensor']
)
def load_params_from_json(model_dir, filename='hyper_params.json'):
"""Read hyperparams from json file in the model_dir."""
if os.path.exists(os.path.join(model_dir, filename)):
filepath = os.path.join(model_dir, filename)
elif os.path.exists(os.path.join(model_dir, 'run_00', filename)):
filepath = os.path.join(model_dir, 'run_00', filename)
else:
raise ValueError('Could not find hyper_params file.')
with tf2.io.gfile.GFile(filepath) as f:
json_str = f.read()
params_dict = json.loads(json_str)
return config_dict.ConfigDict(params_dict)
def audio_to_input_fn(
audio,
dataset_params,
interval_s=4,
sample_rate_hz=44100,
max_intervals=10,
batch_size=None,
hints=None,
):
"""Perform peak-finding segmentation, batch segments."""
if batch_size is None:
batch_size = 4
intervals = audio_ops.SlicePeakedAudio(
audio,
sample_rate_hz=sample_rate_hz,
interval_s=interval_s,
max_intervals=max_intervals,
)
audio_batch = np.concatenate(
[np.expand_dims(v, 0) for v in intervals.values()], axis=0
)
if hints is None:
hints = np.ones([batch_size, dataset_params.n_classes])
def _map_features(features):
ms = audio_ops.GetAugmentedMelspec(
features['audio'],
dataset_params.sample_rate_hz,
dataset_params.melspec_params,
dataset_params.feature_cleaning,
dataset_params.filter_augment,
)
return {
'audio': features['audio'],
'melspec': ms,
'hints': hints,
}
def input_fn(params):
"""Input function wrapping the intervals."""
params = config_dict.ConfigDict(params)
dataset = tf.data.Dataset.from_tensors({'audio': np.float32(audio_batch)})
dataset = dataset.map(_map_features)
dataset = dataset.unbatch()
dataset = dataset.batch(batch_size)
return dataset
return input_fn, intervals.keys()
def build_optimizer(learning_rate, use_tpu):
"""build the optimizer."""
print('Defining optimizer...')
with tf.variable_scope('optimizer'):
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=0.01
)
# Importing contrib_estimator now fails.
# if clip_gradient > 0:
# optimizer = contrib_estimator.clip_gradients_by_norm(
# optimizer, clip_gradient)
if use_tpu:
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
return optimizer
def mean_reciprocal_rank(logits, labels):
asortd = tf.argsort(logits, axis=1)
asortd = tf.argsort(asortd, axis=1)
asortd = -(asortd - logits.shape[1])
rnks = tf.reduce_sum(tf.to_float(asortd) * tf.to_float(labels), axis=1)
invrnks = tf.reciprocal(rnks)
return tf.reduce_mean(invrnks)
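# Worked example added by the editor (not part of the original module): the
# double argsort above converts logits into descending ranks, e.g. logits
# [[0.1, 0.9, 0.5]] become ranks [3, 1, 2]; a one-hot label on the class with
# logit 0.5 then contributes a reciprocal rank of 1/2 to the mean.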
def map_k(labels_onehot, logits, k=1, name=''):
"""Finds mean average precision at k."""
# Need to convert one_hot labels to class ids.
labels_onehot = tf.cast(labels_onehot, tf.int64)
class_ids = tf.expand_dims(
tf.range(labels_onehot.shape[-1], dtype=tf.int64), 0
)
masked_class_ids = labels_onehot * class_ids
# Set the false labels to -1, since the zero label is allowed.
masked_class_ids += (labels_onehot - 1) * tf.ones(
labels_onehot.shape, tf.int64
)
final_map_k, map_k_update_op = tf.metrics.average_precision_at_k(
masked_class_ids, logits, k, name=name
)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, map_k_update_op)
return final_map_k
def _find_checkpoint(model_path: str) -> str:
# Publicly released model does not have a checkpoints directory file.
ckpt = None
for ckpt in sorted(tuple(epath.Path(model_path).glob('model.ckpt-*.index'))):
ckpt = ckpt.as_posix()[: -len('.index')]
if ckpt is None:
raise FileNotFoundError('Could not find checkpoint file.')
return ckpt
def load_separation_model(model_path):
"""Loads a separation model graph for inference."""
metagraph_path_ns = os.path.join(model_path, 'inference.meta')
checkpoint_path = _find_checkpoint(model_path)
graph_ns = tf.Graph()
sess_ns = tf.compat.v1.Session(graph=graph_ns)
with graph_ns.as_default():
new_saver = tf.train.import_meta_graph(metagraph_path_ns)
new_saver.restore(sess_ns, checkpoint_path)
input_placeholder_ns = graph_ns.get_tensor_by_name(
'input_audio/receiver_audio:0'
)
output_tensor_ns = graph_ns.get_tensor_by_name('denoised_waveforms:0')
return SeparatorState(sess_ns, input_placeholder_ns, output_tensor_ns)
def load_saved_model(sess, model_dir, inference_subdir='inference'):
"""Loads a model from a saved_model.pb file.
Args:
sess: The TensorFlow session where the loaded model will be run.
model_dir: Model directory.
inference_subdir: Subdirectory containing the saved_model.pb file.
Returns:
signature_def: A ProtoBuf of the signature definition from the
loaded graph definition.
"""
load_dir = os.path.join(model_dir, inference_subdir)
meta_graph_def = tf.saved_model.load(sess, [tf.saved_model.SERVING], load_dir)
signature_def = meta_graph_def.signature_def[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
]
return signature_def
def load_classifier_state(model_path, sample_rate=22050):
"""Load all classifier state for the given xid + run_num."""
cl_graph = tf.Graph()
cl_sess = tf.Session(graph=cl_graph)
with cl_graph.as_default():
# Need to convert to melspec for the classifier.
audio_placeholder = tf.placeholder(dtype=tf.float32, shape=[None, None])
# TODO(tmd): Should read these from the model params instead of hardcoding.
melspec_params = {
'melspec_frequency': 100,
'upper_edge_hertz': 10000.0,
'scaling': 'pcen',
}
feature_cleaning = {
'strategy': 'whiten',
'clean_thresh': 1.0,
}
cl_melspec = audio_ops.GetAugmentedMelspec(
audio_placeholder, sample_rate, melspec_params, feature_cleaning, None
)
signature_def = load_saved_model(cl_sess, model_path)
saved_model_melspec_input = signature_def.inputs['melspec_input'].name
output_logits = signature_def.outputs['logits_label_prediction'].name
if 'hints_input' in signature_def.inputs:
hints_input = signature_def.inputs['hints_input'].name
else:
hints_input = None
all_outputs = {
k: signature_def.outputs[k].name for k in signature_def.outputs
}
return ClassifierState(
cl_sess,
audio_placeholder,
saved_model_melspec_input,
hints_input,
cl_melspec,
output_logits,
all_outputs,
)
def load_classifier_ensemble(model_path, sample_rate=22050, max_runs=5):
"""Loads ensemble of classifiers."""
classifiers = {}
runs = glob.glob(os.path.join(model_path, 'run_*'))
runs = [r for r in runs if '.xid' not in r]
if not runs:
logging.info('Loading single classifier : %s', model_path)
classifiers['run_00'] = load_classifier_state(model_path, sample_rate)
else:
for run in runs[:max_runs]:
run_path = os.path.join(run)
logging.info('loading classifier : %s', run_path)
classifiers[run] = load_classifier_state(run_path, sample_rate)
return classifiers
def ensemble_classify(
audio_batch, classifier_states, hints=None, logits_key=None
):
"""Classify a batch of audio with the given set of classifiers."""
all_logits = None
if hints is not None and len(hints.shape) == 1:
# tile the hints.
hints = hints[np.newaxis, :]
hints = np.tile(hints, [audio_batch.shape[0], 1])
cl0 = list(classifier_states.values())[0]
melspec = cl0.session.run(
cl0.melspec_output, feed_dict={cl0.audio_placeholder: audio_batch}
)
for cl_state in classifier_states.values():
if logits_key is None:
target = cl_state.logits
else:
target = cl_state.all_outputs[logits_key]
if cl_state.hints_placeholder is not None:
got_logits = cl_state.session.run(
target,
feed_dict={
cl_state.melspec_placeholder: melspec,
cl_state.hints_placeholder: hints,
},
)
else:
got_logits = cl_state.session.run(
target, feed_dict={cl_state.melspec_placeholder: melspec}
)
got_logits = got_logits[:, np.newaxis]
if all_logits is None:
all_logits = got_logits
else:
all_logits = np.concatenate([all_logits, got_logits], axis=1)
return melspec, all_logits
def model_embed(
audio_batch, classifier_state, hints=None, output_key='pooled_embedding'
):
"""Use ClassifierState to compute an audio embedding."""
if hints is not None and len(hints.shape) == 1:
# tile the hints.
hints = hints[np.newaxis, :]
hints = np.tile(hints, [audio_batch.shape[0], 1])
melspec = classifier_state.session.run(
classifier_state.melspec_output,
feed_dict={classifier_state.audio_placeholder: audio_batch},
)
if classifier_state.hints_placeholder is not None:
embedding = classifier_state.session.run(
classifier_state.all_outputs[output_key],
feed_dict={
classifier_state.melspec_placeholder: melspec,
classifier_state.hints_placeholder: hints,
},
)
else:
embedding = classifier_state.session.run(
classifier_state.logits,
feed_dict={classifier_state.melspec_placeholder: melspec},
)
return embedding
def progress_dot(i, verbose=True, div=1):
"""Print a dot every so often, in rows of one hundred."""
if not verbose:
return
if div > 1:
i = i // div
  if (i + 1) % 1000 == 0:
print('*')
elif (i + 1) % 50 == 0:
print('.')
elif (i + 1) % 25 == 0:
print('.', end=' ')
else:
print('.', end='')
def ensemble_classify_batched(
audios, classifier_states, hints=None, batch_size=32, verbose=True
):
"""Ensemble classify by batching input audio."""
logits = None
mels = None
ds = tf.data.Dataset.from_tensor_slices(audios).batch(batch_size)
for i, batch in enumerate(ds):
new_mels, new_logits = ensemble_classify(
batch.numpy(), classifier_states, hints
)
if logits is None:
logits = new_logits
mels = new_mels
else:
logits = np.concatenate([logits, new_logits], axis=0)
mels = np.concatenate([mels, new_mels], axis=0)
progress_dot(i, verbose)
return mels, logits
def separate_windowed(
audio, separator_state, hop_size_s=2.5, window_size_s=5, sample_rate=22050
):
"""Separate a large audio file in windowed chunks."""
start_sample = 0
window_size = int(window_size_s * sample_rate)
hop_size = int(hop_size_s * sample_rate)
# Separate audio.
sep_chunks = []
raw_chunks = []
while start_sample + window_size <= audio.shape[0] or not raw_chunks:
audio_chunk = audio[start_sample : start_sample + window_size]
raw_chunks.append(audio_chunk[np.newaxis, :])
separated_audio = separator_state.session.run(
separator_state.output_tensor,
feed_dict={
separator_state.audio_placeholder: audio_chunk[
np.newaxis, np.newaxis, :
]
},
)
sep_chunks.append(separated_audio)
start_sample += hop_size
if not raw_chunks:
return None, None
raw_chunks = np.concatenate(raw_chunks, axis=0)
sep_chunks = np.concatenate(sep_chunks, axis=0)
return sep_chunks, raw_chunks
def separate_classify(
audio,
classifier_states,
separator_state,
hints=None,
batch_size=4,
hop_size_s=2.5,
window_size_s=5,
sample_rate=22050,
verbose=False,
):
"""Separate and classify an audio array."""
sep_chunks, raw_chunks = separate_windowed(
audio, separator_state, hop_size_s, window_size_s, sample_rate
)
if raw_chunks is None:
return None, None
# Run classifiers on chunks.
big_batch = np.reshape(
sep_chunks, [sep_chunks.shape[0] * sep_chunks.shape[1], -1]
)
sep_mels, sep_logits = ensemble_classify_batched(
big_batch,
classifier_states,
hints=hints,
batch_size=batch_size,
verbose=verbose,
)
sep_mels = np.reshape(
sep_mels,
[
sep_chunks.shape[0],
sep_chunks.shape[1],
sep_mels.shape[-2],
sep_mels.shape[-1],
],
)
sep_logits = np.reshape(
sep_logits,
[
sep_chunks.shape[0],
sep_chunks.shape[1],
len(classifier_states),
sep_logits.shape[-1],
],
)
raw_mels, raw_logits = ensemble_classify_batched(
raw_chunks,
classifier_states,
hints=hints,
batch_size=batch_size,
verbose=verbose,
)
raw_logits = raw_logits[:, np.newaxis, :]
stacked_mels = np.concatenate([raw_mels[:, np.newaxis], sep_mels], axis=1)
stacked_logits = np.concatenate([raw_logits, sep_logits], axis=1)
reduced_logits = np.mean(stacked_logits, axis=2)
reduced_logits = np.max(reduced_logits, axis=1)
# Use the raw_logits score for the unknown class.
reduced_logits[:, 0] = np.mean(raw_logits, axis=1)[:, 0, 0]
return stacked_mels, reduced_logits
def saved_model_prediction(model_dir, mels, hints=None, batch_size=8):
"""Run inference on the set of numpy melspec features."""
scores = []
with tf.Graph().as_default():
with tf.Session() as sess:
signature_def = load_saved_model(sess, model_dir)
ms_name = signature_def.inputs['melspec_input'].name
output_name = signature_def.outputs['logits_label_prediction'].name
use_hints = 'hints_input' in signature_def.inputs
# Handle species hinting inputs.
      if use_hints and hints is None:
        # Default to all-ones hints (no species restriction) when unspecified.
        n_classes = (
            signature_def.inputs['hints_input'].tensor_shape.dim[-1].size
        )
        hints = np.ones([n_classes], np.float32)
dataset_dict = {
'melspec': mels,
}
dataset = tf.data.Dataset.from_tensor_slices(dataset_dict)
dataset = dataset.batch(batch_size)
it = dataset.make_one_shot_iterator().get_next()
while True:
try:
features = sess.run(it)
except tf.errors.OutOfRangeError:
break
feed = {ms_name: features['melspec']}
if use_hints:
hint_name = signature_def.inputs['hints_input'].name
batch_hints = np.tile(
hints[np.newaxis, :], [features['melspec'].shape[0], 1]
)
feed[hint_name] = batch_hints
pred = sess.run(output_name, feed_dict=feed)
scores.append(pred)
return np.concatenate(scores, axis=0)
def add_histogram(name, tensor, use_tpu, histogram_vars, add_tpu_summary=False):
tensor = tf.check_numerics(tensor, 'check ' + name)
if not use_tpu:
tf.summary.histogram(name, tensor)
elif add_tpu_summary:
histogram_vars.append((name, tensor))
return tensor
def add_scalar(name, tensor, scalar_vars, use_tpu):
if use_tpu:
scalar_vars.append((name, tf.expand_dims(tensor, 0)))
else:
tf.summary.scalar(name, tensor)
return tensor
def make_eval_metrics(mode_key, model_dir, eval_dict):
"""Create an eval metrics map."""
tensor_map = {k: tf.expand_dims(v, 0) for k, v in eval_dict.items()}
tensor_map['global_step'] = tf.expand_dims(
tf.train.get_or_create_global_step(), 0
)
summary_path = os.path.join(model_dir, mode_key)
tf.logging.info('eval_metrics summary path: %s', summary_path)
def eval_metrics_fn(**tensor_map):
"""Eval function for CPU summaries."""
tf.logging.info('eval_metrics tensors: %s', tensor_map)
writer = tf2.summary.create_file_writer(summary_path, max_queue=1000)
eval_metric_ops = {}
with writer.as_default():
for name, tensor in tensor_map.items():
if name == 'global_step':
continue
eval_metric_ops[name] = tf.metrics.mean(tensor)
return eval_metric_ops
return eval_metrics_fn, tensor_map
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Descriptive dataset info."""
import dataclasses
import json
import os
import tensorflow as tf
@dataclasses.dataclass()
class DatasetInfo:
"""Describes dataset contents."""
sample_rate_hz: int = 22050
example_size_s: float = 6.0
comment: str = ''
label_set: tuple[str, ...] = ()
genus_set: tuple[str, ...] = ()
family_set: tuple[str, ...] = ()
order_set: tuple[str, ...] = ()
train_sstables: str = 'train_xc/tf.sstable-*'
noise_sstables: str = 'noise/tf.sstable-*'
eval_sstables: str = 'eval_xc/tf.sstable-*'
eval_ss_sstables: str = 'eval_ss/tf.sstable-*'
species_info_csv: str = 'species_info.csv'
def add_enums_from_taxonomy(self, taxo):
self.label_set = tuple(
[taxo.label_enum[i] for i in range(len(taxo.label_enum) // 2)]
)
self.genus_set = tuple(
[taxo.genus_enum[i] for i in range(len(taxo.genus_enum) // 2)]
)
self.family_set = tuple(
[taxo.family_enum[i] for i in range(len(taxo.family_enum) // 2)]
)
self.order_set = tuple(
[taxo.order_enum[i] for i in range(len(taxo.order_enum) // 2)]
)
def write(self, output_path, filename='info.json'):
with tf.io.gfile.GFile(os.path.join(output_path, filename), 'w') as f:
f.write(json.dumps(dataclasses.asdict(self)))
def read_dataset_info(info_path, filename='info.json'):
with tf.io.gfile.GFile(os.path.join(info_path, filename), 'r') as f:
data = json.loads(f.read())
return DatasetInfo(**data)
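# Illustrative sketch added by the editor (not part of the original module):
# round-trips a DatasetInfo through JSON. The directory is assumed to already
# exist; the field values are made up for illustration only.
def _dataset_info_round_trip_example(info_dir: str) -> DatasetInfo:
  info = DatasetInfo(sample_rate_hz=32000, comment='toy example')
  info.write(info_dir)
  return read_dataset_info(info_dir)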
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for reading/transcoding data from colossus."""
import re
import numpy as np
import scipy
from scipy.io import wavfile
import tensorflow as tf
LATLONG_REGEX = re.compile(r'\((\-?\d+\.\d+?),\s*(\-?\d+\.\d+?)\)')
TIME_OF_DAY_REGEX = re.compile(r'(\d{1,2}:\d\d)')
DATE_REGEX = re.compile(r'(\d\d\d\d-\d{1,2}-\d{1,2})')
ELEV_REGEX = re.compile(r'^(\d+)\s?m')
def LoadAudio(audio_path, target_sr):
"""LoadWavAudio loads a wav file from a path."""
if '.wav' in audio_path or '.WAV' in audio_path:
sr, audio = LoadWavAudio(audio_path, sample_rate=target_sr)
metadata = {}
else:
raise Exception('wrong file format, please use .wav')
if sr != target_sr:
raise Exception(
'got wrong sample rate (%s vs %s) from converted file: %s'
% (sr, target_sr, audio_path)
)
return audio, metadata
def CenteredRepeatPad(audio, target_length):
if audio.shape[0] >= target_length:
return audio
padded = audio
while padded.shape[0] < target_length:
padded = np.concatenate([audio, padded, audio])
midpoint = padded.shape[0] // 2
start = midpoint - target_length // 2
padded = padded[start : start + target_length]
return padded
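# Illustrative sketch added by the editor (not part of the original module):
# CenteredRepeatPad tiles short audio and takes a centered crop, so a 3-sample
# clip [1, 2, 3] padded to length 7 becomes [2, 3, 1, 2, 3, 1, 2] rather than
# being zero padded. The values below are made up for illustration only.
def _centered_repeat_pad_example():
  padded = CenteredRepeatPad(np.array([1.0, 2.0, 3.0]), 7)
  assert padded.shape[0] == 7
  return padded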
def LoadWavAudio(path, sample_rate, bitdepth=16):
"""LoadWavAudio loads a wav file from a path.
Resamples to sample_rate, drops all but the 0th channel.
Args:
path: Location to load.
sample_rate: Target sample rate. Set to 0 to avoid resampling.
bitdepth: Scaling term.
Returns:
    sample_rate: Sample rate of the returned audio.
    array: Numpy array of audio samples.
"""
sr, array = wavfile.read(path, mmap=True)
if len(array.shape) > 1:
array = array[:, 0]
array = 1.0 * array / 2**bitdepth
  if sample_rate > 0 and sr != sample_rate:
    target_samples = int(sample_rate / sr * array.shape[0])
    array = scipy.signal.resample(array, target_samples)
  else:
    # No resampling requested (or rates already match); report the file's rate.
    sample_rate = sr
  return sample_rate, array
def BytesFeature(x, default=''):
if x is None:
x = default
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[x]))
def BytesRepFeature(x, default=None):
if default is None:
default = []
if x is None:
x = default
return tf.train.Feature(bytes_list=tf.train.BytesList(value=x))
def FloatFeature(x, default=-1.0):
if x is None:
x = default
return tf.train.Feature(float_list=tf.train.FloatList(value=[x]))
def FloatsFeature(x, default=None):
if default is None:
default = []
if x is None:
x = default
return tf.train.Feature(float_list=tf.train.FloatList(value=x))
def IntFeature(x, default=-1):
if x is None:
x = default
if hasattr(x, 'count'):
return tf.train.Feature(int64_list=tf.train.Int64List(value=x))
return tf.train.Feature(int64_list=tf.train.Int64List(value=[x]))
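# A minimal sketch of assembling a tf.train.Example from these helpers
# (hypothetical feature values):
#
#   feature = {
#       'file_id': BytesFeature(bytes('XC12345', encoding='utf8')),
#       'timestamp_offset': IntFeature(0),
#       'logits': FloatsFeature([0.5, -1.0]),
#   }
#   ex = tf.train.Example(features=tf.train.Features(feature=feature))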
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Per-Channel Energy Normalization (PCEN) ops.
See https://arxiv.org/abs/1607.05666 for details.
"""
import tensorflow
tf = tensorflow.compat.v1
def _swap_initial_and_time_axes(tensor):
"""Swaps the initial axis with the one that index STFT frame index.
This assumes that the axis at index -2 indexes STFT frames. The
tf.while_loop
in the PCEN Op will want to iterate over that axis for the smoothing filter
and for that also wants it to be axis 0. This method is intended to be
applied
before and after the while_loop and just swaps those two axes.
Args:
tensor: A tensor for which the axis to be smoothed over is at index -2. It
is expected but not required that its rank will be in {2, 3}.
Returns:
Transpose of tensor where axes (0, -2) have been swapped.
"""
if tensor.shape.rank is not None:
if tensor.shape.rank < 3:
return tensor
perm = list(range(tensor.shape.rank))
perm[0], perm[-2] = perm[-2], perm[0]
return tf.transpose(tensor, perm)
rank = tf.rank(tensor)
def return_original_tensor():
return tensor
def return_permuted_tensor():
perm = tf.range(rank)
# Overly complex way of swapping element 0 and -2.
perm = tf.concat([perm[-2:-1], perm[1:-2], perm[0:1], perm[-1:]], axis=0)
# It appears that, even when rank < 3, this path must still be valid. When
# rank < 3, the previous line will add an element to the perm list.
perm = perm[0:rank]
return tf.transpose(tensor, perm)
return tf.cond(
rank < 3, true_fn=return_original_tensor, false_fn=return_permuted_tensor
)
def fixed_pcen(
filterbank_energy,
alpha,
smooth_coef,
delta=2.0,
root=2.0,
floor=1e-6,
name=None,
streaming=False,
state=None,
):
"""Per-Channel Energy Normalization (PCEN) with fixed parameters.
See https://arxiv.org/abs/1607.05666 for details.
Args:
filterbank_energy: A [..., num_frames, num_frequency_bins] tensor of
power-domain filterbank energies. If a scalar, we return 0.0 as the
spectral floor value (for padding purposes).
alpha: The normalization coefficient.
smooth_coef: The coefficient of the IIR smoothing filter ($s$ in the paper).
delta: Constant stabilizer offset for the root compression.
root: Root compression coefficient.
floor: Epsilon floor value to prevent division by zero.
name: Optional scope name.
streaming: If true, also return a smoothing output so that this function can
be run on sequential chunks of audio, instead of processing all audio at
once.
state: Optional state produced by a previous call to fixed_pcen. Used in
streaming mode.
Returns:
Filterbank energies with PCEN compression applied (type and shape are
unchanged). If in streaming mode, also returns a state tensor to be used
in the next call to fixed_pcen.
"""
with tf.name_scope(name, 'pcen'):
filterbank_energy = tf.convert_to_tensor(filterbank_energy)
if filterbank_energy.shape.rank == 0:
return tf.constant(0.0, filterbank_energy.dtype)
filterbank_energy.shape.with_rank_at_least(2)
alpha = tf.convert_to_tensor(alpha, filterbank_energy.dtype, name='alpha')
alpha.shape.with_rank_at_most(1)
smooth_coef = tf.convert_to_tensor(
smooth_coef, filterbank_energy.dtype, name='smoothing_coefficient'
)
smooth_coef.shape.assert_has_rank(0)
delta = tf.convert_to_tensor(delta, filterbank_energy.dtype, name='delta')
delta.shape.with_rank_at_most(1)
root = tf.convert_to_tensor(root, filterbank_energy.dtype, name='root')
root.shape.with_rank_at_most(1)
floor = tf.convert_to_tensor(floor, filterbank_energy.dtype, name='floor')
floor.shape.assert_has_rank(0)
# Compute the smoothing filter.
transposed_energy = _swap_initial_and_time_axes(filterbank_energy)
timesteps = tf.shape(transposed_energy)[0]
filterbank_energy_ta = tf.TensorArray(
filterbank_energy.dtype, size=timesteps, clear_after_read=False
)
filterbank_energy_ta = filterbank_energy_ta.unstack(transposed_energy)
def compute_smoother():
"""Compute a first-order smoothing filter."""
if state is not None:
init_smoother = state
else:
init_smoother = filterbank_energy_ta.read(0)
def _cond(t, unused_smoother_ta, unused_prev_ret):
return t < timesteps
def _body(t, smoother_ta, prev_ret):
cur_ret = (
1.0 - smooth_coef
) * prev_ret + smooth_coef * filterbank_energy_ta.read(t)
smoother_ta = smoother_ta.write(t, cur_ret)
return t + 1, smoother_ta, cur_ret
smoother_ta = tf.TensorArray(
filterbank_energy.dtype, timesteps, clear_after_read=False
)
_, smoother_ta, final_smoother = tf.while_loop(
_cond,
_body,
loop_vars=[tf.constant(0, tf.int32), smoother_ta, init_smoother],
)
return _swap_initial_and_time_axes(smoother_ta.stack()), final_smoother
smoother, final_state = compute_smoother()
one_over_root = 1.0 / root
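    # PCEN: (E / (floor + M)**alpha + delta)**(1/root) - delta**(1/root),
    # where M is the IIR-smoothed energy computed above.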
pcen = (
filterbank_energy / (floor + smoother) ** alpha + delta
) ** one_over_root - delta**one_over_root
if streaming:
return pcen, final_state
else:
return pcen
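# A minimal streaming-usage sketch (hypothetical parameter values; `mel` is a
# [num_frames, num_bins] power-domain mel spectrogram split into two chunks):
#
#   out_a, state = fixed_pcen(mel[:100], alpha=0.98, smooth_coef=0.025,
#                             streaming=True)
#   out_b, _ = fixed_pcen(mel[100:], alpha=0.98, smooth_coef=0.025,
#                         streaming=True, state=state)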
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Convert files to embedding vector sstables.
Example command line:
python -m beam_index \
--source_files=test_files/*.wav \
--model_path=models/taxo_lori_34078450 \
--separation_model_path=models/separator4 \
--output_dir=lorikeet_inference \
--hints_tag=eaus \
"""
import collections
import gc
import glob
import os
import time
from typing import Any
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
import data_tools
import model_utils
import numpy as np
import taxonomy
import tensorflow as tf
FLAGS = flags.FLAGS
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
SPECIES_INFO_PATH = os.path.join(DATA_PATH, 'species_info.csv')
ENSEMBLE_SIZE = 3
flags.DEFINE_list('source_files', [], 'Source audio files (wav or mp3).')
flags.DEFINE_string(
'model_path', '', 'Where to find the model params and inference.pb'
)
flags.DEFINE_string(
'separation_model_path', '', 'Where to find the separation inference model.'
)
flags.DEFINE_string('output_dir', '', 'Where to dump output data.')
flags.DEFINE_string(
'embedding_key', 'hidden_embedding', 'Embedding output key in saved_model.'
)
flags.DEFINE_integer(
'file_shards', 48, 'Number of sub-jobs to divide each input file into.'
)
flags.DEFINE_string('hints_tag', '', 'Species set tag for hints.')
flags.DEFINE_boolean('dry_run', False, 'Whether to exit after dry-run.')
PredictionTuple = collections.namedtuple(
'PredictionTuple', ['file_id', 'start_time', 'end_time', 'logits']
)
class EmbedFn(beam.DoFn):
"""Beam function for model inference."""
def __init__(
self,
model_path,
embedding_key,
separation_model_path=None,
sample_rate=22050,
hints_tag=None,
):
# Get a local copy of the inference.pb file.
self.model_path = model_path
self.separation_model_path = separation_model_path
self.inference_path = os.path.join(model_path, 'inference')
self.embedding_key = embedding_key
self.sample_rate = sample_rate
self.hints_tag = hints_tag
def setup(self):
# tf.compat.v1.disable_eager_execution()
# admittedly a bit brittle...
self.model_params = model_utils.load_params_from_json(self.model_path)
self.taxo = taxonomy.Taxonomy(self.model_path, DATA_PATH, SPECIES_INFO_PATH)
self.taxo.PrintEnumSizes()
self.hints = self.taxo.MakeSpeciesHints(species_list_tag=self.hints_tag)
if self.separation_model_path:
self.separation_model = model_utils.load_separation_model(
self.separation_model_path
)
classifiers = model_utils.load_classifier_ensemble(
self.model_path, max_runs=1
)
self.embedding_model = list(classifiers.values())[0]
def get_hints(self, batch_size):
if self.hints is not None:
hints = self.hints[np.newaxis, :]
hints = np.tile(hints, [batch_size, 1])
else:
hints = np.ones([batch_size, self.taxo.NumLabels()])
return hints
def embed(self, file_id, audio, timestamp_offset):
"""Convert target audio to embeddings."""
window_size_s = self.model_params.dataset.window_size_s
hop_size_s = window_size_s / 2
logging.info('...starting separation (%s)', file_id)
sep_chunks, raw_chunks = model_utils.separate_windowed(
audio,
self.separation_model,
hop_size_s,
window_size_s,
self.sample_rate,
)
raw_chunks = raw_chunks[:, np.newaxis, :]
stacked_chunks = np.concatenate([raw_chunks, sep_chunks], axis=1)
n_chunks = stacked_chunks.shape[0]
n_channels = stacked_chunks.shape[1]
big_batch = np.reshape(stacked_chunks, [n_chunks * n_channels, -1])
# We often get memory blowups at this point; trigger a garbage collection.
gc.collect()
logging.info('...creating embeddings (%s)', file_id)
embedding = model_utils.model_embed(
big_batch,
self.embedding_model,
hints=self.get_hints(big_batch.shape[0]),
output_key=self.embedding_key,
)
embedding = np.reshape(embedding, [n_chunks, n_channels, -1])
print('embedding shape : ', embedding.shape)
serialized_embedding = tf.io.serialize_tensor(embedding)
feature = {
'file_id': data_tools.BytesFeature(bytes(file_id, encoding='utf8')),
'timestamp_offset': data_tools.IntFeature(timestamp_offset),
'embedding': data_tools.BytesFeature(serialized_embedding.numpy()),
'embedding_shape': data_tools.IntFeature(embedding.shape),
}
ex = tf.train.Example(features=tf.train.Features(feature=feature))
beam.metrics.Metrics.counter('beaminference', 'segments_processed').inc()
return [ex]
@beam.typehints.with_output_types(Any)
def process(self, source_info, crop_s=-1):
audio_filepath, shard_num, num_shards = source_info
file_name = os.path.basename(audio_filepath)
file_id = file_name.split('.')[0]
try:
logging.info('...loading audio (%s)', audio_filepath)
audio, _ = data_tools.LoadAudio(audio_filepath, self.sample_rate)
except Exception as e: # pylint: disable=broad-except
beam.metrics.Metrics.counter('beaminference', 'load_audio_error').inc()
logging.error('Failed to load audio : %s', audio_filepath)
logging.exception('Load audio exception : %s', e)
return
if audio.shape[0] < 2 * self.model_params.dataset.window_size:
beam.metrics.Metrics.counter('beaminference', 'short_audio_error').inc()
logging.error('short audio file : %s', audio_filepath)
return
if num_shards > 1:
shard_len = audio.shape[0] // num_shards
timestamp_offset = shard_num * shard_len
audio = audio[timestamp_offset : timestamp_offset + shard_len]
else:
timestamp_offset = 0
if crop_s > 0:
audio = audio[: crop_s * self.sample_rate]
return self.embed(file_id, audio, timestamp_offset)
def get_counter(metrics, name):
counter = metrics.query(beam.metrics.MetricsFilter().with_name(name))[
'counters'
]
if not counter:
return 0
return counter[0].result
def main(unused_argv):
source_files = []
for pattern in FLAGS.source_files:
source_files += glob.glob(pattern)
print('Found %d source files.' % len(source_files))
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
source_file_splits = []
for s in source_files:
for i in range(FLAGS.file_shards):
source_file_splits.append((s, i, FLAGS.file_shards))
# Dry-run.
print('Starting dry run...')
test_fn = EmbedFn(
FLAGS.model_path,
FLAGS.embedding_key,
FLAGS.separation_model_path,
hints_tag=FLAGS.hints_tag,
)
test_fn.setup()
got_results = False
start = time.time()
print(source_file_splits[15])
for unused_p in test_fn.process(source_file_splits[15], crop_s=10):
got_results = True
elapsed = time.time() - start
if not got_results:
raise Exception('Something went wrong; no results found.')
test_fn.teardown()
print('Dry run successful! Party! Inference time : %5.3f' % elapsed)
if FLAGS.dry_run:
return
output_prefix = os.path.join(FLAGS.output_dir, 'embeddings')
pipeline = beam.Pipeline()
_ = (
pipeline
| beam.Create(source_file_splits)
| beam.ParDo(
EmbedFn(
FLAGS.model_path,
FLAGS.embedding_key,
FLAGS.separation_model_path,
hints_tag=FLAGS.hints_tag,
)
)
      # When a file is corrupted and can't be loaded, EmbedFn.process
      # returns None. In this case the lambda below returns false, which then
      # filters it out.
| beam.Filter(lambda x: x)
| beam.io.WriteToTFRecord(
output_prefix, coder=beam.coders.ProtoCoder(tf.train.Example)
)
)
pipeline.run()
if __name__ == '__main__':
app.run(main)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for managing taxonomy and enums.
Each tfRecord dataset encodes label, order, family, genus, and species with
separate enum values. This is a bit redundant, since we only really need to map
the label to the other parts of the taxonomy. However, it lets us avoid loading
up additional lookup tables when we ask the classifier to also predict other
levels of the taxonomy.
Basic idea for writing taxonomy enums:
* Construct a Taxonomy object T using a filepath containing only the
SPECIES_INFO_CSV.
* Each species has a common name, species code, genus, family and order.
The common name is hilariously unreliable, so we mainly use the species code
as the primary identifier of a species. Every species maps uniquely to a
genus, family, and order (in order of decreasing specificity).
* The SPECIES_INFO_CSV contains all info about all known species. Way more than
you want.
* Then create your own filtered list of species codes.
Feed them to T.GenerateEnums() to construct the enum lists, as described
above, and write them out to disk if desired.
Then to use the enums, construct the Taxonomy object, by pointing it at a
directory containing all of the output enums.
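Example sketch (hypothetical paths and species codes; the codes must appear in
the SPECIES_INFO_CSV):
  t = Taxonomy('models/my_model', data_path='data',
               species_info_path='data/species_info.csv')
  t.GenerateEnums(['amecro', 'blujay'])
  amecro_index = t.label_enum['amecro']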
"""
import csv
import os
from absl import logging
import dataset_info
import numpy as np
SPECIES = 'species'
LABEL = 'speciesCode'
GENUS = 'genus'
FAMILY = 'family'
ORDER = 'order'
COMMON_NAME = 'common_name'
UNKNOWN = 'unknown'
NONBIRD = 'nonbird'
HUMAN = 'human'
class Taxonomy(object):
"""Manages taxonomy info."""
def __init__(self, model_path, data_path=None, species_info_path=None):
self.model_path = model_path
self.data_path = data_path
self.species_info_path = species_info_path
self.species_info = LoadSpeciesInfo(species_info_path)
if os.path.exists(os.path.join(self.model_path, 'info.json')):
self.LoadEnumsFromDatasetInfo()
else:
print('No info.json found.')
def __getitem__(self, key):
return self.species_info[key]
def NumLabels(self):
return len(self.label_enum) // 2
def PrintEnumSizes(self):
print('label : ', len(self.label_enum) // 2)
print('genus : ', len(self.genus_enum) // 2)
print('family : ', len(self.family_enum) // 2)
print('order : ', len(self.order_enum) // 2)
def CommonNameToSpeciesCode(self, common_name):
cn = common_name.strip().lower()
for k, v in self.species_info.items():
if v[COMMON_NAME] == cn:
return k
elif cn.replace('-', '') == k.replace('-', ''):
return k
return None
def BackgroundSpeciesLookup(self, bg_string):
"""Get species label from a background species string."""
if '(' not in bg_string:
return self.CommonNameToSpeciesCode(bg_string)
common, latin = bg_string.split('(')
common = common.strip().lower()
latin = latin.replace(')', '').strip().lower()
genus = latin.split(' ')[0].lower()
species = latin[len(genus) :].strip().lower()
for k, v in self.species_info.items():
if v[COMMON_NAME] == common:
return k
# Failed to find an exact match for the common name; try to find from latin.
for k, v in self.species_info.items():
if v[GENUS] != genus:
continue
if v[SPECIES].startswith(species) or species.startswith(v[SPECIES]):
# Consider it a match.
return k
return None
def BackgroundSpeciesToCodeList(self, bg_string, separator=';'):
code_list = []
for b in bg_string.split(separator):
code = self.BackgroundSpeciesLookup(b)
if code is not None:
code_list.append(code)
return code_list
def CodeListToEnumList(self, code_list):
"""Convert list of codes to enum list."""
enums = [self.label_enum.get(s, -1) for s in code_list]
enums = [x for x in enums if x >= 0]
return enums
def LoadEnumsFromDatasetInfo(self):
ds_info = dataset_info.read_dataset_info(self.model_path)
self.label_enum = {i: k for (i, k) in enumerate(ds_info.label_set)}
self.label_enum.update({k: i for (i, k) in enumerate(ds_info.label_set)})
self.genus_enum = {i: k for (i, k) in enumerate(ds_info.genus_set)}
self.genus_enum.update({k: i for (i, k) in enumerate(ds_info.genus_set)})
self.family_enum = {i: k for (i, k) in enumerate(ds_info.family_set)}
self.family_enum.update({k: i for (i, k) in enumerate(ds_info.family_set)})
self.order_enum = {i: k for (i, k) in enumerate(ds_info.order_set)}
self.order_enum.update({k: i for (i, k) in enumerate(ds_info.order_set)})
def GenerateEnum(
self, code_list, enum_type=ORDER, other_labels=None, code_whitelist=None
):
"""Create an Enum mapping for the provided list of species codes."""
if other_labels is None:
other_labels = [UNKNOWN, NONBIRD, HUMAN]
code_list = sorted(code_list)
enum = {}
if code_whitelist:
keys = [
self.species_info[c][enum_type]
for c in code_list
if c in code_whitelist
]
else:
keys = [self.species_info[c][enum_type] for c in code_list]
keys = sorted(list(set(keys)))
keys = other_labels + keys
for i, c in enumerate(keys):
enum[c] = i
enum[i] = c
return enum
def GenerateEnums(self, code_list, whitelist=None):
"""Generate enums from provided code list."""
if whitelist is None:
whitelist = {}
self.label_enum = self.GenerateEnum(
code_list, LABEL, code_whitelist=whitelist
)
self.order_enum = self.GenerateEnum(
code_list, ORDER, code_whitelist=whitelist
)
self.family_enum = self.GenerateEnum(
code_list, FAMILY, code_whitelist=whitelist
)
self.genus_enum = self.GenerateEnum(
code_list, GENUS, code_whitelist=whitelist
)
def TranslateLabelVector(self, label_vector, other_taxonomy_path):
"""Convert a label vector from another taxonomy to this one."""
taxo_old = Taxonomy(self.model_path, data_path=other_taxonomy_path)
if label_vector.shape[1] != taxo_old.NumLabels():
raise ValueError(
'Label vector for conversion has shape %s, but '
'the taxonomy has %d labels.'
% (label_vector.shape, taxo_old.NumLabels())
)
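    # Build a one-hot translation matrix mapping old label indices to new
    # ones; species missing from this taxonomy map to index 0.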
trans = np.zeros(
[taxo_old.NumLabels(), self.NumLabels()], label_vector.dtype
)
misses = []
for i in range(taxo_old.NumLabels()):
sp = taxo_old.label_enum[i]
new_index = self.label_enum.get(sp, 0)
if i > 0 and not new_index:
misses.append(sp)
trans[i, new_index] = 1
labels_new = np.matmul(label_vector, trans)
if misses:
print('Some species were not in this taxonomy : %s' % misses)
return labels_new
def MakeSpeciesHints(
self,
species_list=None,
species_list_tag='',
dataset_info_path='',
csv_path='',
):
"""Create a species hint vector from a provided list or taxonomy info."""
if (
species_list is None
and not dataset_info_path
and not csv_path
and not species_list_tag
):
logging.info('Using all-ones species hints.')
return np.ones([self.NumLabels()], np.float32)
hints = np.zeros([self.NumLabels()], np.float32)
if species_list_tag:
csv_fn = '%s_birds.csv' % species_list_tag.replace('taxo_', '')
csv_fp = os.path.join(self.data_path, csv_fn)
if os.path.exists(csv_fp):
with open(csv_fp) as f:
for r in f:
sp = r.strip()
if sp in self.label_enum:
hints[self.label_enum[sp]] = 1
else:
raise ValueError(
'File with the desired hints cannot be found : %s' % csv_fp
)
if csv_path:
with open(csv_path) as f:
for r in f:
sp = r.strip()
if sp in self.label_enum:
hints[self.label_enum[sp]] = 1
if dataset_info_path:
ds_info = dataset_info.read_dataset_info(dataset_info_path)
for sp in ds_info.label_set:
if sp in self.label_enum:
hints[self.label_enum[sp]] = 1
if species_list:
for sp in species_list:
if sp in self.label_enum:
hints[self.label_enum[sp]] = 1
if np.sum(hints) == 0:
raise ValueError('Tried loading hints, but no matches found.')
logging.info('Loaded %d hints.', np.sum(hints))
return hints
def LoadEnum(filepath):
"""Load an enum file into a two-way dict."""
enum = {}
with open(filepath) as c:
for row in c:
(i, v) = row.split(',')
index = int(i)
label = v.lower().strip()
enum[index] = label
enum[label] = index
return enum
def LoadSpeciesInfo(species_info_path=None):
"""Load a dict mapping species codes to full taxonomy info for the species."""
species_info = {}
if species_info_path and os.path.exists(species_info_path):
with open(species_info_path, 'rt') as c:
species_info_raw = c.read()
reader = csv.DictReader(species_info_raw.splitlines())
for row in reader:
species_info[row[LABEL]] = row
for k in ['none', 'unknown', 'human']:
species_info[k] = {
COMMON_NAME: k,
'enum': '0',
FAMILY: k,
GENUS: k,
ORDER: k,
SPECIES: k,
}
return species_info
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Inference.
Example command line:
python -m beam_inference \
--source_files=test_files/*.wav \
--model_path=models/taxo_lori_34078450 \
--separation_model_path=models/separator4 \
--output_dir=lorikeet_inference \
--target_species=blakit1 \
--hints_tag=eaus \
--min_logit=-3.0
"""
import collections
import gc
import glob
import os
import time
from typing import Any
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
import data_tools
import model_utils
import numpy as np
import taxonomy
FLAGS = flags.FLAGS
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
SPECIES_INFO_PATH = os.path.join(DATA_PATH, 'species_info.csv')
ENSEMBLE_SIZE = 3
flags.DEFINE_list('source_files', [], 'Source audio files (wav or mp3).')
flags.DEFINE_string(
'model_path', '', 'Where to find the model params and inference.pb'
)
flags.DEFINE_string(
'separation_model_path', '', 'Where to find the separation inference model.'
)
flags.DEFINE_string(
'target_species', '', 'Species code for single-species mode.'
)
flags.DEFINE_string('output_dir', '', 'Where to dump output data.')
flags.DEFINE_integer('num_shards', 2000, 'Number of CSV output shards.')
flags.DEFINE_float(
'min_logit',
-1.0,
'Only emit predictions if a logit is above this threshold.',
)
flags.DEFINE_integer(
'file_shards', 48, 'Number of sub-jobs to divide each input file into.'
)
flags.DEFINE_string('hints_tag', '', 'Species set tag for hints.')
flags.DEFINE_boolean('dry_run', False, 'Whether to exit after dry-run.')
PredictionTuple = collections.namedtuple(
'PredictionTuple', ['file_id', 'start_time', 'end_time', 'logits']
)
class InferenceFn(beam.DoFn):
"""Beam function for model inference."""
def __init__(
self,
model_path,
separation_model_path=None,
min_logit=-20.0,
target_species='',
sample_rate=22050,
hints_tag=None,
):
# Get a local copy of the inference.pb file.
self.model_path = model_path
self.separation_model_path = separation_model_path
self.inference_path = os.path.join(model_path, 'inference')
self.min_logit = min_logit
self.target_species = target_species
self.sample_rate = sample_rate
self.hints_tag = hints_tag
def setup(self):
# tf.compat.v1.disable_eager_execution()
# admittedly a bit brittle...
self.model_params = model_utils.load_params_from_json(self.model_path)
self.taxo = taxonomy.Taxonomy(self.model_path, DATA_PATH, SPECIES_INFO_PATH)
if self.target_species and self.target_species not in self.taxo.label_enum:
raise ValueError(
'Target species %s not found in taxonomy label enum.'
% self.target_species
)
self.hints = self.taxo.MakeSpeciesHints(species_list_tag=self.hints_tag)
if self.separation_model_path:
self.separation_model = model_utils.load_separation_model(
self.separation_model_path
)
self.classifiers = model_utils.load_classifier_ensemble(
self.model_path, max_runs=ENSEMBLE_SIZE
)
def get_hints(self, batch_size):
if self.hints is not None:
hints = self.hints[np.newaxis, :]
hints = np.tile(hints, [batch_size, 1])
else:
hints = np.ones([batch_size, self.taxo.NumLabels()])
return hints
def infer_target_species(self, file_id, audio, timestamp_offset):
"""Create full taxonomy logits for the target species."""
window_size_s = self.model_params.dataset.window_size_s
hop_size_s = window_size_s / 2
logging.info('...starting separation (%s)', file_id)
sep_chunks, raw_chunks = model_utils.separate_windowed(
audio,
self.separation_model,
hop_size_s,
window_size_s,
self.sample_rate,
)
raw_chunks = raw_chunks[:, np.newaxis, :]
stacked_chunks = np.concatenate([raw_chunks, sep_chunks], axis=1)
n_chunks = stacked_chunks.shape[0]
n_channels = stacked_chunks.shape[1]
big_batch = np.reshape(stacked_chunks, [n_chunks * n_channels, -1])
# We often get memory blowups at this point; trigger a garbage collection.
gc.collect()
sp_info = self.taxo.species_info[self.target_species]
indices = {
'label': self.taxo.label_enum[self.target_species],
'genus': self.taxo.genus_enum[sp_info['genus']],
'family': self.taxo.family_enum[sp_info['family']],
'order': self.taxo.order_enum[sp_info['order']],
}
target_taxo_logits = {}
for logits_key, key_index in indices.items():
logging.info('...starting classification (%s, %s)', file_id, logits_key)
_, logits = model_utils.ensemble_classify(
big_batch,
self.classifiers,
hints=self.get_hints(big_batch.shape[0]),
logits_key=logits_key,
)
unbatched_logits = np.reshape(
logits,
[
n_chunks,
n_channels,
logits.shape[1], # ensemble
logits.shape[2], # num classes
],
)
# Take the mean logits over the ensemble.
unbatched_logits = np.mean(unbatched_logits, axis=2)
# Take the max logit over all separated and raw channels.
unbatched_logits = np.max(unbatched_logits, axis=1)
# Choose the logits for the target species.
target_logits = unbatched_logits[:, key_index]
# Apply time averaging.
target_logits = (target_logits[:-1] + target_logits[1:]) / 2
target_taxo_logits[logits_key] = target_logits
# All taxo logits should have the same shape: [T]
# Assemble into a single array.
all_logits = [
target_taxo_logits['label'][:, np.newaxis],
target_taxo_logits['genus'][:, np.newaxis],
target_taxo_logits['family'][:, np.newaxis],
target_taxo_logits['order'][:, np.newaxis],
]
all_logits = np.concatenate(all_logits, axis=1)
for i in range(all_logits.shape[0]):
if np.max(all_logits[i]) < self.min_logit:
continue
beam.metrics.Metrics.counter('beaminference', 'predictions').inc()
time_stamp = (i + 1) * hop_size_s + (timestamp_offset / self.sample_rate)
prediction = PredictionTuple(
file_id, time_stamp, time_stamp + hop_size_s, all_logits[i]
)
yield prediction_to_csv(prediction)
beam.metrics.Metrics.counter('beaminference', 'files_processed').inc()
def infer_all(
self,
audio_filepath,
audio,
file_id,
window_size_s,
hop_size_s,
timestamp_offset,
):
"""Create label logits for all species."""
start = time.time()
logging.info('...starting separate+classify (%s)', file_id)
_, reduced_logits = model_utils.separate_classify(
audio,
self.classifiers,
self.separation_model,
hop_size_s=hop_size_s,
window_size_s=window_size_s,
sample_rate=self.sample_rate,
hints=self.get_hints(1),
)
elapsed = time.time() - start
    logging.info('finished separate+classify. %5.3fs elapsed', elapsed)
beam.metrics.Metrics.distribution(
'beaminference', 'inference_duration_s'
).update(elapsed)
if reduced_logits is None:
beam.metrics.Metrics.counter('beaminference', 'no_logits_returned').inc()
logging.error('no logits from inference : %s', audio_filepath)
return
time_averaged_logits = (reduced_logits[:-1] + reduced_logits[1:]) / 2
for i in range(time_averaged_logits.shape[0]):
if np.max(time_averaged_logits[i]) < self.min_logit:
continue
beam.metrics.Metrics.counter('beaminference', 'predictions').inc()
time_stamp = (i + 1) * hop_size_s + (timestamp_offset / self.sample_rate)
prediction = PredictionTuple(
file_id, time_stamp, time_stamp + hop_size_s, time_averaged_logits[i]
)
yield prediction_to_csv(prediction)
beam.metrics.Metrics.counter('beaminference', 'files_processed').inc()
@beam.typehints.with_output_types(Any)
def process(self, source_info, crop_s=-1):
audio_filepath, shard_num, num_shards = source_info
file_name = os.path.basename(audio_filepath)
file_id = file_name.split('.')[0]
# self.sample_rate = self.model_params.dataset.sample_frequency
window_size_s = self.model_params.dataset.window_size_s
hop_size_s = window_size_s / 2
try:
logging.info('...loading audio (%s)', audio_filepath)
audio, _ = data_tools.LoadAudio(audio_filepath, self.sample_rate)
except Exception as e: # pylint: disable=broad-except
beam.metrics.Metrics.counter('beaminference', 'load_audio_error').inc()
logging.error('Failed to load audio : %s', audio_filepath)
logging.exception('Load audio exception : %s', e)
return
if audio.shape[0] < 2 * self.model_params.dataset.window_size:
beam.metrics.Metrics.counter('beaminference', 'short_audio_error').inc()
logging.error('short audio file : %s', audio_filepath)
return
if num_shards > 1:
shard_len = audio.shape[0] // num_shards
timestamp_offset = shard_num * shard_len
audio = audio[timestamp_offset : timestamp_offset + shard_len]
else:
timestamp_offset = 0
if crop_s > 0:
audio = audio[: crop_s * self.sample_rate]
if self.target_species:
for pred in self.infer_target_species(file_id, audio, timestamp_offset):
yield pred
else:
for pred in self.infer_all(
audio_filepath,
audio,
file_id,
window_size_s,
hop_size_s,
timestamp_offset,
):
yield pred
def prediction_to_csv(prediction):
logits = ['%1.3f' % l for l in prediction.logits]
csv_row = ','.join(
[
prediction.file_id,
'%1.5f' % prediction.start_time,
'%1.5f' % prediction.end_time,
]
+ logits
)
return csv_row
def get_counter(metrics, name):
counter = metrics.query(beam.metrics.MetricsFilter().with_name(name))[
'counters'
]
if not counter:
return 0
return counter[0].result
def main(unused_argv):
source_files = []
for pattern in FLAGS.source_files:
source_files += glob.glob(pattern)
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
print('Found %d source files.' % len(source_files))
source_file_splits = []
for s in source_files:
for i in range(FLAGS.file_shards):
source_file_splits.append((s, i, FLAGS.file_shards))
# Dry-run.
print('Starting dry run...')
test_fn = InferenceFn(
FLAGS.model_path,
FLAGS.separation_model_path,
target_species=FLAGS.target_species,
hints_tag=FLAGS.hints_tag,
)
test_fn.setup()
got_results = False
start = time.time()
print(source_file_splits[15])
for p in test_fn.process(source_file_splits[15], crop_s=10):
got_results = True
print(p)
elapsed = time.time() - start
if not got_results:
raise Exception('Something went wrong; no results found.')
test_fn.teardown()
print('Dry run successful! Party! Inference time : %5.3f' % elapsed)
if FLAGS.dry_run:
return
output_prefix = os.path.join(FLAGS.output_dir, 'predictions')
pipeline = beam.Pipeline()
_ = (
pipeline
| beam.Create(source_file_splits)
| beam.ParDo(
InferenceFn(
FLAGS.model_path,
FLAGS.separation_model_path,
min_logit=FLAGS.min_logit,
target_species=FLAGS.target_species,
hints_tag=FLAGS.hints_tag,
)
)
# When a file is corrupted and can't be loaded InferenceFn
# returns None. In this case the lambda below returns false, which then
# filters it out.
| beam.Filter(lambda x: x)
| beam.io.WriteToText(output_prefix, file_name_suffix='.csv')
)
pipeline.run()
if __name__ == '__main__':
app.run(main)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic reuable audio transformations. Like melspec."""
from chirp.birb_sep_paper import pcen_ops
import numpy as np
from scipy import signal
import tensorflow as tf
MELSPEC_PARAMS_BASE = {
'frame_length_secs': 0.08,
'lower_edge_hertz': 60.0,
'upper_edge_hertz': 10000.0,
}
def Melspec(
audio,
sample_frequency,
melspec_frequency=25,
frame_length_secs=0.08,
melspec_depth=160,
lower_edge_hertz=60.0,
upper_edge_hertz=7920.0,
log_floor=1e-2,
log_offset=0.0,
logmel_scalar=0.1,
pcen_alpha=0.5,
pcen_s=0.1,
pcen_beta=0.5,
pcen_delta=2.0,
pcen_floor=1e-6,
scaling='log',
batched=False,
):
"""Convert audio to melspectrogram, using params."""
# Add front padding so that mel window aligns with audio frame.
frame_step = int(sample_frequency / melspec_frequency)
frame_length = int(sample_frequency * frame_length_secs)
if not batched:
# Prepare shape for stft operation.
audio = tf.expand_dims(audio, 0)
num_padded_samples = int(0.5 * (frame_length - frame_step))
if num_padded_samples > 0:
padding = tf.zeros([tf.shape(audio)[0], num_padded_samples], audio.dtype)
audio = tf.concat([padding, audio], axis=1)
# stfts is a complex64 Tensor representing the Short-time Fourier Transform
# of audio. Its shape is [1, ?, num_spectrogram_bins]
stfts = tf.signal.stft(
audio, frame_length=frame_length, frame_step=frame_step, pad_end=True
)
# An energy spectrogram is the magnitude of the complex-valued STFT.
# A float32 Tensor of shape [batch_size, ?, num_spectrogram_bins].
magnitude_spectrograms = tf.abs(stfts)
num_spectrogram_bins = tf.shape(magnitude_spectrograms)[-1]
# Warp the linear-scale magnitude spectrograms into the mel-scale.
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
melspec_depth,
num_spectrogram_bins,
sample_frequency,
lower_edge_hertz,
upper_edge_hertz,
)
mel_spectrograms = tf.tensordot(
magnitude_spectrograms, linear_to_mel_weight_matrix, 1
)
mel_spectrograms.set_shape(
magnitude_spectrograms.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]
)
)
if scaling == 'log':
# Mimics the stabilized log used in mel_utils:
# np.log(np.maximum(data, floor) + additive_offset)
x = tf.log(tf.maximum(mel_spectrograms, log_floor) + log_offset)
x = logmel_scalar * x
elif scaling == 'pcen':
x = pcen_ops.fixed_pcen(
mel_spectrograms,
alpha=pcen_alpha,
smooth_coef=pcen_s,
delta=pcen_delta,
root=(1.0 / pcen_beta),
floor=pcen_floor,
)
elif scaling == 'raw':
x = mel_spectrograms
else:
raise ValueError('Unrecognized melspectrogram scaling mode.')
num_frames = tf.shape(audio)[1] // sample_frequency * melspec_frequency + 1
x = x[:, :num_frames]
if not batched:
x = tf.squeeze(x, 0)
return x
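# A minimal usage sketch (TF1-style graph execution, mirroring
# FindPeaksFromAudio below; `samples` is a 1-D float32 numpy array at 22050 Hz):
#
#   with tf.Graph().as_default():
#     ms = Melspec(audio=np.float32(samples), sample_frequency=22050,
#                  **MELSPEC_PARAMS_BASE)
#     ms = tf.Session().run(ms)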
def GetAugmentedMelspec(
audio, sample_frequency, melspec_params, feature_cleaning, filter_params
):
"""Build melspec, apply freq domain augmentation, then clean-up."""
batched = len(audio.shape) == 2
melspec_base = Melspec(
audio=audio,
batched=batched,
sample_frequency=sample_frequency,
**melspec_params
)
if not batched:
melspec_base = tf.expand_dims(melspec_base, 0)
if filter_params:
melspec_base = FilterAugmentMelspec(melspec_base, filter_params)
melspec_base = CleanupMelspec(melspec_base, feature_cleaning)
if not batched:
melspec_base = melspec_base[0]
return melspec_base
def CleanupMelspec(melspec, feature_cleaning):
"""Apply the chosen melspec cleanup technique."""
if 'strategy' not in feature_cleaning:
# Default to doing nothing.
return melspec
if feature_cleaning['strategy'] == 'whiten':
melspec = TwoStageWhitening(
melspec, thresh=feature_cleaning['clean_thresh']
)
elif feature_cleaning['strategy'] == 'denoise':
melspec = MixtureDenoise(melspec, thresh=feature_cleaning['clean_thresh'])
elif feature_cleaning['strategy'] == 'softmask':
raise ValueError('Softmask denoiser was removed.')
elif feature_cleaning and feature_cleaning['strategy']:
raise ValueError(
'Unknown feature cleaning strategy : %s' % feature_cleaning
)
return melspec
def FilterAugmentMelspec(melspec, filter_params):
"""Apply filtering augmentation to the melspec batch."""
if (
not filter_params
or not filter_params.strategy
or filter_params.filter_probability <= 0
):
return melspec
if filter_params.strategy == 'spec_augment':
filtered = ApplySpecAugment(melspec, batched=True, **filter_params)
elif filter_params.strategy == 'random_lowpass':
filters = LowpassFiltersBatch(tf.shape(melspec)[0], melspec.shape[-1])
# Add a time dimension for broadcasting.
filters = tf.expand_dims(filters, 1)
filtered = melspec * filters
else:
raise ValueError(
'Unknown filter augmentation strategy : %s' % filter_params.strategy
)
mask = tf.less_equal(
tf.random.uniform([tf.shape(melspec)[0]], 0.0, 1.0),
filter_params.filter_probability,
)
mask = tf.cast(tf.expand_dims(tf.expand_dims(mask, 1), 2), melspec.dtype)
melspec = mask * filtered + (1 - mask) * melspec
return melspec
def TwoStageWhitening(batched_melspec, thresh=0.5):
"""Remove mean and std from melspec, excluding large signal-like values."""
feature_mean = tf.expand_dims(tf.math.reduce_mean(batched_melspec, axis=1), 1)
feature_std = tf.expand_dims(tf.math.reduce_std(batched_melspec, axis=1), 1)
# Remove extreme outliers, and re-estimate mean and std.
mask = tf.cast(
tf.less_equal(
tf.abs(batched_melspec - feature_mean), thresh * feature_std + 1e-4
),
batched_melspec.dtype,
)
# number of non-zero elements per channel.
denom = tf.math.reduce_sum(mask, axis=1)
masked_x = mask * batched_melspec
masked_mean = tf.math.reduce_sum(masked_x, axis=1) / (denom + 1)
masked_mean = tf.expand_dims(masked_mean, 1)
masked_std = tf.reduce_sum(
mask * tf.square(batched_melspec - masked_mean), axis=1
)
masked_std = tf.sqrt(masked_std / (denom + 1))
masked_std = tf.expand_dims(masked_std, 1)
return (batched_melspec - masked_mean) / (masked_std + 1)
def MixtureDenoise(batched_melspec, thresh=1.5):
"""Denoise melspec using an estimated Gaussian noise distribution.
Forms a noise estimate by a) estimating mean+std, b) removing extreme
values, c) re-estimating mean+std for the noise, and then d) classifying
values in the spectrogram as 'signal' or 'noise' based on likelihood under
the revised estimate. We then apply a mask to return the signal values.
Args:
batched_melspec: Batched melspectrogram with shape [B, T, D]
    thresh: z-score threshold for separating signal from noise. On the first
pass, we use 2*thresh, and on the second pass we use thresh directly.
Returns:
Batch of denoised melspectrograms.
"""
x = batched_melspec
feature_mean = tf.expand_dims(tf.math.reduce_mean(x, axis=1), 1)
feature_std = tf.expand_dims(tf.math.reduce_std(x, axis=1), 1)
demeaned = x - feature_mean
is_signal = tf.greater_equal(demeaned, 2 * thresh * feature_std)
is_signal = tf.cast(is_signal, x.dtype)
is_noise = 1.0 - is_signal
noise_counts = tf.reduce_sum(is_noise, axis=1)
noise_mean = tf.math.reduce_sum(x * is_noise, axis=1) / (noise_counts + 1)
noise_mean = tf.expand_dims(noise_mean, 1)
noise_var = tf.reduce_sum(is_noise * tf.square(x - noise_mean), axis=1)
noise_std = tf.sqrt(noise_var / (noise_counts + 1))
noise_std = tf.expand_dims(noise_std, 1)
# Recompute signal/noise separation.
demeaned = x - noise_mean
is_signal = tf.greater_equal(demeaned, thresh * noise_std)
is_signal = tf.cast(is_signal, x.dtype)
is_noise = 1.0 - is_signal
signal_part = is_signal * x
noise_part = is_noise * noise_mean
reconstructed = signal_part + noise_part - noise_mean
return reconstructed
def FindPeaks(summed_spectral_magnitudes, stft_fps):
"""Locate peaks inside signal of summed spectral magnitudes.
Args:
summed_spectral_magnitudes: List of summed spectral components.
stft_fps: Number of summed magnitude bins per second. Calculated from the
      original sample rate of the waveform.
Returns:
List of filtered peak indices in the array of summed spectral magnitudes.
"""
threshold = np.mean(summed_spectral_magnitudes) * 1.5
min_width = int(round(0.5 * stft_fps))
max_width = int(round(2 * stft_fps))
width_step_size = int(round((max_width - min_width) / 10))
widths = range(min_width, max_width, width_step_size)
peaks = signal.find_peaks_cwt(summed_spectral_magnitudes, widths)
margin_frames = int(round(0.3 * stft_fps))
filt_peaks = []
for x in peaks:
passing = [
y >= threshold
for y in summed_spectral_magnitudes[
x - margin_frames : x + margin_frames
]
]
if any(passing):
filt_peaks.append(x)
return filt_peaks
def FindPeaksFromAudio(audio, sample_rate_hz, max_peaks=-1):
"""Construct melspec and find peaks."""
melspec_rate_hz = 100
audio = np.float32(audio)
with tf.Graph().as_default():
with tf.device('cpu:0'):
melspec = Melspec(
audio=audio,
batched=False,
sample_frequency=sample_rate_hz,
melspec_frequency=melspec_rate_hz,
upper_edge_hertz=10000.0,
)
melspec = tf.expand_dims(melspec, 0)
melspec = MixtureDenoise(melspec, 0.75)[0]
melspec = tf.Session().run(melspec)
peaks = FindPeaks(np.sum(melspec, axis=1), melspec_rate_hz)
peak_energies = np.sum(melspec, axis=1)[peaks]
def TMelToTAu(tm):
return 1.0 * tm * sample_rate_hz / melspec_rate_hz
peaks = [TMelToTAu(p) for p in peaks]
peak_set = sorted(zip(peak_energies, peaks), reverse=True)
if max_peaks > 0 and len(peaks) > max_peaks:
peak_set = peak_set[:max_peaks]
peaks = [p[1] for p in peak_set]
return peaks
def MidpointToInterval(midpoint, length_t, min_t, max_t):
"""Find start and endpoints for interval, given a desired midpoint."""
left_endpoint = midpoint - length_t / 2
right_endpoint = midpoint + length_t / 2
# Shift endpoints to try to make the interval length_t, if possible.
right_overhang = max(right_endpoint - max_t, 0)
left_endpoint -= right_overhang
left_overhang = max(min_t - left_endpoint, 0)
right_endpoint += left_overhang
left_endpoint = int(max(min_t, left_endpoint))
right_endpoint = int(min(max_t, right_endpoint))
return (left_endpoint, right_endpoint)
def SlicePeakedAudio(audio, sample_rate_hz, interval_s=2, max_intervals=5):
"""Extract audio intervals from melspec peaks."""
if audio.shape[0] <= interval_s * sample_rate_hz:
return {(0, audio.shape[0]): audio}
peaks = FindPeaksFromAudio(audio, sample_rate_hz, max_intervals)
interval_samples = int(interval_s * sample_rate_hz)
intervals = {
MidpointToInterval(p, interval_samples, 0, audio.shape[0]) for p in peaks
}
intervals = {(a, b): audio[a:b] for (a, b) in intervals}
return intervals
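# A minimal usage sketch (hypothetical audio; `samples` is a 1-D numpy array of
# waveform samples):
#
#   intervals = SlicePeakedAudio(samples, sample_rate_hz=22050,
#                                interval_s=6, max_intervals=5)
#   for (start, end), chunk in intervals.items():
#     pass  # e.g. write each peak-centered chunk out as a training example.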
def LowpassFiltersBatch(batch_size=64, channels=160):
"""Create a batch of random low-pass rolloff frequency envelopes."""
slopes = tf.random_uniform([batch_size, 1], minval=2, maxval=8)
offsets = tf.random_uniform([batch_size, 1], minval=0, maxval=5)
xspace = tf.expand_dims(tf.linspace(0.0, 1.0, channels), 0)
xspace = tf.tile(xspace, [batch_size, 1])
envelopes = 1 - 0.5 * (tf.tanh(slopes * (xspace - 0.5) - offsets) + 1)
return envelopes
def ApplySpecAugmentMask(target, axis, min_length=0.0, max_length=0.5):
"""Generate 0/1 mask."""
batch_size = tf.shape(target)[0]
dtype = target.dtype
masked_portion = tf.random.uniform(
[batch_size], minval=min_length, maxval=max_length, dtype=dtype
)
mask_length = tf.cast(
masked_portion * tf.cast(target.shape[axis], tf.float32), tf.int64
)
diag = tf.range(target.shape.as_list()[axis], dtype=tf.int64)
diag = tf.expand_dims(diag, 0)
diag = tf.tile(diag, [batch_size, 1])
mask = tf.greater_equal(diag, tf.expand_dims(mask_length, 1))
mask = tf.cast(mask, dtype)
# Roll each batch element randomly...
# pylint: disable=g-long-lambda
def RandRoll(x):
return tf.roll(
x,
tf.random.uniform(
[], minval=0, maxval=target.shape[axis], dtype=tf.int64
),
axis=0,
)
mask = tf.map_fn(RandRoll, mask, dtype=mask.dtype, back_prop=False)
if axis == 1:
mask = tf.expand_dims(mask, axis=2)
else:
mask = tf.expand_dims(mask, axis=1)
masked = mask * target
return masked
def ApplySpecAugment(raw_melspec, batched=False, **kwargs):
"""Apply spectral augmentations."""
if not batched:
melspec = tf.expand_dims(raw_melspec, 0)
else:
melspec = raw_melspec
  # Default to masking at most half of the axis when no explicit limit is set.
  max_length = kwargs.get('specaug_max_freq_mask', 0.5)
melspec = ApplySpecAugmentMask(
melspec, 2, min_length=0.0, max_length=max_length
)
melspec = ApplySpecAugmentMask(
melspec, 1, min_length=0.0, max_length=max_length
)
if batched:
return melspec
else:
return melspec[0, :, :]
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract embeddings for a corpus of audio."""
import time
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
from chirp import config_utils
from chirp.configs import config_globals
from chirp.inference import embed_lib
from etils import epath
import numpy as np
FLAGS = flags.FLAGS
_CONFIG_KEY = flags.DEFINE_string(
'config', 'raw_soundscapes', 'Name of the config to use.'
)
_DRY_RUN_ONLY = flags.DEFINE_bool(
'dry_run', False, 'Whether to execute a dry-run only.'
)
_DRY_RUN_CROP_S = flags.DEFINE_float(
'dry_run_crop_s', 10.0, 'Amount of audio to use for dry run.'
)
def dry_run(config, source_infos):
"""Perform a dry run: check that the model loads and can process a file."""
test_embed_fn = embed_lib.EmbedFn(**config.embed_fn_config)
print('starting dry run....')
test_embed_fn.setup()
print(' loaded test model....')
start = time.time()
test_source = np.random.choice(source_infos)
print(f' processing test source {test_source}')
got = test_embed_fn.process(test_source, _DRY_RUN_CROP_S.value)
elapsed = time.time() - start
if not got:
# pylint: disable=broad-exception-raised
raise ValueError('Something went wrong; no results found.')
test_embed_fn.teardown()
print(f'Dry run successful! Party! Inference time : {elapsed:5.3f}')
def main(unused_argv: Sequence[str]) -> None:
logging.info('Loading config')
config = embed_lib.get_config(_CONFIG_KEY.value)
config = config_utils.parse_config(config, config_globals.get_globals())
logging.info('Locating source files...')
# Create and run the beam pipeline.
source_infos = embed_lib.create_source_infos(
config.source_file_patterns,
config.num_shards_per_file,
config.shard_len_s,
)
logging.info('Found %d source infos.', len(source_infos))
if not source_infos:
raise ValueError('No source infos found.')
if _DRY_RUN_ONLY.value:
dry_run(config, source_infos)
return
output_dir = epath.Path(config.output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
embed_lib.maybe_write_config(config, output_dir)
options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner',
direct_num_workers=config.num_direct_workers,
direct_running_mode='in_memory')
pipeline = beam.Pipeline(options=options)
embed_fn = embed_lib.EmbedFn(**config.embed_fn_config)
embed_lib.build_run_pipeline(
pipeline, config.output_dir, source_infos, embed_fn
)
if __name__ == '__main__':
app.run(main)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of inference interfaces for applying trained models."""
import dataclasses
import tempfile
from typing import Any
from absl import logging
from chirp.inference import interface
from chirp.models import frontend
from chirp.models import handcrafted_features
from chirp.taxonomy import namespace
from chirp.taxonomy import namespace_db
from etils import epath
from ml_collections import config_dict
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
import tensorflow_hub as hub
def model_class_map() -> dict[str, Any]:
"""Get the mapping of model keys to classes."""
return {
'taxonomy_model_tf': TaxonomyModelTF,
'separator_model_tf': SeparatorModelTF,
'birb_separator_model_tf1': BirbSepModelTF1,
'birdnet': BirdNet,
'placeholder_model': PlaceholderModel,
'separate_embed_model': SeparateEmbedModel,
'tfhub_model': TFHubModel,
}
@dataclasses.dataclass
class SeparateEmbedModel(interface.EmbeddingModel):
"""Wrapper for separate separation and embedding models.
Note: Use the separation model's sample rate. The embedding model's sample
rate is used to resample prior to computing the embedding.
Attributes:
separation_model: SeparationModelTF.
embedding_model: TaxonomyModelTF.
embed_raw: If True, the outputs will include embeddings of the original
audio in addition to embeddings for the separated channels. The embeddings
will have shape [T, C+1, D], with the raw audio embedding on channel 0.
"""
separator_model_tf_config: config_dict.ConfigDict
taxonomy_model_tf_config: config_dict.ConfigDict
separation_model: 'SeparatorModelTF'
embedding_model: 'TaxonomyModelTF'
embed_raw: bool = True
@classmethod
def from_config(cls, config: config_dict.ConfigDict) -> 'SeparateEmbedModel':
separation_model = SeparatorModelTF.from_config(
config.separator_model_tf_config
)
embedding_model = TaxonomyModelTF.from_config(
config.taxonomy_model_tf_config
)
return cls(
separation_model=separation_model,
embedding_model=embedding_model,
**config,
)
def __post_init__(self):
if self.separation_model.sample_rate != self.embedding_model.sample_rate:
raise ValueError(
'Separation and embedding models must have matching rates.'
)
def embed(self, audio_array: np.ndarray) -> interface.InferenceOutputs:
# Frame the audio according to the embedding model's config.
# We then apply separation to each frame independently, and embed
# the separated audio.
framed_audio = self.frame_audio(
audio_array,
self.embedding_model.window_size_s,
self.embedding_model.hop_size_s,
)
# framed_audio has shape [Frames, Time]
separation_outputs = self.separation_model.batch_embed(framed_audio)
# separated_audio has shape [F, C, T]
separated_audio = separation_outputs.separated_audio
if separated_audio is None:
raise RuntimeError('Separation model returned None for separated audio.')
if self.embed_raw:
separated_audio = np.concatenate(
[
framed_audio[:, np.newaxis, : separated_audio.shape[-1]],
separated_audio,
],
axis=1,
)
num_frames = separated_audio.shape[0]
num_channels = separated_audio.shape[1]
num_samples = separated_audio.shape[2]
separated_audio = np.reshape(separated_audio, [-1, num_samples])
embedding_outputs = self.embedding_model.batch_embed(separated_audio)
if embedding_outputs.embeddings is not None:
# Batch embeddings have shape [Batch, Time, Channels, Features]
# Time is 1 because we have framed using the embedding model's
# window_size. The batch size is num_frames * num_channels.
embedding_outputs.embeddings = np.reshape(
embedding_outputs.embeddings, [num_frames, num_channels, -1]
)
# Take the maximum logits over the channels dimension.
if embedding_outputs.logits is not None:
max_logits = {}
for k, v in embedding_outputs.logits.items():
v = v.reshape([num_frames, num_channels, -1])
max_logits[k] = np.max(v, axis=1)
else:
max_logits = None
return interface.InferenceOutputs(
embeddings=embedding_outputs.embeddings,
logits=max_logits,
# Because the separated audio is framed, it does not match the
# outputs interface, so we do not return it.
separated_audio=None,
)
@dataclasses.dataclass
class BirbSepModelTF1(interface.EmbeddingModel):
"""Separation model from the Bird MixIT paper.
Example usage:
```
from chirp.inference import models
birbsep1_config = config_dict.ConfigDict({
'model_path': $MODEL_PATH,
'window_size_s': 60.0,
'keep_raw_channel': False,
'sample_rate': 22050,
})
birbsep1 = models.BirbSepModelTF1.from_config(birbsep1_config)
outputs = birbsep1.embed($SOME_AUDIO)
```
"""
model_path: str
window_size_s: float
keep_raw_channel: bool
session: Any
input_placeholder_ns: Any
output_tensor_ns: Any
@classmethod
def _find_checkpoint(cls, model_path: str) -> str:
    # The publicly released model does not ship a 'checkpoint' state file, so
    # glob for checkpoint index files and take the latest.
ckpt = None
for ckpt in sorted(
tuple(epath.Path(model_path).glob('model.ckpt-*.index'))
):
ckpt = ckpt.as_posix()[: -len('.index')]
if ckpt is None:
raise FileNotFoundError('Could not find checkpoint file.')
return ckpt
@classmethod
def from_config(cls, config: config_dict.ConfigDict) -> 'BirbSepModelTF1':
"""Load model files and create TF1 session graph."""
metagraph_path_ns = epath.Path(config.model_path) / 'inference.meta'
checkpoint_path = cls._find_checkpoint(config.model_path)
graph_ns = tf.Graph()
sess_ns = tf1.Session(graph=graph_ns)
with graph_ns.as_default():
new_saver = tf1.train.import_meta_graph(metagraph_path_ns)
new_saver.restore(sess_ns, checkpoint_path)
input_placeholder_ns = graph_ns.get_tensor_by_name(
'input_audio/receiver_audio:0'
)
output_tensor_ns = graph_ns.get_tensor_by_name('denoised_waveforms:0')
session = sess_ns
return cls(
session=session,
input_placeholder_ns=input_placeholder_ns,
output_tensor_ns=output_tensor_ns,
**config,
)
def embed(self, audio_array: Any) -> interface.InferenceOutputs:
start_sample = 0
window_size = int(self.window_size_s * self.sample_rate)
sep_chunks = []
raw_chunks = []
while start_sample <= audio_array.shape[0]:
audio_chunk = audio_array[start_sample : start_sample + window_size]
raw_chunks.append(audio_chunk)
separated_audio = self.session.run(
self.output_tensor_ns,
feed_dict={
self.input_placeholder_ns: audio_chunk[np.newaxis, np.newaxis, :]
},
)
# Drop the extraneous batch dimension.
separated_audio = np.squeeze(separated_audio, axis=0)
sep_chunks.append(separated_audio)
start_sample += window_size
raw_chunks = np.concatenate(raw_chunks, axis=0)
sep_chunks = np.concatenate(sep_chunks, axis=-1)
if self.keep_raw_channel:
sep_chunks = np.concatenate(
[sep_chunks, raw_chunks[np.newaxis, :]], axis=0
)
return interface.InferenceOutputs(separated_audio=sep_chunks)
def batch_embed(self, audio_batch: np.ndarray) -> interface.InferenceOutputs:
return interface.batch_embed_from_embed_fn(self.embed, audio_batch)
@dataclasses.dataclass
class TaxonomyModelTF(interface.EmbeddingModel):
"""Taxonomy SavedModel.
Attributes:
model_path: Path to model files.
window_size_s: Window size for framing audio in seconds. TODO(tomdenton):
Ideally this should come from a model metadata file.
hop_size_s: Hop size for inference.
model: Loaded TF SavedModel.
class_list: Loaded class_list for the model's output logits.
batchable: Whether the model supports batched input.
target_class_list: If provided, restricts logits to this ClassList.
target_peak: Peak normalization value.
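  Example usage (an illustrative sketch; $MODEL_PATH and $SOME_AUDIO are
  placeholders):
  ```
  from chirp.inference import models
  taxo_config = config_dict.ConfigDict({
      'model_path': $MODEL_PATH,
      'sample_rate': 32000,
      'window_size_s': 5.0,
      'hop_size_s': 5.0,
  })
  taxo = models.TaxonomyModelTF.from_config(taxo_config)
  outputs = taxo.embed($SOME_AUDIO)
  ```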
"""
model_path: str
window_size_s: float
hop_size_s: float
model: Any # TF SavedModel
class_list: namespace.ClassList
batchable: bool
target_class_list: namespace.ClassList | None = None
target_peak: float | None = 0.25
@classmethod
def from_config(cls, config: config_dict.ConfigDict) -> 'TaxonomyModelTF':
logging.info('Loading taxonomy model...')
base_path = epath.Path(config.model_path)
if (base_path / 'saved_model.pb').exists() and (
base_path / 'assets'
).exists():
# This looks like a TFHub downloaded model.
model_path = base_path
label_csv_path = epath.Path(config.model_path) / 'assets' / 'label.csv'
else:
# Probably a savedmodel distributed directly.
model_path = base_path / 'savedmodel'
label_csv_path = base_path / 'label.csv'
model = tf.saved_model.load(model_path)
with label_csv_path.open('r') as f:
class_list = namespace.ClassList.from_csv(f)
    # Check whether the model supports a polymorphic batch shape.
sig = model.signatures['serving_default']
batchable = sig.inputs[0].shape[0] is None
return cls(
model=model, class_list=class_list, batchable=batchable, **config
)
def embed(self, audio_array: np.ndarray) -> interface.InferenceOutputs:
if self.batchable:
return interface.embed_from_batch_embed_fn(self.batch_embed, audio_array)
# Process one example at a time.
# This should be fine on CPU, but may be somewhat inefficient for large
# arrays on GPU or TPU.
framed_audio = self.frame_audio(
audio_array, self.window_size_s, self.hop_size_s
)
if self.target_peak is not None:
framed_audio = self.normalize_audio(framed_audio, self.target_peak)
all_logits, all_embeddings = self.model.infer_tf(framed_audio[:1])
for window in framed_audio[1:]:
logits, embeddings = self.model.infer_tf(window[np.newaxis, :])
all_logits = np.concatenate([all_logits, logits], axis=0)
all_embeddings = np.concatenate([all_embeddings, embeddings], axis=0)
all_embeddings = all_embeddings[:, np.newaxis, :]
all_logits = self.convert_logits(
all_logits, self.class_list, self.target_class_list
)
return interface.InferenceOutputs(
all_embeddings, {'label': all_logits}, None
)
def batch_embed(
self, audio_batch: np.ndarray[Any, Any]
) -> interface.InferenceOutputs:
if not self.batchable:
return interface.batch_embed_from_embed_fn(self.embed, audio_batch)
framed_audio = self.frame_audio(
audio_batch, self.window_size_s, self.hop_size_s
)
if self.target_peak is not None:
framed_audio = self.normalize_audio(framed_audio, self.target_peak)
rebatched_audio = framed_audio.reshape([-1, framed_audio.shape[-1]])
logits, embeddings = self.model.infer_tf(rebatched_audio)
logits = self.convert_logits(
logits, self.class_list, self.target_class_list
)
logits = np.reshape(logits, framed_audio.shape[:2] + (logits.shape[-1],))
embeddings = np.reshape(
embeddings, framed_audio.shape[:2] + (embeddings.shape[-1],)
)
return interface.InferenceOutputs(embeddings, {'label': logits}, None)
@dataclasses.dataclass
class SeparatorModelTF(interface.EmbeddingModel):
"""Separator SavedModel.
Attributes:
model_path: Path to model files.
frame_size: Audio frame size for separation model.
model: Loaded TF SavedModel.
class_list: Loaded class_list for the model's output logits.
target_class_list: If provided, restricts logits to this ClassList.
    window_size_s: Window size for framing audio, in seconds. The audio will be
      chunked into frames of window_size_s seconds, which may help avoid memory
      blowouts. If None, all audio is treated as a single frame.
"""
model_path: str
frame_size: int
model: Any
class_list: namespace.ClassList
target_class_list: namespace.ClassList | None = None
window_size_s: float | None = None
@classmethod
def from_config(cls, config: config_dict.ConfigDict) -> 'SeparatorModelTF':
logging.info('Loading taxonomy separator model...')
model = tf.saved_model.load(epath.Path(config.model_path) / 'savedmodel')
label_csv_path = epath.Path(config.model_path) / 'label.csv'
with label_csv_path.open('r') as f:
class_list = namespace.ClassList.from_csv(f)
return cls(model=model, class_list=class_list, **config)
def embed(self, audio_array: np.ndarray) -> interface.InferenceOutputs:
# Drop samples to allow reshaping to frame_size
excess_samples = audio_array.shape[0] % self.frame_size
if excess_samples > 0:
audio_array = audio_array[:-excess_samples]
framed_audio = self.frame_audio(
audio_array, self.window_size_s, self.window_size_s
)
framed_audio = np.reshape(
framed_audio,
[
framed_audio.shape[0],
framed_audio.shape[1] // self.frame_size,
self.frame_size,
],
)
sep_audio, all_logits, all_embeddings = self.model.infer_tf(
framed_audio[:1]
)
for window in framed_audio[1:]:
separated, logits, embeddings = self.model.infer_tf(window[np.newaxis, :])
sep_audio = np.concatenate([sep_audio, separated], axis=0)
all_logits = np.concatenate([all_logits, logits], axis=0)
all_embeddings = np.concatenate([all_embeddings, embeddings], axis=0)
all_embeddings = all_embeddings[:, np.newaxis, :]
# Recombine batch and time dimensions.
sep_audio = np.reshape(sep_audio, [-1, sep_audio.shape[-1]])
all_logits = np.reshape(all_logits, [-1, all_logits.shape[-1]])
all_logits = self.convert_logits(
all_logits, self.class_list, self.target_class_list
)
all_embeddings = np.reshape(all_embeddings, [-1, all_embeddings.shape[-1]])
return interface.InferenceOutputs(
all_embeddings, {'label': all_logits}, sep_audio
)
def batch_embed(self, audio_batch: np.ndarray) -> interface.InferenceOutputs:
return interface.batch_embed_from_embed_fn(self.embed, audio_batch)
@dataclasses.dataclass
class BirdNet(interface.EmbeddingModel):
"""Wrapper for BirdNet models.
Attributes:
model_path: Path to the saved model checkpoint or TFLite file.
model: The TF SavedModel or TFLite interpreter.
tflite: Whether the model is a TFLite model.
class_list: The loaded class list.
    window_size_s: Window size for framing audio, in seconds.
hop_size_s: Hop size for inference.
num_tflite_threads: Number of threads to use with TFLite model.
class_list_name: Name of the BirdNet class list.
target_class_list: If provided, restricts logits to this ClassList.
"""
model_path: str
model: Any
tflite: bool
class_list: namespace.ClassList
window_size_s: float = 3.0
hop_size_s: float = 3.0
num_tflite_threads: int = 16
class_list_name: str = 'birdnet_v2_1'
target_class_list: namespace.ClassList | None = None
@classmethod
def from_config(cls, config: config_dict.ConfigDict) -> 'BirdNet':
logging.info('Loading BirdNet model...')
if config.model_path.endswith('.tflite'):
tflite = True
with tempfile.NamedTemporaryFile() as tmpf:
model_file = epath.Path(config.model_path)
model_file.copy(tmpf.name, overwrite=True)
model = tf.lite.Interpreter(
tmpf.name, num_threads=config.num_tflite_threads
)
model.allocate_tensors()
else:
tflite = False
model = tf.saved_model.load(config.model_path)
db = namespace_db.load_db()
class_list = db.class_lists[config.class_list_name]
return cls(
model=model,
tflite=tflite,
class_list=class_list,
**config,
)
def embed_saved_model(
self, audio_array: np.ndarray
) -> interface.InferenceOutputs:
"""Get logits using the BirdNet SavedModel."""
# Note that there is no easy way to get the embedding from the SavedModel.
all_logits = self.model(audio_array[:1])
for window in audio_array[1:]:
logits = self.model(window[np.newaxis, :])
all_logits = np.concatenate([all_logits, logits], axis=0)
all_logits = self.convert_logits(
all_logits, self.class_list, self.target_class_list
)
return interface.InferenceOutputs(
None, {self.class_list_name: all_logits}, None
)
def embed_tflite(self, audio_array: np.ndarray) -> interface.InferenceOutputs:
"""Create an embedding and logits using the BirdNet TFLite model."""
input_details = self.model.get_input_details()[0]
output_details = self.model.get_output_details()[0]
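    # The BirdNet TFLite graph exposes the logits as its output; here we assume
    # the embedding is the tensor immediately preceding the logits tensor.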
embedding_idx = output_details['index'] - 1
embeddings = []
logits = []
for audio in audio_array:
self.model.set_tensor(
input_details['index'], np.float32(audio)[np.newaxis, :]
)
self.model.invoke()
logits.append(self.model.get_tensor(output_details['index']))
embeddings.append(self.model.get_tensor(embedding_idx))
# Create [Batch, 1, Features]
embeddings = np.array(embeddings)
logits = np.array(logits)
logits = self.convert_logits(
logits, self.class_list, self.target_class_list
)
return interface.InferenceOutputs(
embeddings, {self.class_list_name: logits}, None
)
def embed(self, audio_array: np.ndarray) -> interface.InferenceOutputs:
framed_audio = self.frame_audio(
audio_array, self.window_size_s, self.hop_size_s
)
if self.tflite:
return self.embed_tflite(framed_audio)
else:
return self.embed_saved_model(framed_audio)
def batch_embed(self, audio_batch: np.ndarray) -> interface.InferenceOutputs:
return interface.batch_embed_from_embed_fn(self.embed, audio_batch)
@dataclasses.dataclass
class HandcraftedFeaturesModel(interface.EmbeddingModel):
"""Wrapper for simple feature extraction."""
window_size_s: float
hop_size_s: float
melspec_config: config_dict.ConfigDict
melspec_layer: frontend.Frontend
features_config: config_dict.ConfigDict
features_layer: handcrafted_features.HandcraftedFeatures
@classmethod
def from_config(
cls, config: config_dict.ConfigDict
) -> 'HandcraftedFeaturesModel':
melspec_layer = frontend.MelSpectrogram(**config.melspec_config)
features_layer = handcrafted_features.HandcraftedFeatures(
**config.features_config
)
return cls(
melspec_layer=melspec_layer,
features_layer=features_layer,
**config,
)
@classmethod
def beans_baseline(cls, sample_rate=32000, frame_rate=100):
stride = sample_rate // frame_rate
mel_config = config_dict.ConfigDict({
'sample_rate': sample_rate,
'features': 160,
'stride': stride,
'kernel_size': 2 * stride,
'freq_range': (60.0, sample_rate / 2.0),
'scaling_config': frontend.LogScalingConfig(),
})
features_config = config_dict.ConfigDict({
'compute_mfccs': True,
'aggregation': 'beans',
})
config = config_dict.ConfigDict({
'sample_rate': sample_rate,
'melspec_config': mel_config,
'features_config': features_config,
'window_size_s': 1.0,
'hop_size_s': 1.0,
})
# pylint: disable=unexpected-keyword-arg
return HandcraftedFeaturesModel.from_config(config)
def embed(self, audio_array: np.ndarray) -> interface.InferenceOutputs:
framed_audio = self.frame_audio(
audio_array, self.window_size_s, self.hop_size_s
)
melspec = self.melspec_layer.apply({}, framed_audio)
features = self.features_layer.apply(
{}, melspec[:, :, :, np.newaxis], train=False
)
# Add a trivial channels dimension.
features = features[:, np.newaxis, :]
return interface.InferenceOutputs(features, None, None)
def batch_embed(self, audio_batch: np.ndarray) -> interface.InferenceOutputs:
return interface.batch_embed_from_embed_fn(self.embed, audio_batch)
@dataclasses.dataclass
class TFHubModel(interface.EmbeddingModel):
"""Generic wrapper for TFHub models which produce embeddings."""
model: Any # TFHub loaded model.
model_url: str
embedding_index: int
logits_index: int = -1
@classmethod
def from_config(cls, config: config_dict.ConfigDict) -> 'TFHubModel':
model = hub.load(config.model_url)
return cls(
model=model,
**config,
)
@classmethod
def yamnet(cls):
# Parent class takes a sample_rate arg which pylint doesn't find.
config = config_dict.ConfigDict({
'sample_rate': 16000,
'model_url': 'https://tfhub.dev/google/yamnet/1',
'embedding_index': 1,
'logits_index': 0,
})
return TFHubModel.from_config(config)
@classmethod
def vggish(cls):
config = config_dict.ConfigDict({
'sample_rate': 16000,
'model_url': 'https://tfhub.dev/google/vggish/1',
'embedding_index': -1,
'logits_index': -1,
})
return TFHubModel.from_config(config)
def embed(
self, audio_array: np.ndarray[Any, np.dtype[Any]]
) -> interface.InferenceOutputs:
outputs = self.model(audio_array)
if self.embedding_index < 0:
embeddings = outputs
else:
embeddings = outputs[self.embedding_index]
if len(embeddings.shape) == 1:
embeddings = embeddings[np.newaxis, :]
elif len(embeddings.shape) != 2:
raise ValueError('Embeddings should have shape [Depth] or [Time, Depth].')
if self.logits_index >= 0:
logits = {'label': outputs[self.logits_index]}
else:
logits = None
embeddings = embeddings[:, np.newaxis, :]
return interface.InferenceOutputs(embeddings, logits, None, False)
def batch_embed(self, audio_batch: np.ndarray) -> interface.InferenceOutputs:
return interface.batch_embed_from_embed_fn(self.embed, audio_batch)
@dataclasses.dataclass
class PlaceholderModel(interface.EmbeddingModel):
"""Test implementation of the EmbeddingModel interface."""
embedding_size: int = 128
make_embeddings: bool = True
make_logits: bool = True
make_separated_audio: bool = True
target_class_list: namespace.ClassList | None = None
window_size_s: float = 1.0
hop_size_s: float = 1.0
@classmethod
def from_config(cls, config: config_dict.ConfigDict) -> 'PlaceholderModel':
return cls(**config)
def __post_init__(self):
db = namespace_db.load_db()
self.class_list = db.class_lists['caples']
def embed(self, audio_array: np.ndarray) -> interface.InferenceOutputs:
outputs = {}
time_size = audio_array.shape[0] // int(
self.window_size_s * self.sample_rate
)
if self.make_embeddings:
outputs['embeddings'] = np.zeros(
[time_size, 1, self.embedding_size], np.float32
)
if self.make_logits:
outputs['logits'] = {
'label': np.zeros(
[time_size, len(self.class_list.classes)], np.float32
),
'other_label': np.ones(
[time_size, len(self.class_list.classes)], np.float32
),
}
outputs['logits']['label'] = self.convert_logits(
outputs['logits']['label'], self.class_list, self.target_class_list
)
if self.make_separated_audio:
outputs['separated_audio'] = np.zeros(
[2, audio_array.shape[-1]], np.float32
)
return interface.InferenceOutputs(**outputs)
def batch_embed(self, audio_batch: np.ndarray) -> interface.InferenceOutputs:
return interface.batch_embed_from_embed_fn(self.embed, audio_batch)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for models producing embeddings."""
import dataclasses
from typing import Any, Callable, Dict
from absl import logging
from chirp.taxonomy import namespace
from etils import epath
import librosa
from ml_collections import config_dict
import numpy as np
import tensorflow as tf
LogitType = Dict[str, np.ndarray]
NULL_LOGIT = -20.0
POOLING_METHODS = ['first', 'mean', 'max', 'mid', 'flatten', 'squeeze']
@dataclasses.dataclass
class InferenceOutputs:
"""Wrapper class for outputs from an inference model.
Attributes:
embeddings: Embeddings array with shape [Frames, Channels, Features].
logits: Dictionary mapping a class list L's name to an array of logits. The
logits array has shape [Frames, L.size] or [Frames, Channels, L.size].
separated_audio: Separated audio channels with shape [Channels, Samples].
    batched: If True, each output has an additional batch dimension.
"""
embeddings: np.ndarray | None = None
logits: LogitType | None = None
separated_audio: np.ndarray | None = None
batched: bool = False
def __post_init__(self):
# In some scenarios, we may be passed TF EagerTensors. We dereference these
# to numpy arrays for broad compatibility.
if hasattr(self.embeddings, 'numpy'):
self.embeddings = self.embeddings.numpy()
if self.logits is not None:
for k, v in self.logits.items():
if hasattr(v, 'numpy'):
self.logits[k] = v.numpy()
if hasattr(self.separated_audio, 'numpy'):
self.separated_audio = self.separated_audio.numpy()
def pooled_embeddings(
self, time_pooling: str, channel_pooling: str = ''
) -> np.ndarray:
"""Reduce embeddings over the time and/or channel axis."""
# Shape is either [B, F, C, D] or [F, C, D], so the time axis is -3.
outputs = pool_axis(self.embeddings, -3, time_pooling)
outputs = pool_axis(outputs, -2, channel_pooling)
return outputs
EmbedFnType = Callable[[np.ndarray], InferenceOutputs]
@dataclasses.dataclass
class EmbeddingModel:
"""Wrapper for a model which produces audio embeddings.
It is encouraged to implement either the `embed` or `batch_embed` function
and use a convenience method (`batch_embed_from_embed_fn` or
`embed_from_batch_embed_fn`) to get the other. It is preferable to implement
`batch_embed` so long as the model accepts batch input, as batch input
inference can be much faster.
Attributes:
sample_rate: Sample rate in hz.
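  Example: a minimal subclass (an illustrative sketch only):
  ```
  @dataclasses.dataclass
  class ConstantModel(EmbeddingModel):
    def embed(self, audio_array: np.ndarray) -> InferenceOutputs:
      # Produce a [Frames, Channels, Features] embedding; a dummy here.
      return InferenceOutputs(
          embeddings=np.zeros([1, 1, 8], dtype=np.float32))

    def batch_embed(self, audio_batch: np.ndarray) -> InferenceOutputs:
      return batch_embed_from_embed_fn(self.embed, audio_batch)
  ```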
"""
sample_rate: int
@classmethod
def from_config(
cls, model_config: config_dict.ConfigDict
) -> 'EmbeddingModel':
"""Load the model from a configuration dict."""
raise NotImplementedError
def embed(self, audio_array: np.ndarray) -> InferenceOutputs:
"""Create InferenceOutputs from an audio array.
Args:
audio_array: An array with shape [Time] containing unit-scaled audio.
Returns:
An InferenceOutputs object.
"""
raise NotImplementedError
def batch_embed(self, audio_batch: np.ndarray) -> InferenceOutputs:
"""Create InferenceOutputs from a batch of audio arrays.
Args:
      audio_batch: An array with shape [Batch, Time] containing unit-scaled
        audio.
Returns:
An InferenceOutputs object.
"""
raise NotImplementedError
def convert_logits(
self,
logits: np.ndarray,
source_class_list: namespace.ClassList,
target_class_list: namespace.ClassList | None,
) -> np.ndarray:
"""Convert model logits to logits for a different class list."""
if target_class_list is None:
return logits
sp_matrix, sp_mask = source_class_list.get_class_map_matrix(
target_class_list
)
# When we convert from ClassList A (used for training) to ClassList B
# (for inference output) there may be labels in B which don't appear in A.
# The `sp_mask` tells us which labels appear in both A and B. We set the
# logit for the new labels to NULL_LOGIT, which corresponds to a probability
# very close to zero.
return logits @ sp_matrix + NULL_LOGIT * (1 - sp_mask)
def frame_audio(
self,
audio_array: np.ndarray,
window_size_s: float | None,
hop_size_s: float,
) -> np.ndarray:
"""Helper function for framing audio for inference along the last axis."""
if window_size_s is None or window_size_s < 0:
return np.expand_dims(audio_array, axis=-2)
frame_length = int(window_size_s * self.sample_rate)
hop_length = int(hop_size_s * self.sample_rate)
if audio_array.shape[-1] < frame_length:
      audio_array = librosa.util.pad_center(audio_array, size=frame_length, axis=-1)
# Librosa frames as [..., frame_length, frames], so we need a transpose.
framed_audio = librosa.util.frame(
audio_array,
frame_length=frame_length,
hop_length=hop_length,
axis=-1,
).swapaxes(-1, -2)
return framed_audio
def normalize_audio(
self,
framed_audio: np.ndarray,
target_peak: float,
) -> np.ndarray:
"""Normalizes audio with shape [..., T] to match the target_peak value."""
framed_audio = framed_audio.copy()
framed_audio -= np.mean(framed_audio, axis=-1, keepdims=True)
peak_norm = np.max(np.abs(framed_audio), axis=-1, keepdims=True)
    # Avoid division by zero: where the peak is zero, the (mean-subtracted)
    # frame is already all zeros, so leave those frames unchanged.
    framed_audio = np.divide(
        framed_audio, peak_norm, out=framed_audio, where=(peak_norm > 0.0)
    )
framed_audio = framed_audio * target_peak
return framed_audio
@dataclasses.dataclass
class LogitsOutputHead:
"""A TensorFlow model which classifies embeddings.
Attributes:
model_path: Path to saved model.
logits_key: Name of this output head.
logits_model: Callable model converting embeddings of shape [B,
embedding_width] to [B, num_classes].
class_list: ClassList specifying the ordered classes.
channel_pooling: Pooling to apply to channel dimension of logits. Specify an
empty string to apply no pooling.
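  Example usage (an illustrative sketch; the path is a placeholder):
  ```
  head = LogitsOutputHead.from_config(config_dict.ConfigDict({
      'model_path': '/path/to/logits_head',
      'logits_key': 'my_classifier',
  }))
  outputs_with_logits = head.add_logits(model_outputs)
  ```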
"""
model_path: str
logits_key: str
logits_model: Any
class_list: namespace.ClassList
channel_pooling: str = 'max'
@classmethod
def from_config(cls, config: config_dict.ConfigDict):
logits_model = tf.saved_model.load(config.model_path)
model_path = epath.Path(config.model_path)
with (model_path / 'class_list.csv').open('r') as f:
class_list = namespace.ClassList.from_csv(f)
return cls(
logits_model=logits_model,
class_list=class_list,
**config,
)
def save_model(self, output_path: str, embeddings_path: str):
"""Write a SavedModel and metadata to disk."""
# Write the model.
tf.saved_model.save(self.logits_model, output_path)
output_path = epath.Path(output_path)
# Copy the embeddings_config if provided
if embeddings_path:
(epath.Path(embeddings_path) / 'config.json').copy(
output_path / 'embeddings_config.json', overwrite=True
)
# Write the class list.
with (output_path / 'class_list.csv').open('w') as f:
f.write(self.class_list.to_csv())
def add_logits(self, model_outputs: InferenceOutputs):
"""Update the model_outputs to include logits from this output head."""
embeddings = model_outputs.embeddings
if embeddings is None:
logging.warning('No embeddings found in model outputs.')
return model_outputs
flat_embeddings = np.reshape(embeddings, [-1, embeddings.shape[-1]])
flat_logits = self.logits_model(flat_embeddings)
logits_shape = np.concatenate(
[np.shape(embeddings)[:-1], np.shape(flat_logits)[-1:]], axis=0
)
logits = np.reshape(flat_logits, logits_shape)
# Embeddings have shape [B, T, C, D] or [T, C, D], so our logits also
# have a channel dimension.
# Output logits should have shape [B, T, D] or [T, D], so we reduce the
# channel axis as specified by the user.
# The default is 'max' which is reasonable for separated audio and
# is equivalent to 'squeeze' for the single-channel case.
logits = pool_axis(logits, -2, self.channel_pooling)
new_outputs = InferenceOutputs(
embeddings=model_outputs.embeddings,
logits=model_outputs.logits,
separated_audio=model_outputs.separated_audio,
batched=model_outputs.batched,
)
if new_outputs.logits is None:
new_outputs.logits = {}
new_outputs.logits[self.logits_key] = logits
return new_outputs
def embed_from_batch_embed_fn(
embed_fn: EmbedFnType, audio_array: np.ndarray
) -> InferenceOutputs:
"""Embed a single example using a batch_embed_fn."""
audio_batch = audio_array[np.newaxis, :]
outputs = embed_fn(audio_batch)
if outputs.embeddings is not None:
embeddings = outputs.embeddings[0]
else:
embeddings = None
if outputs.logits is not None:
logits = {}
for k, v in outputs.logits.items():
logits[k] = v[0]
else:
logits = None
if outputs.separated_audio is not None:
separated_audio = outputs.separated_audio[0]
else:
separated_audio = None
return InferenceOutputs(
embeddings=embeddings,
logits=logits,
separated_audio=separated_audio,
batched=False,
)
def batch_embed_from_embed_fn(
embed_fn: EmbedFnType, audio_batch: np.ndarray
) -> InferenceOutputs:
"""Embed a batch of audio using a single-example embed_fn."""
outputs = []
for audio in audio_batch:
outputs.append(embed_fn(audio))
if outputs[0].embeddings is not None:
embeddings = np.stack([x.embeddings for x in outputs], axis=0)
else:
embeddings = None
if outputs[0].logits is not None:
batched_logits = {}
for logit_key in outputs[0].logits:
batched_logits[logit_key] = np.stack(
[x.logits[logit_key] for x in outputs], axis=0
)
else:
batched_logits = None
if outputs[0].separated_audio is not None:
separated_audio = np.stack([x.separated_audio for x in outputs], axis=0)
else:
separated_audio = None
return InferenceOutputs(
embeddings=embeddings,
logits=batched_logits,
separated_audio=separated_audio,
batched=True,
)
def pool_axis(ar: np.ndarray, axis: int, pooling: str) -> np.ndarray:
"""Apply the specified pooling along the target axis."""
if pooling == 'first':
outputs = ar.take(0, axis=axis)
elif pooling == 'squeeze':
# Like 'first' but throws an exception if more than one time step.
outputs = ar.squeeze(axis=axis)
elif pooling == 'mean':
outputs = ar.mean(axis=axis)
elif pooling == 'max':
outputs = ar.max(axis=axis)
elif pooling == 'mid':
midpoint_index = ar.shape[axis] // 2
outputs = ar.take(midpoint_index, axis=axis)
elif pooling == 'flatten':
# Flatten the target axis dimension into the last dimension.
outputs = ar.swapaxes(axis, -2)
new_shape = outputs.shape[:-2] + (outputs.shape[-1] * outputs.shape[-2],)
outputs = outputs.reshape(new_shape)
elif not pooling:
outputs = ar
else:
raise ValueError(f'Unrecognized pooling method {pooling}.')
return outputs
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create embeddings for an audio corpus."""
import dataclasses
import importlib
import json
import os
from typing import Any, Sequence
from absl import logging
import apache_beam as beam
import audioread
from chirp import audio_utils
from chirp import path_utils
from chirp.inference import interface
from chirp.inference import models
from chirp.inference import tf_examples
from etils import epath
from ml_collections import config_dict
import numpy as np
import soundfile
import tensorflow as tf
INFERENCE_CONFIGS_PKG = 'chirp.inference.configs.'
@dataclasses.dataclass
class SourceInfo:
"""Source information for extracting target audio from a file."""
filepath: str
shard_num: int
shard_len_s: float
def file_id(self, file_id_depth: int) -> str:
file_id = epath.Path(
*epath.Path(self.filepath).parts[-(file_id_depth + 1) :]
).as_posix()
return file_id
def create_source_infos(
source_file_patterns: Sequence[str],
num_shards_per_file: int,
shard_len_s: float,
) -> Sequence[SourceInfo]:
"""Expand source file patterns into a list of SourceInfos."""
# TODO(tomdenton): probe each file and create work units in a new Beam stage.
source_files = []
for pattern in source_file_patterns:
for source_file in epath.Path('').glob(pattern):
source_files.append(source_file)
source_file_splits = []
for source in source_files:
for i in range(num_shards_per_file):
source_file_splits.append(SourceInfo(source.as_posix(), i, shard_len_s))
return source_file_splits
class EmbedFn(beam.DoFn):
"""Beam worker function for creating audio embeddings.
TODO(tomdenton): Move most of this functionality into the EmbeddingModel.
This will increase usability in non-beam contexts.
"""
def __init__(
self,
write_embeddings: bool,
write_logits: bool | Sequence[str],
write_separated_audio: bool,
write_raw_audio: bool,
model_key: str,
model_config: config_dict.ConfigDict,
crop_s: float = -1.0,
file_id_depth: int = 0,
min_audio_s: float = 5.0,
embedding_model: interface.EmbeddingModel | None = None,
target_sample_rate: int = -2,
logits_head_config: config_dict.ConfigDict | None = None,
):
"""Initialize the embedding DoFn.
Args:
write_embeddings: Whether to write embeddings.
write_logits: Whether to write output logits. Alternatively, a sequence of
logit keys to write.
write_separated_audio: Whether to write out separated audio tracks.
write_raw_audio: If true, will add the original audio to the output.
model_key: String indicating which model wrapper to use. See MODEL_KEYS.
Only used for setting up the embedding model.
model_config: Keyword arg dictionary for the model wrapper class. Only
used for setting up the embedding model.
crop_s: If greater than zero, run on only the first crop_s seconds.
      file_id_depth: Number of parent directories to include in the file_id. E.g.,
If file_id_depth=2 and the filename is `C://my/file/is/awesome.wav`,
then the file_id will be `file/is/awesome.wav`.
min_audio_s: Minimum allowed audio length, in seconds.
embedding_model: Pre-loaded embedding model.
target_sample_rate: Target sample rate when loading audio. Set to -2 to
use the embedding model's native sample rate, or any positive number to
resample to a fixed rate.
logits_head_config: Optional configuration for a secondary
interface.LogitsOutputHead classifying the model embeddings.
"""
self.model_key = model_key
self.model_config = model_config
self.write_embeddings = write_embeddings
self.write_logits = write_logits
self.write_separated_audio = write_separated_audio
self.write_raw_audio = write_raw_audio
self.crop_s = crop_s
self.embedding_model = embedding_model
self.file_id_depth = file_id_depth
self.min_audio_s = min_audio_s
self.target_sample_rate = target_sample_rate
self.logits_head_config = logits_head_config
self.logits_head = None
def setup(self):
if self.embedding_model is None:
model_class = models.model_class_map()[self.model_key]
self.embedding_model = model_class.from_config(self.model_config)
if hasattr(self, 'model_key'):
del self.model_key
if hasattr(self, 'model_config'):
del self.model_config
if self.target_sample_rate == -2:
self.target_sample_rate = self.embedding_model.sample_rate
    elif self.target_sample_rate > 0:
      # Keep the explicitly provided fixed sample rate.
      pass
else:
raise ValueError('Invalid target_sample_rate.')
if self.logits_head_config is not None:
self.logits_head = interface.LogitsOutputHead.from_config(
self.logits_head_config
)
def load_audio(
self, filepath: str, offset_s: float, window_size_s: float
) -> np.ndarray | None:
audio = audio_utils.load_audio_window(
filepath, offset_s, self.target_sample_rate, window_size_s
)
    logging.info('Audio loaded successfully.')
# Convert audio from jax array to numpy array.
return np.array(audio)
def _log_exception(self, source_info, exception, counter_name):
beam.metrics.Metrics.counter('beaminference', counter_name).inc()
logging.warning(
'The audio at (%s / %d) could not be loaded (%s). '
'The exception was (%s)',
source_info.filepath,
source_info.shard_num,
counter_name,
exception,
)
def audio_to_example(
self, file_id: str, timestamp_offset_s: float, audio: np.ndarray
) -> tf.train.Example:
"""Embed audio and create a TFExample."""
if self.embedding_model is None:
raise ValueError('Embedding model undefined.')
model_outputs = self.embedding_model.embed(audio)
if self.logits_head is not None:
# Update model outputs with logits from the secondary classifier.
model_outputs = self.logits_head.add_logits(model_outputs)
example = tf_examples.model_outputs_to_tf_example(
model_outputs=model_outputs,
file_id=file_id,
audio=audio,
timestamp_offset_s=timestamp_offset_s,
write_raw_audio=self.write_raw_audio,
write_separated_audio=self.write_separated_audio,
write_embeddings=self.write_embeddings,
write_logits=self.write_logits,
)
return example
@beam.typehints.with_output_types(Any)
def process(self, source_info: SourceInfo, crop_s: float = -1.0):
"""Process a source.
Args:
source_info: SourceInfo describing the audio to process.
crop_s: If >0, only the first crop_s seconds will be used. Helpful for
dry-run testing.
Returns:
A TFExample.
"""
file_id = source_info.file_id(self.file_id_depth)
logging.info('...loading audio (%s)', source_info.filepath)
timestamp_offset_s = source_info.shard_num * source_info.shard_len_s
if crop_s > 0:
window_size_s = crop_s
elif self.crop_s > 0:
window_size_s = self.crop_s
elif source_info.shard_len_s > 0:
window_size_s = source_info.shard_len_s
else:
window_size_s = -1
try:
audio = self.load_audio(
source_info.filepath, timestamp_offset_s, window_size_s
)
except soundfile.LibsndfileError as inst:
self._log_exception(source_info, inst, 'audio_libsndfile_error')
return
except ValueError as inst:
self._log_exception(source_info, inst, 'audio_bad_offset')
return
except audioread.NoBackendError as inst:
self._log_exception(source_info, inst, 'audio_no_backend')
return
except EOFError as inst:
self._log_exception(source_info, inst, 'audio_eof_error')
return
except RuntimeError as inst:
if 'Soundfile is not available' in str(inst):
self._log_exception(source_info, inst, 'audio_no_soundfile')
else:
self._log_exception(source_info, inst, 'audio_runtime_error')
return
if audio is None:
self._log_exception(source_info, 'no_exception', 'audio_empty')
return
if audio.shape[0] < self.min_audio_s * self.target_sample_rate:
self._log_exception(source_info, 'no_exception', 'audio_too_short')
return
logging.info(
'...creating embeddings (%s / %d)', file_id, timestamp_offset_s
)
example = self.audio_to_example(file_id, timestamp_offset_s, audio)
beam.metrics.Metrics.counter('beaminference', 'examples_processed').inc()
return [example]
def get_config(config_key: str):
"""Get a config given its keyed name."""
module_key = '..{}'.format(config_key)
config = importlib.import_module(
module_key, INFERENCE_CONFIGS_PKG
).get_config()
logging.info('Loaded config %s', config_key)
logging.info('Config output location : %s', config.output_dir)
return config
def maybe_write_config(parsed_config, output_dir):
config_json = parsed_config.to_json(indent=2)
if (output_dir / 'config.json').exists():
with (output_dir / 'config.json').open('r') as f:
got_json = f.read()
if config_json == got_json:
return
with (output_dir / 'config.json').open('w') as f:
f.write(config_json)
def load_embedding_config(embeddings_path):
"""Loads the configuration to generate unlabeled embeddings."""
embeddings_path = epath.Path(embeddings_path)
with (embeddings_path / 'config.json').open() as f:
embedding_config = config_dict.ConfigDict(json.loads(f.read()))
return embedding_config
def build_run_pipeline(base_pipeline, output_dir, source_infos, embed_fn):
"""Create and run a beam pipeline."""
_ = (
base_pipeline
| beam.Create(source_infos)
| beam.ParDo(embed_fn)
# When a file is corrupted and can't be loaded EmbedFn
# returns None. In this case the lambda below returns false, which then
# filters it out.
| beam.Filter(lambda x: x)
| beam.Reshuffle()
| beam.io.tfrecordio.WriteToTFRecord(
os.path.join(output_dir, 'embeddings'),
coder=beam.coders.ProtoCoder(tf.train.Example),
)
)
metrics = base_pipeline.run().metrics()
return metrics
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for manipulating TF Examples."""
import dataclasses
import os
from typing import Sequence
from chirp.inference import interface
from etils import epath
import numpy as np
import tensorflow as tf
# Feature keys.
FILE_NAME = 'filename'
TIMESTAMP_S = 'timestamp_s'
EMBEDDING = 'embedding'
EMBEDDING_SHAPE = 'embedding_shape'
LOGITS = 'logits'
SEPARATED_AUDIO = 'separated_audio'
SEPARATED_AUDIO_SHAPE = 'separated_audio_shape'
RAW_AUDIO = 'raw_audio'
RAW_AUDIO_SHAPE = 'raw_audio_shape'
def get_feature_description(logit_names: Sequence[str] | None = None):
"""Create a feature description for the TFExamples.
Each tensor feature includes both a serialized tensor and a 'shape' feature.
The tensor feature can be parsed with tf.io.parse_tensor, and then reshaped
according to the shape feature.
Args:
logit_names: Name of logit features included in the examples.
Returns:
Feature description dict for parsing TF Example protos.
"""
feature_description = {
FILE_NAME: tf.io.FixedLenFeature([], tf.string),
TIMESTAMP_S: tf.io.FixedLenFeature([], tf.float32),
EMBEDDING: tf.io.FixedLenFeature([], tf.string, default_value=''),
EMBEDDING_SHAPE: tf.io.FixedLenSequenceFeature(
[], tf.int64, allow_missing=True
),
SEPARATED_AUDIO: tf.io.FixedLenFeature([], tf.string, default_value=''),
SEPARATED_AUDIO_SHAPE: tf.io.FixedLenSequenceFeature(
[], tf.int64, allow_missing=True
),
RAW_AUDIO: tf.io.FixedLenFeature([], tf.string, default_value=''),
RAW_AUDIO_SHAPE: tf.io.FixedLenSequenceFeature(
[], tf.int64, allow_missing=True
),
}
if logit_names is not None:
for logit_name in logit_names:
feature_description[logit_name] = tf.io.FixedLenFeature(
[], tf.string, default_value=''
)
feature_description[f'{logit_name}_shape'] = (
tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True)
)
return feature_description
def get_example_parser(logit_names: Sequence[str] | None = None):
"""Create a parser for decoding inference library TFExamples."""
features = get_feature_description(logit_names=logit_names)
def _parser(ex):
ex = tf.io.parse_single_example(ex, features)
tensor_keys = [EMBEDDING, SEPARATED_AUDIO, RAW_AUDIO]
if logit_names is not None:
tensor_keys.extend(logit_names)
for key in tensor_keys:
# Note that we can't use implicit truthiness for string tensors.
# We are also required to have the same tensor structure and dtype in
# both conditional branches. So we use an empty tensor when no
# data is present to parse.
if ex[key] != tf.constant(b'', dtype=tf.string):
ex[key] = tf.io.parse_tensor(ex[key], tf.float32)
else:
ex[key] = tf.zeros_like([], dtype=tf.float32)
return ex
return _parser
def create_embeddings_dataset(
embeddings_dir, file_glob: str = '*', prefetch: int = 128
):
"""Create a TF Dataset of the embeddings."""
embeddings_dir = epath.Path(embeddings_dir)
embeddings_files = [fn.as_posix() for fn in embeddings_dir.glob(file_glob)]
ds = tf.data.TFRecordDataset(
embeddings_files, num_parallel_reads=tf.data.AUTOTUNE
)
parser = get_example_parser()
ds = ds.map(parser, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.prefetch(prefetch)
return ds
def serialize_tensor(tensor: np.ndarray) -> np.ndarray:
serialized = tf.io.serialize_tensor(tensor)
return serialized.numpy()
def model_outputs_to_tf_example(
model_outputs: interface.InferenceOutputs,
file_id: str,
audio: np.ndarray,
timestamp_offset_s: float,
write_embeddings: bool,
write_logits: bool | Sequence[str],
write_separated_audio: bool,
write_raw_audio: bool,
) -> tf.train.Example:
"""Create a TFExample from InferenceOutputs."""
feature = {
FILE_NAME: bytes_feature(bytes(file_id, encoding='utf8')),
TIMESTAMP_S: float_feature(timestamp_offset_s),
}
if write_embeddings and model_outputs.embeddings is not None:
feature[EMBEDDING] = bytes_feature(
serialize_tensor(model_outputs.embeddings)
)
    feature[EMBEDDING_SHAPE] = int_feature(model_outputs.embeddings.shape)
# Handle writing logits.
if model_outputs.logits is not None and write_logits:
logit_keys = tuple(model_outputs.logits.keys())
if not isinstance(write_logits, bool):
# Then it's a Sequence[str], so we only keep the relevant keys.
logit_keys = tuple(k for k in logit_keys if k in write_logits)
for logits_key in logit_keys:
logits = model_outputs.logits[logits_key]
feature[logits_key] = bytes_feature(serialize_tensor(logits))
feature[logits_key + '_shape'] = int_feature(logits.shape)
if write_separated_audio and model_outputs.separated_audio is not None:
feature[SEPARATED_AUDIO] = bytes_feature(
serialize_tensor(model_outputs.separated_audio)
)
feature[SEPARATED_AUDIO_SHAPE] = int_feature(
model_outputs.separated_audio.shape
)
if write_raw_audio:
feature[RAW_AUDIO] = bytes_feature(
serialize_tensor(tf.constant(audio, dtype=tf.float32))
)
feature[RAW_AUDIO_SHAPE] = int_feature(audio.shape)
ex = tf.train.Example(features=tf.train.Features(feature=feature))
return ex
@dataclasses.dataclass
class EmbeddingsTFRecordMultiWriter:
"""A sharded TFRecord writer."""
output_dir: str
filename_pattern: str = 'embeddings-%05d-of-%05d'
num_files: int = 10
_writer_index: int = 0
def write(self, record: str):
"""Write a serialized record."""
writer = self.writers[self._writer_index]
writer.write(record)
self._writer_index = (self._writer_index + 1) % self.num_files
def flush(self):
"""Flush all files."""
for writer in self.writers:
writer.flush()
def close(self):
"""Close all files."""
for writer in self.writers:
writer.close()
def __enter__(self):
self.writers = []
for i in range(self.num_files):
filepath = os.path.join(
self.output_dir, self.filename_pattern % (i, self.num_files)
)
self.writers.append(tf.io.TFRecordWriter(filepath))
return self
def __exit__(self, *args):
self.flush()
self.close()
def bytes_feature(x, default=''):
if x is None:
x = default
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[x]))
def int_feature(x, default=-1):
if x is None:
x = default
if hasattr(x, 'count'):
return tf.train.Feature(int64_list=tf.train.Int64List(value=x))
return tf.train.Feature(int64_list=tf.train.Int64List(value=[x]))
def float_feature(x, default=0.0):
if x is None:
x = default
if hasattr(x, 'count'):
return tf.train.Feature(float_list=tf.train.FloatList(value=x))
return tf.train.Feature(float_list=tf.train.FloatList(value=[x]))
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for user-facing colab notebooks."""
import warnings
from absl import logging
from chirp import config_utils
from chirp.configs import config_globals
from chirp.inference import embed_lib
import numpy as np
import tensorflow as tf
def initialize(use_tf_gpu: bool = True, disable_warnings: bool = True):
"""Apply notebook conveniences.
Args:
use_tf_gpu: If True, allows GPU use and sets Tensorflow to 'memory growth'
mode (instead of reserving all available GPU memory at once). If False,
Tensorflow is restricted to CPU operation. Must run before any TF
computations to be effective.
disable_warnings: If True, disables printed warnings from library code.
"""
if disable_warnings:
logging.set_verbosity(logging.ERROR)
warnings.filterwarnings('ignore')
if not use_tf_gpu:
tf.config.experimental.set_visible_devices([], 'GPU')
else:
for gpu in tf.config.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
def prstats(title: str, ar: np.ndarray):
"""Print summary statistics for an array."""
tmpl = (
'% 16s : \tshape: % 16s\tmin: %6.2f\tmean: %6.2f\tmax: %6.2f\tstd: %6.2f'
)
print(
tmpl
% (title, np.shape(ar), np.min(ar), np.mean(ar), np.max(ar), np.std(ar))
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Embed audio data with BirdNET."""
from chirp import config_utils
from ml_collections import config_dict
_c = config_utils.callable_config
_object_config = config_utils.object_config
def get_config() -> config_dict.ConfigDict:
"""Create the Caples inference config."""
# Attention-based 5s model.
config = config_dict.ConfigDict()
config.output_dir = ''
config.source_file_patterns = ['soundscapes/*.wav']
# Note that the model path should be either the location of the '.tflite'
  # file or the directory containing the 'saved_model.pb'.
model_path = ''
config.num_shards_per_file = 1
config.shard_len_s = 60
# Number of workers when using the Beam DirectRunner on a single machine.
config.num_direct_workers = 8
config.embed_fn_config = {
'write_embeddings': True,
'write_logits': True,
'write_separated_audio': False,
'write_raw_audio': True,
'model_key': 'birdnet',
'model_config': {
'model_path': model_path,
'window_size_s': 3.0,
'hop_size_s': 3.0,
'sample_rate': 48000,
},
}
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Embed audio data without applying source separation."""
from chirp import config_utils
from ml_collections import config_dict
_c = config_utils.callable_config
_object_config = config_utils.object_config
def get_config() -> config_dict.ConfigDict:
"""Create the raw soundscapes inference config."""
# Attention-based 5s model.
config = config_dict.ConfigDict()
config.output_dir = ''
config.source_file_patterns = []
model_checkpoint_path = ''
config.num_shards_per_file = 120
config.shard_len_s = 60
# Number of workers when using the Beam DirectRunner on a single machine.
config.num_direct_workers = 8
config.embed_fn_config = {
'write_embeddings': True,
'write_logits': False,
'write_separated_audio': False,
'write_raw_audio': False,
'file_id_depth': 1,
'model_key': 'taxonomy_model_tf',
'model_config': {
'model_path': model_checkpoint_path,
'window_size_s': 5.0,
'hop_size_s': 5.0,
'sample_rate': 32000,
},
}
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Embed audio data using both a seapration and embedding model."""
from chirp import config_utils
from ml_collections import config_dict
_c = config_utils.callable_config
_object_config = config_utils.object_config
def get_config() -> config_dict.ConfigDict:
"""Create the Caples inference config."""
# Attention-based 5s model.
config = config_dict.ConfigDict()
config.output_dir = ''
config.source_file_patterns = []
sep_model_checkpoint_path = ''
emb_model_checkpoint_path = ''
config.num_shards_per_file = 120
config.shard_len_s = 60
# Number of workers when using the Beam DirectRunner on a single machine.
config.num_direct_workers = 8
config.embed_fn_config = {
'write_embeddings': True,
'write_logits': False,
'write_separated_audio': False,
'write_raw_audio': False,
'model_key': 'separate_embed_model',
'model_config': {
'sample_rate': 32000,
'taxonomy_model_tf_config': {
'model_path': emb_model_checkpoint_path,
'window_size_s': 5.0,
'hop_size_s': 5.0,
'sample_rate': 32000,
},
'separator_model_tf_config': {
'model_path': sep_model_checkpoint_path,
'sample_rate': 32000,
'frame_size': 32000,
},
},
}
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Embed audio data using both a seapration and embedding model."""
from chirp import config_utils
from ml_collections import config_dict
_c = config_utils.callable_config
_object_config = config_utils.object_config
def get_config() -> config_dict.ConfigDict:
"""Create the Separated Seabird inference config."""
# Attention-based 5s model.
config = config_dict.ConfigDict()
config.output_dir = ''
config.source_file_patterns = ['soundscapes/*.wav']
sep_model_checkpoint_path = ''
emb_model_checkpoint_path = ''
# Raw audio files are several hours long each
config.num_shards_per_file = 720
config.shard_len_s = 60
# Number of workers when using the Beam DirectRunner on a single machine.
config.num_direct_workers = 8
config.embed_fn_config = {
'file_id_depth': 0,
'write_embeddings': True,
'write_logits': False,
'write_separated_audio': False,
'write_raw_audio': False,
'model_key': 'separate_embed_model',
'model_config': {
'sample_rate': 32000,
'taxonomy_model_tf_config': {
'model_path': emb_model_checkpoint_path,
'window_size_s': 5.0,
'hop_size_s': 5.0,
'sample_rate': 32000,
},
'separator_model_tf_config': {
'model_path': sep_model_checkpoint_path,
'sample_rate': 32000,
'frame_size': 32000,
},
},
}
return config
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data pipeline functions."""
import dataclasses
from typing import Iterable, Sequence
from absl import logging
from chirp import audio_utils
from chirp.models import frontend
from chirp.taxonomy import namespace
from chirp.taxonomy import namespace_db
import jax
from jax import numpy as jnp
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_io as tfio
Features = dict[str, tf.Tensor]
class FeaturesPreprocessOp:
"""Preprocessing op which applies changes to specific features."""
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
return features.copy()
def get_sample_rate(self, dataset_info):
# Use the explicit sample_rate param if available.
if hasattr(self, 'sample_rate') and self.sample_rate is not None:
return self.sample_rate
    # Otherwise, use the sample_rate described by the dataset_info.
return dataset_info.features['audio'].sample_rate
class DatasetPreprocessOp:
"""Preprocessing op which transforms the dataset."""
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
return dataset
def get_sample_rate(self, dataset_info):
# Use the explicit sample_rate param if available.
if hasattr(self, 'sample_rate') and self.sample_rate is not None:
return self.sample_rate
    # Otherwise, use the sample_rate described by the dataset_info.
return dataset_info.features['audio'].sample_rate
@dataclasses.dataclass
class Pipeline:
"""Construct a pipeline of preprocessing operations.
This is modelled after `clu.preprocess_spec`, but rewritten to allow for
processing operations which cannot be expressed per sample (e.g., mixing
samples). Additionally, preprocessing operations will have access to the
metadata in the DatasetInfo object.
Attributes:
ops: The preprocessing operations to apply.
num_parallel_calls: Passed to `dataset.map`.
deterministic: Whether the ordering of the samples should be deterministic.
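  Example (an illustrative sketch; the ops and parameters are placeholders):
  ```
  pipeline = Pipeline(ops=[
      RandomSlice(window_size=5.0),
      RandomNormalizeAudio(min_gain=0.15, max_gain=0.25),
  ])
  dataset = pipeline(dataset, dataset_info)
  ```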
"""
ops: Sequence[FeaturesPreprocessOp | DatasetPreprocessOp]
num_parallel_calls: int = tf.data.AUTOTUNE
deterministic: bool = False
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
# We group feature preprocessing operations into a single map operation to
    # reduce the number of threads.
feature_preprocess_ops = []
for op in self.ops:
if isinstance(op, FeaturesPreprocessOp):
feature_preprocess_ops.append(op)
else:
if feature_preprocess_ops:
dataset = dataset.map(
map_func=self.chain(feature_preprocess_ops, dataset_info),
num_parallel_calls=self.num_parallel_calls,
deterministic=self.deterministic,
)
feature_preprocess_ops.clear()
dataset = op(dataset, dataset_info)
if feature_preprocess_ops:
dataset = dataset.map(
map_func=self.chain(feature_preprocess_ops, dataset_info),
num_parallel_calls=self.num_parallel_calls,
deterministic=self.deterministic,
)
return dataset
@staticmethod
def chain(
ops: Sequence[FeaturesPreprocessOp], dataset_info: tfds.core.DatasetInfo
):
def map_func(features: Features) -> Features:
for op in ops:
features = op(features, dataset_info)
return features
return map_func
@dataclasses.dataclass
class Pad(FeaturesPreprocessOp):
"""Pads the last axis to a minimum length.
Attributes:
    pad_size: The minimum length to pad to, in seconds.
random: If true, pads a random amount left and right. If false, will pad the
end only.
add_mask: Whether to add a new mask feature indicating where the padding
appears in the named features.
names: The name of the features to pad.
sample_rate: Optional sample rate. Reads from dataset_info if not provided.
"""
pad_size: float
random: bool = True
add_mask: bool = True
names: tuple[str, ...] = ('audio',)
sample_rate: int | None = None
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
sample_rate = self.get_sample_rate(dataset_info)
window_size = tf.cast(self.pad_size * sample_rate, tf.int32)
features = features.copy()
for name in self.names:
if name not in features:
continue
padding = tf.reduce_max([window_size - tf.shape(features[name])[-1], 0])
if self.random:
left_pad = tf.random.uniform(
shape=(), minval=0, maxval=padding + 1, dtype=tf.int32
)
right_pad = padding - left_pad
else:
left_pad = 0
right_pad = padding
paddings = ((0, 0),) * (tf.rank(features[name]) - 1) + (
(left_pad, right_pad),
)
mask = tf.ones_like(features[name])
padded_mask = tf.pad(mask, paddings)
if self.add_mask:
features[f'{name}_mask'] = padded_mask
features[name] = tf.pad(features[name], paddings)
return features
@dataclasses.dataclass
class Slice(FeaturesPreprocessOp):
"""Slices a window of the input.
Selects a window of the input data. Slices over the last axis.
Attributes:
window_size: The size of the window to take, in seconds.
start: The starting point of the window, in seconds.
names: The name of the features to slice. Each will be sliced the same way.
sample_rate: Optional sample rate. Reads from dataset_info if not provided.
"""
window_size: float
start: float
names: tuple[str, ...] = ('audio', 'source_audio', 'audio_mask')
sample_rate: int | None = None
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
sample_rate = self.get_sample_rate(dataset_info)
window_size = tf.cast(self.window_size * sample_rate, tf.int64)
start = tf.cast(self.start * sample_rate, tf.int64)
features = features.copy()
for name in self.names:
if name not in features:
continue
features[name] = features[name][..., start : start + window_size]
return features
@dataclasses.dataclass
class RandomSlice(FeaturesPreprocessOp):
"""Slices a random window of the input.
Selects a random window of the input data. Slices over the last axis.
Attributes:
window_size: The size of the window to take, in seconds.
names: The name of the features to slice. Each will be sliced the same way.
sample_rate: Optional sample rate. Reads from dataset_info if not provided.
"""
window_size: float
names: tuple[str, ...] = ('audio', 'source_audio', 'audio_mask')
sample_rate: int | None = None
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
sample_rate = self.get_sample_rate(dataset_info)
audio_len = tf.shape(features[self.names[0]])[-1] / sample_rate
max_start = tf.cast(audio_len - self.window_size, tf.float32)
start = tf.random.uniform(shape=(), minval=0, maxval=max_start)
return Slice(self.window_size, start, self.names)(features, dataset_info)
@dataclasses.dataclass
class NormalizeAudio(FeaturesPreprocessOp):
"""Normalize audio.
Scales the signal so that the gain (maximum amplitude of the signal) is
equal to the target gain. Assumes the signal is on the last axis.
Attributes:
target_gain: The target gain.
names: The name of the features to normalize. The first will be used to
calculate the normalization standard.
eps: An epsilon that is used to avoid division by zero.
"""
target_gain: float
names: tuple[str, ...] = ('audio', 'source_audio')
eps: float = 0.01
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
del dataset_info # Unused
max_gain = tf.reduce_max(
tf.abs(features[self.names[0]]), axis=-1, keepdims=True
)
gain_scalar = self.target_gain / (max_gain + self.eps)
features = features.copy()
for name in self.names:
if name not in features:
continue
features[name] = features[name] * tf.reshape(
gain_scalar,
tf.concat(
[
tf.shape(gain_scalar),
tf.ones(
[tf.rank(features[name]) - tf.rank(gain_scalar)],
dtype=tf.int32,
),
],
axis=0,
),
)
return features
@dataclasses.dataclass
class RandomNormalizeAudio(FeaturesPreprocessOp):
"""Normalize audio using a random target gain.
Scales the signal so that the gain (maximum amplitude of the signal) is
equal to a target gain selected uniformly at random.
Attributes:
min_gain: The minimum target gain.
    max_gain: The maximum target gain.
    names: The names of the features to normalize. The first is used to
      compute the scaling factor.
eps: An epsilon that is used to avoid division by zero.
"""
min_gain: float
max_gain: float
names: tuple[str, ...] = ('audio', 'source_audio')
eps: float = 0.01
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
target_gain = tf.random.uniform(
[], minval=self.min_gain, maxval=self.max_gain
)
return NormalizeAudio(
target_gain=target_gain, names=self.names, eps=self.eps
)(features, dataset_info)
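# Worked example (numbers assumed): with a target gain of 0.2 and a signal
# whose peak amplitude is 0.8, every named feature is scaled by
# 0.2 / (0.8 + eps) ~= 0.25; the first named feature determines the scalar, so
# 'source_audio' stays consistent with the mixed 'audio'. RandomNormalizeAudio
# simply draws the target gain uniformly from [min_gain, max_gain) per example.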
@dataclasses.dataclass
class ResampleAudio(FeaturesPreprocessOp):
"""Resample audio features to a target sample rate."""
target_sample_rate: int
feature_name: str = 'audio'
sample_rate: int | None = None
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
source_sample_rate = self.get_sample_rate(dataset_info)
features = features.copy()
audio = features[self.feature_name]
if len(audio.shape) == 2:
# Assume [Batch, Samples], expand to [B, S, Channels] to match
# tfio assumptions.
audio = audio[:, :, tf.newaxis]
elif len(audio.shape) != 1:
raise ValueError(f'Unexpected audio shape. ({audio.shape})')
features[self.feature_name] = tfio.audio.resample(
audio, rate_in=source_sample_rate, rate_out=self.target_sample_rate
)
if len(features[self.feature_name].shape) == 3:
features[self.feature_name] = tf.squeeze(
features[self.feature_name], axis=2
)
return features
@dataclasses.dataclass
class MixAudio(DatasetPreprocessOp):
"""Mix audio samples.
Attributes:
mixin_prob: The probability of mixing a single example with a single other
example. For a probability p this results in an unnormalized target
distribution of (1 - p, p / 2). If this is given, target_dist cannot be
given and vice versa.
target_dist: The target distribution of mixtures containing 1, 2, ...
sources. Does not have to be normalized. For example, (1., 1.) will result
in half of the examples being raw examples, and the other half being
mixtures of two examples.
    name: The name of the feature to be mixed.
source_name: The unmixed channels will be stored in this feature.
    pad_names: Features that are zero-padded along the mixture axis so that
      examples with fewer sources can still be batched.
label_names: The names of the labels and masks, which will be combined using
an OR operation in the case of mixing.
axis: The axis that should contain the mixed samples (for the source audio
feature as well as the padded features). This should be set to the number
of batch axes (e.g., 0 if this is applied before batching, 1 if applied
after batching, and 2 if applied after batching with splitting across
devices).
"""
mixin_prob: float | None = None
target_dist: tuple[float, ...] | None = None
name: str = 'audio'
source_name: str = 'source_audio'
pad_names: tuple[str, ...] = (
'segment_start',
'segment_end',
'recording_id',
'segment_id',
)
label_names: tuple[str, ...] = (
'label',
'genus',
'family',
'order',
'bg_labels',
'label_mask',
'genus_mask',
'family_mask',
'order_mask',
'bg_labels_mask',
'audio_mask',
)
axis: int = 0
def __post_init__(self):
if not (self.mixin_prob is None) ^ (self.target_dist is None):
raise ValueError('either mixin_prob or target_dist must be set')
if self.target_dist is None:
self.target_dist = (1 - self.mixin_prob, self.mixin_prob / 2)
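  # Worked example (mixin_prob assumed): mixin_prob=0.5 yields the unnormalized
  # target_dist (0.5, 0.25). The division by two reflects that each two-source
  # mixture consumes two raw examples, so mixing half of the *input* examples
  # produces mixtures for only a quarter of the *output* examples.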
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
del dataset_info # Unused
return dataset.group_by_window(
self._key_func, self._reduce_func, window_size_func=lambda i: i + 1
)
def _key_func(self, features: Features) -> tf.Tensor:
del features
target_dist = tf.constant(self.target_dist, dtype=tf.float32)
sample_dist = target_dist * (
tf.range(len(self.target_dist), dtype=tf.float32) + 1.0
)
return tf.squeeze(tf.random.categorical(tf.math.log([sample_dist]), 1))
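  # Note on the weighting above: keys are drawn per *input* example, while
  # target_dist is expressed over *output* examples. A group with (i + 1)
  # sources consumes (i + 1) inputs to emit a single output, so the per-input
  # sampling weights are target_dist scaled by (i + 1).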
def _reduce_func(
self, key: tf.Tensor, dataset: tf.data.Dataset
) -> tf.data.Dataset:
key = tf.cast(key, tf.int32)
# pylint: disable=g-long-lambda
return tf.switch_case(
key,
[
lambda i=i: dataset.batch(i + 1, drop_remainder=True).map(
self._mix_audio
)
for i in range(len(self.target_dist))
],
)
@staticmethod
def _pad_along_axis(tensor, paddings, axis, **kwargs):
zero_paddings = tf.zeros([tf.rank(tensor), 2], dtype=tf.int32)
paddings = tf.concat(
[zero_paddings[:axis], [paddings], zero_paddings[axis + 1 :]], axis=0
)
return tf.pad(tensor, paddings, **kwargs)
def _mix_audio(self, features: Features) -> Features:
"""Mixes the samples."""
for name in self.label_names:
if name not in features:
continue
features[name] = tf.reduce_max(features[name], axis=0)
source_audio = features[self.name]
features[self.name] = tf.reduce_sum(source_audio, axis=0)
# To enable batching we pad with zeros
if source_audio.shape[0] < len(self.target_dist):
p = len(self.target_dist) - source_audio.shape[0]
source_audio = self._pad_along_axis(source_audio, [0, p], axis=0)
if self.axis:
source_audio = tf.experimental.numpy.swapaxes(
source_audio, 0, self.axis
)
for name in self.pad_names:
if name not in features:
continue
features[name] = self._pad_along_axis(features[name], [0, p], axis=0)
if self.axis:
features[name] = tf.experimental.numpy.swapaxes(
features[name], 0, self.axis
)
features[self.source_name] = source_audio
return features
@dataclasses.dataclass
class MultiHot(FeaturesPreprocessOp):
"""Convert labels to multi-hot representation.
This must be done before batching.
Attributes:
names: The labels to convert to multi-hot representations.
"""
names: tuple[str, ...] = ('label', 'genus', 'family', 'order', 'bg_labels')
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
features = features.copy()
for name in self.names:
if name not in features:
continue
features[name] = tf.clip_by_value(
tf.reduce_sum(
tf.one_hot(
features[name],
dataset_info.features[name].feature.num_classes,
dtype=tf.int32,
),
axis=0,
),
0,
1,
)
return features
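# Example (class count assumed): with 10 classes, a sparse 'label' tensor
# [3, 7] becomes the multi-hot vector [0, 0, 0, 1, 0, 0, 0, 1, 0, 0]; repeated
# indices are clipped to 1.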
@dataclasses.dataclass
class MergeBackgroundLabels(FeaturesPreprocessOp):
"""Include background labels in the set of labels for each example."""
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
features = features.copy()
features['label'] = tf.clip_by_value(
features['label'] + features['bg_labels'], 0, 1
)
features['label_mask'] = tf.clip_by_value(
features['label_mask'] + features['bg_labels_mask'], 0, 1
)
return features
@dataclasses.dataclass
class AddChannel(FeaturesPreprocessOp):
name: str = 'audio'
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
features = features.copy()
features[self.name] = tf.expand_dims(features[self.name], axis=-1)
return features
@dataclasses.dataclass
class MelSpectrogram(FeaturesPreprocessOp):
"""Convert audio to a spectrogram.
Attributes:
features: The number of channels to create.
kernel_size: The kernel size to use.
stride: The stride to use.
sample_rate: The sample rate of the original audio.
freq_range: The frequency range to capture.
name: The name of the feature to process.
power: The power of the magnitude spectrogram.
scaling_config: The magnitude scaling to use.
nfft: Length of the FFT used, if a zero padded FFT is desired.
"""
features: int
kernel_size: int
stride: int
sample_rate: int
freq_range: tuple[int, int]
name: str = 'audio'
power: float = 2.0
scaling_config: frontend.ScalingConfig | None = None
nfft: int | None = None
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
features = features.copy()
stfts = audio_utils.stft_tf(
features[self.name],
nperseg=self.kernel_size,
noverlap=self.kernel_size - self.stride,
nfft=self.nfft,
padded=False,
)
if tf.shape(features[self.name])[-1] % self.stride == 0:
stfts = stfts[..., :-1]
stfts = tf.experimental.numpy.swapaxes(stfts, -1, -2)
magnitude_spectrograms = tf.math.abs(stfts) ** self.power
num_spectrogram_bins = magnitude_spectrograms.shape[-1]
mel_matrix = tf.signal.linear_to_mel_weight_matrix(
self.features, num_spectrogram_bins, self.sample_rate, *self.freq_range
)
mel_spectrograms = magnitude_spectrograms @ mel_matrix
def log_scale(x, floor, offset, scalar):
"""TensorFlow port of audio_utils.log_scale."""
return scalar * tf.math.log(tf.maximum(x, floor) + offset)
if isinstance(self.scaling_config, frontend.LogScalingConfig):
# TODO(bartvm): Probably needs standardization step to stabilize training.
features[self.name] = log_scale(
mel_spectrograms, **dataclasses.asdict(self.scaling_config)
)
elif self.scaling_config is None:
features[self.name] = mel_spectrograms
else:
raise ValueError('unknown scaling config')
return features
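# Rough shape arithmetic (parameters assumed for illustration): with 32 kHz
# audio, kernel_size=2048 (64 ms) and stride=640 (20 ms) give about 50 STFT
# frames per second, each projected onto `features` mel bins spanning
# `freq_range`.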
@dataclasses.dataclass
class MFCC(FeaturesPreprocessOp):
"""Convert a spectrogram to MFC coefficients.
This op assumes that the audio has already been processed into a log-magnitude
mel-scale spectrogram.
Attributes:
num_coefficients: The number of MFC coefficients to retain.
aggregate_over_time: If True, aggregates the MFC coefficients over time into
four summary statistics: mean, standard deviation, min, and max, resulting
in four feature vectors of shape `num_coefficients` that are then
concatenated into a single feature vector. This mirrors the processing
done in the BEANS benchmark (Hagiwara et al., 2022).
name: The name of the feature to process.
"""
num_coefficients: int
aggregate_over_time: bool = True
name: str = 'audio'
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
del dataset_info
features = features.copy()
features[self.name] = tf.signal.mfccs_from_log_mel_spectrograms(
features[self.name]
)[..., : self.num_coefficients]
if self.aggregate_over_time:
mean, variance = tf.nn.moments(features[self.name], axes=[-2])
features[self.name] = tf.concat(
[
mean,
tf.sqrt(variance),
tf.reduce_min(features[self.name], axis=-2),
tf.reduce_max(features[self.name], axis=-2),
],
axis=-1,
)
return features
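# For example, with num_coefficients=20 and aggregate_over_time=True the time
# axis is collapsed and the output is an 80-dimensional vector: the
# concatenation of the per-coefficient mean, standard deviation, minimum and
# maximum.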
@dataclasses.dataclass
class LabelsToString(FeaturesPreprocessOp):
"""Converts labels to a string representation.
Label values are joined using `separator`.
Attributes:
names: The labels to convert to a string representation.
separator: The separator character to use.
"""
names: tuple[str, ...] = ('label', 'genus', 'family', 'order', 'bg_labels')
separator: str = ' '
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
features = features.copy()
for name in self.names:
if name not in features:
continue
features[name] = tf.strings.reduce_join(
tf.gather(
tf.constant(dataset_info.features[name].feature.names),
features[name],
),
separator=self.separator,
)
return features
@dataclasses.dataclass
class LabelConversionConstants:
"""TF constants created while executing `ConvertBirdTaxonomyLabels`.
Attributes:
tables: a mapping from feature name to StaticHashTable for label conversion.
masks: a mapping from feature name to mask for the translated labels.
"""
tables: dict[str, tf.lookup.StaticHashTable]
masks: dict[str, tf.Tensor]
@dataclasses.dataclass
class ConvertBirdTaxonomyLabels(FeaturesPreprocessOp):
"""Convert to a target set of classes and generate taxonomy labels."""
source_namespace: str = 'ebird2021'
target_class_list: str = 'ebird2021'
species_feature_name: str = 'label'
species_bg_label_name: str = 'bg_labels'
add_taxonomic_labels: bool = True
# Whether to add output features indicating which classes are represented
# in the source dataset.
output_masks: bool = True
# The following members are for cached / stateful data.
db: namespace_db.TaxonomyDatabase | None = None
def __post_init__(self):
# Create NamespaceDatabase in post_init to avoid loading CSVs repeatedly.
# Note that we purposefully avoid creating TF constants here. All TF
# constant need to be created within the scope of `tf.data.Dataset.map`
# (which in this case means inside __call__) so that the pipeline can be
# applied multiple times on different datasets. Otherwise, in subsequent
# pipeline applications TF will attempt to re-use previous constants
# belonging to a different tf.function.
self.db = namespace_db.load_db()
def load_tables(
self, source_class_list: namespace.ClassList
) -> LabelConversionConstants:
"""Construct TF StaticHashTables from namespace db info.
Args:
      source_class_list: ClassList for the source dataset.
Returns:
TF constants needed for the execution of this preprocessing op.
"""
tables = {}
masks = {}
target_classes = self.db.class_lists[self.target_class_list]
label_table, label_mask = source_class_list.get_class_map_tf_lookup(
target_classes
)
tables[self.species_feature_name] = label_table
masks[self.species_feature_name] = label_mask
tables[self.species_bg_label_name] = label_table
masks[self.species_bg_label_name] = label_mask
# Avoid searching for taxonomic mappings if `self.add_taxonomic_labels ==
# False`, because it's possible that such a mapping doesn't exist.
if self.add_taxonomic_labels:
for key in ['genus', 'family', 'order']:
# This is surprisingly tricky to get right for mismatched eval sets.
        # First map the source and target classes (e.g., eval dataset species
        # and model ClassList) into the target namespace (e.g., genera). This
        # creates two different ClassLists of genera. We then map the source
        # genera to the target genera to obtain an appropriate label_mask.
namespace_mapping = self.db.mappings[
self.source_namespace + '_to_' + key
]
source_taxa_classes = source_class_list.apply_namespace_mapping(
namespace_mapping, keep_unknown=True
)
target_taxa_classes = target_classes.apply_namespace_mapping(
namespace_mapping, keep_unknown=True
)
namespace_table = source_class_list.get_namespace_map_tf_lookup(
namespace_mapping, keep_unknown=True
)
class_table, label_mask = source_taxa_classes.get_class_map_tf_lookup(
target_taxa_classes
)
tables[key + '_namespace'] = namespace_table
tables[key + '_class'] = class_table
masks[key] = label_mask
return LabelConversionConstants(tables=tables, masks=masks)
def convert_labels(
self,
features: Features,
key: str,
output_key: str,
label_conversion_constants: LabelConversionConstants,
) -> Features:
"""Get a transformation for a given ClassList."""
tables = label_conversion_constants.tables
masks = label_conversion_constants.masks
if output_key in (self.species_feature_name, self.species_bg_label_name):
table = tables[key]
label_mask = masks[key]
output_labels = table.lookup(features[key])
else:
namespace_table = tables[output_key + '_namespace']
class_table = tables[output_key + '_class']
output_labels = class_table.lookup(namespace_table.lookup(features[key]))
label_mask = masks[output_key]
# Drop unknown labels.
output_labels = tf.gather(output_labels, tf.where(output_labels >= 0)[:, 0])
# Convert to MultiHot encoding.
class_list_size = label_mask.shape[0]
output_labels = tf.clip_by_value(
tf.reduce_sum(
tf.one_hot(output_labels, class_list_size, dtype=tf.int64), axis=0
),
0,
1,
)
return {output_key: output_labels, output_key + '_mask': label_mask}
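  # Sketch of the taxonomic path (for output_key='genus', say): a species label
  # is first mapped by the namespace table into the source genus ClassList,
  # then by the class table into the target genus ClassList, and finally
  # multi-hot encoded over the target genera after dropping unknown (-1)
  # lookups.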
def convert_features(
self, features: Features, source_classes: namespace.ClassList
) -> Features:
"""Convert features to target class list and add taxonomy labels."""
output_features = features.copy()
label_conversion_constants = self.load_tables(source_classes)
output_features.update(
self.convert_labels(
features,
self.species_feature_name,
self.species_feature_name,
label_conversion_constants,
)
)
if self.species_bg_label_name in features:
output_features.update(
self.convert_labels(
features,
self.species_bg_label_name,
self.species_bg_label_name,
label_conversion_constants,
)
)
if not self.add_taxonomic_labels:
return output_features
output_features.update(
self.convert_labels(
features,
self.species_feature_name,
'genus',
label_conversion_constants,
)
)
output_features.update(
self.convert_labels(
features,
self.species_feature_name,
'family',
label_conversion_constants,
)
)
output_features.update(
self.convert_labels(
features,
self.species_feature_name,
'order',
label_conversion_constants,
)
)
return output_features
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
source_classes = namespace.ClassList(
self.source_namespace,
# TODO(vdumoulin): generalize this to labels beyond 'ignore'.
# Some dataset variants (e.g. bird_taxonomy/downstream_slice_peaked)
# use an 'ignore' label which is not part of the eBirds taxonomy. We
# ignore this label; the mapping tables return an 'unknown' default
# value, so all 'ignore' labels will naturally be converted to
# 'unknown'.
tuple(
n
for n in dataset_info.features[self.species_feature_name].names
if n != 'ignore'
),
)
output_features = self.convert_features(features, source_classes)
return output_features
@dataclasses.dataclass
class OnlyJaxTypes(FeaturesPreprocessOp):
"""Discards tensors that are not supported by JAX (e.g., non-numeric).
This must be done before batching.
"""
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
new_features = {}
for name, feature in features.items():
if (
isinstance(feature, tf.Tensor)
and hasattr(jnp, feature.dtype.name)
or feature.dtype is tf.bool
):
new_features[name] = feature
return new_features
@dataclasses.dataclass
class OnlyKeep(FeaturesPreprocessOp):
"""Discards features with names not in `names`.
Attributes:
names: The names of features to keep.
"""
names: Iterable[str]
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
return {
name: feature
for name, feature in features.items()
if name in self.names
}
@dataclasses.dataclass
class FilterMultiLabelRecordings(DatasetPreprocessOp):
"""Filters out recordings that have multiple foreground labels."""
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
def _predicate(features):
return tf.math.equal(tf.shape(features['label'])[0], 1)
return dataset.filter(_predicate)
@dataclasses.dataclass
class FilterByFeature(DatasetPreprocessOp):
"""Filters the dataset by feature values.
Attributes:
filtering_df_path: Path to a single-column, CSV-serialized DataFrame whose
column name represents the feature name used for the filtering operation
and whose rows contain the allowed feature values.
complement: Whether to perform the complement of the filtering operation,
i.e., swap which dataset elements are filtered and which are kept.
"""
filtering_df_path: str
complement: bool = False
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
df = pd.read_csv(self.filtering_df_path)
if len(df.columns) != 1:
raise ValueError(
'filtering_df_path should point to a single-column DataFrame.'
)
(feature_name,) = df.columns
feature_dtype = df[feature_name].dtype
feature_values = df[feature_name].values
feature_values_table = tf.lookup.StaticHashTable(
initializer=tf.lookup.KeyValueTensorInitializer(
keys=tf.constant(feature_values, dtype=feature_dtype),
values=tf.range(len(feature_values), dtype=feature_dtype),
),
default_value=-1,
)
def _predicate(features):
value = tf.cast(features[feature_name], feature_dtype)
should_include = feature_values_table.lookup(value) > -1
if self.complement:
should_include = ~should_include
return should_include
return dataset.filter(_predicate)
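# Hypothetical usage: a CSV with a single 'recording_id' column listing allowed
# IDs keeps only examples whose 'recording_id' appears in that column;
# complement=True instead drops those examples and keeps all others.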
@dataclasses.dataclass
class HashId(FeaturesPreprocessOp):
"""Hashes a tfds_id into a unique integer."""
num_buckets: int = int(1e9)
def __call__(
self, features: Features, dataset_info: tfds.core.DatasetInfo
) -> Features:
features['tfds_id'] = tf.strings.to_hash_bucket_fast(
features['tfds_id'], self.num_buckets
)
return features
@dataclasses.dataclass
class Shuffle(DatasetPreprocessOp):
"""Shuffles the dataset."""
shuffle_buffer_size: int
seed: int | None = None
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
return dataset.shuffle(self.shuffle_buffer_size, seed=self.seed)
@dataclasses.dataclass
class Repeat(DatasetPreprocessOp):
"""Repeats the data infinitely."""
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
return dataset.repeat()
@dataclasses.dataclass
class Batch(DatasetPreprocessOp):
"""Collects samples into batches.
This preprocessing operation drops the remainder by default.
Attributes:
batch_size: The batch size to use.
split_across_devices: If true, the minibatch will be split into smaller
minibatches to be distributed across the local devices present. This is
useful for distributed training.
drop_remainder: Whether or not to drop remainder batch. Note that in the
multi-device setting, examples will still be dropped if the dataset size
is not a multiple of the batch size divided by the number of devices.
"""
batch_size: int
split_across_devices: bool = False
drop_remainder: bool = True
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
if self.split_across_devices:
if self.batch_size % jax.device_count():
raise ValueError(
f'batch size ({self.batch_size}) must be divisible by '
f'number of devices ({jax.device_count()}).'
)
logging.info(
'Splitting batch across %d devices, with local device count %d.',
jax.device_count(),
jax.local_device_count(),
)
dataset = dataset.batch(
self.batch_size // jax.device_count(), drop_remainder=True
)
return dataset.batch(
jax.local_device_count(), drop_remainder=self.drop_remainder
)
else:
return dataset.batch(self.batch_size, drop_remainder=self.drop_remainder)
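# Shape example (sizes assumed): with batch_size=64 and 8 devices on a single
# host, split_across_devices=True first batches 64 // jax.device_count() = 8
# examples and then batches jax.local_device_count() of those, yielding
# elements shaped [num_local_devices, per_device_batch, ...] with a leading
# device axis suitable for pmap-style data parallelism.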
@dataclasses.dataclass
class ExtractStridedWindows(DatasetPreprocessOp):
"""Extracts strided windows from examples.
Attributes:
window_length_sec: The window interval length to use, in seconds.
    window_stride_sec: The stride between consecutive window starts, in
      seconds.
pad_end: Whether to pad the end of the recording. If True, window positions
that are past the end of the recording are padded with zeros until the
window moves fully past the end of the recording. Otherwise, only window
positions that fully overlap the recording are considered.
sample_rate: Optional sample rate. Reads from dataset_info if not provided.
"""
window_length_sec: float
window_stride_sec: float
pad_end: bool = True
sample_rate: int | None = None
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
sample_rate = self.get_sample_rate(dataset_info)
window_length = int(sample_rate * self.window_length_sec)
window_stride = int(sample_rate * self.window_stride_sec)
def map_fn(example):
example['audio'] = tf.signal.frame(
signal=example['audio'],
frame_length=window_length,
frame_step=window_stride,
pad_end=self.pad_end,
)
# At this point, example['audio'] has shape [num_windows, window_length].
# We assign a unique sequential ID in [0, num_windows - 1] to each window.
example['segment_id'] = tf.range(
tf.shape(example['audio'])[0], dtype=tf.int64
)
example['segment_start'] = tf.cast(
example['segment_id'] * window_stride, example['segment_start'].dtype
)
example['segment_end'] = tf.cast(
example['segment_start'] + window_length, example['segment_end'].dtype
)
# Other features are shared across slices, so we repeat them across the
# first axis.
feature_names = ('audio', 'segment_id', 'segment_start', 'segment_end')
for key, value in (
(key, value)
for key, value in example.items()
if key not in feature_names
):
value = tf.expand_dims(value, 0)
value = tf.tile(
value,
[tf.shape(example['audio'])[0]] + [1] * (value.shape.ndims - 1),
)
example[key] = value
return example
# Unbatching yields slices one by one.
return dataset.map(map_fn).unbatch()
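# Window-count example (durations assumed): a 60 s recording with
# window_length_sec=5 and window_stride_sec=2.5 yields ceil(60 / 2.5) = 24
# windows when pad_end=True, and 23 fully-overlapping windows when
# pad_end=False; each window carries its own 'segment_id', 'segment_start' and
# 'segment_end'.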
@dataclasses.dataclass
class DenselyAnnotateWindows(DatasetPreprocessOp):
"""Densely annotates sliding windows of the dataset's 'audio'.
  After strided windows have been extracted from the dataset's 'audio' feature,
  this preprocessing op distributes the labels of each annotated segment to all
  windows that overlap it in time by at least a given threshold. Each window is
  assigned the labels of every overlapping annotation, together with the
  corresponding 'annotation_start' and 'annotation_end' features. When a window
  overlaps more than one annotation, it is assigned the labels of all of them.
Process: compare each 'audio' window's 'segment_start' and 'segment_end' times
with the time delimiters in its 'annotation_start' and 'annotation_end'; if
there is an absolute overlap of at least `overlap_threshold_sec` with the
segment bounds, the window receives the segment labels.
Attributes:
overlap_threshold_sec: The minimum overlap, in seconds, between a window and
a labeled segment for the former to inherit its label. This overlap is
translated into a number of audio samples using the dataset's sampling
rate. If None, we set the threshold to one audio sample.
drop_annotation_bounds: If True, remove the 'annotation_start' and
'annotation_end' features. If False, the annotation bound features are
placed in an array of size [num_labels], with zeros for entries where no
label is present. This allows downstream batching, since the features are
of fixed size. (We also add features for annotation_size and
intersection_size for downstream debugging and analysis.)
sample_rate: Optional sample rate. Reads from dataset_info if not provided.
"""
overlap_threshold_sec: float | None = None
drop_annotation_bounds: bool = False
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
sample_rate = self.get_sample_rate(dataset_info)
overlap_threshold = (
1
if self.overlap_threshold_sec is None
else int(sample_rate * self.overlap_threshold_sec)
)
def map_fn(example):
example = example.copy()
# A window and an annotated segment overlaps (by at least
# `overlap_threshold`) if the following is true:
# max(segment_start, annotation_start)
# <= min(segment_end, annotation_end) - overlap_threshold
# Note that `example['segment_{start|end}']` is uint64-valued and
# `example['annotation_{start|end}']` is a variable-length sequence of
# integers and the operation is broadcasted across all segments.
      # Find the start and end of the intersection of the annotation and segment.
# If inter_end < inter_start, the intersection is empty.
inter_end = tf.cast(
tf.minimum(example['segment_end'], example['annotation_end']),
tf.int64,
)
inter_start = tf.cast(
tf.maximum(example['segment_start'], example['annotation_start']),
tf.int64,
)
overlap_comparison = tf.cast(
inter_end - inter_start - overlap_threshold >= 0, tf.bool
)
overlap_indices = tf.reshape(tf.where(overlap_comparison), [-1])
if self.drop_annotation_bounds:
del example['annotation_start']
del example['annotation_end']
else:
# Add per-label annotation metadata. When a label is not present, these
# data default to zero.
# Note: In case a segment has multiple annotations for the same species,
# only one annotation will be described by these metadata.
num_classes = len(dataset_info.features['label'].names)
label_idxs = tf.gather(example['label'], overlap_indices)
example['intersection_size'] = tf.maximum(inter_end - inter_start, 0)
example['annotation_length'] = tf.cast(
example['annotation_end'], tf.int64
) - tf.cast(example['annotation_start'], tf.int64)
for k in (
'annotation_start',
'annotation_end',
'intersection_size',
'annotation_length',
):
example[k] = tf.cast(tf.gather(example[k], overlap_indices), tf.int64)
example[k] = tf.scatter_nd(
indices=label_idxs[:, tf.newaxis],
updates=example[k],
shape=[num_classes],
)
example['label'] = tf.gather(example['label'], overlap_indices)
return example
# TODO(tomdenton): We should refactor this into a FeaturesPreprocessOp.
# Refactoring will allow grouping it with other ops and
# reduce the total number of dataset.map calls, thus saving parallelism.
return dataset.map(map_fn)
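# Overlap example (times assumed; internally expressed in samples): a window
# spanning [5 s, 10 s] and an annotation spanning [8 s, 12 s] intersect for
# 2 s. If that intersection is at least the overlap threshold, the window
# inherits the annotation's labels and, unless drop_annotation_bounds is set,
# its per-label annotation metadata.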
@dataclasses.dataclass
class Cache(DatasetPreprocessOp):
"""Caches the dataset.
Attributes:
filename: Where to cache the dataset. If left empty, the dataset is cached
in memory.
"""
filename: str = ''
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
del dataset_info
return dataset.cache(filename=self.filename)
@dataclasses.dataclass
class FilterDropLabel(DatasetPreprocessOp):
"""Drop any examples with the target label."""
target_label: str = 'unknown'
def __call__(
self, dataset: tf.data.Dataset, dataset_info: tfds.core.DatasetInfo
) -> tf.data.Dataset:
label_names = dataset_info.features['label'].names
if self.target_label not in label_names:
return dataset
filter_idx = label_names.index(self.target_label)
def _pred(features):
return tf.math.logical_not(tf.reduce_any(filter_idx == features['label']))
return dataset.filter(_pred)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for evaluation."""
import dataclasses
import functools
import os
from typing import Callable, Iterator, Mapping, Sequence, TypeVar
from absl import logging
from chirp.data import utils as data_utils
from chirp.models import metrics
from chirp.taxonomy import namespace_db
import jax
import ml_collections
import numpy as np
import pandas as pd
import tensorflow as tf
_EMBEDDING_KEY = 'embedding'
_LABEL_KEY = 'label'
_BACKGROUND_KEY = 'bg_labels'
ConfigDict = ml_collections.ConfigDict
EvalModelCallable = Callable[[np.ndarray], np.ndarray]
_T = TypeVar('_T', bound='EvalSetSpecification')
_EVAL_REGIONS = (
'ssw',
'coffee_farms',
'hawaii',
'high_sierras',
'sierras_kahl', # Sierra Nevada
'peru',
)
# TODO(bringingjoy): Update once mismatched species codes are resolved in our
# class lists.
_MISMATCHED_SPECIES_CODES = [
'reevir1',
'gnwtea',
'grnjay',
'butwoo1',
'unknown',
]
@dataclasses.dataclass
class EvalSetSpecification:
"""A specification for an eval set.
Attributes:
class_names: Class names over which to perform the evaluation.
search_corpus_global_mask_expr: String expression passed to the embeddings
dataframe's `eval` method to obtain a boolean mask over its rows. Used to
represent global properties like `df['dataset_name'] == 'coffee_farms'`.
Computed once and combined with `search_corpus_classwise_mask_fn` for
every class in `class_names` to perform boolean indexing on the embeddings
dataframe and form the search corpus.
search_corpus_classwise_mask_fn: Function mapping a class name to a string
expression passed to the embeddings dataframe's `eval` method to obtain a
boolean mask over its rows. Used to represent classwise properties like
`~df['bg_labels'].str.contains(class_name)`. Combined with
`search_corpus_global_mask_expr` for every class in `class_names` to
perform boolean indexing on the embeddings dataframe and form the search
corpus.
class_representative_global_mask_expr: String expression passed to the
embeddings dataframe's `eval` method to obtain a boolean mask over its
rows. Used to represent global properties like `df['dataset_name'] ==
'xc_downstream'`. Computed once and combined with
`class_representative_corpus_classwise_mask_fn` for every class in
`class_names` to perform boolean indexing on the embeddings dataframe and
form the collection of class representatives.
class_representative_classwise_mask_fn: Function mapping a class name to a
string expression passed to the embeddings dataframe's `eval` method to
obtain a boolean mask over its rows. Used to represent classwise
properties like `df['label'].str.contains(class_name)`. Combined with
`class_representative_corpus_global_mask_expr` for every class in
`class_names` to perform boolean indexing on the embeddings dataframe and
form the collection of class representatives.
num_representatives_per_class: Number of class representatives to sample. If
the pool of potential representatives is larger, it's downsampled
uniformly at random to the correct size. If -1, all representatives are
used.
"""
class_names: Sequence[str]
search_corpus_global_mask_expr: str
search_corpus_classwise_mask_fn: Callable[[str], str]
class_representative_global_mask_expr: str
class_representative_classwise_mask_fn: Callable[[str], str]
num_representatives_per_class: int
@classmethod
def v1_specification(
cls: type[_T],
location: str,
corpus_type: str,
num_representatives_per_class: int,
) -> _T:
"""Instantiates an eval protocol v1 EvalSetSpecification.
Args:
location: Geographical location in {'ssw', 'coffee_farms', 'hawaii'}.
corpus_type: Corpus type in {'xc_fg', 'xc_bg', 'birdclef'}.
num_representatives_per_class: Number of class representatives to sample.
If -1, all representatives are used.
Returns:
The EvalSetSpecification.
"""
downstream_class_names = (
namespace_db.load_db().class_lists['downstream_species_v2'].classes
)
# "At-risk" species are excluded from downstream data due to conservation
# status.
class_names = {
'ssw': (
namespace_db.load_db()
.class_lists['artificially_rare_species_v2']
.classes
),
'coffee_farms': [
c
for c in namespace_db.load_db().class_lists['coffee_farms'].classes
if c in downstream_class_names
],
'hawaii': [
c
for c in namespace_db.load_db().class_lists['hawaii'].classes
if c in downstream_class_names
],
}[location]
# The name of the dataset to draw embeddings from to form the corpus.
corpus_dataset_name = (
f'birdclef_{location}' if corpus_type == 'birdclef' else 'xc_downstream'
)
class_representative_dataset_name = {
'ssw': 'xc_artificially_rare_v2',
'coffee_farms': 'xc_downstream',
'hawaii': 'xc_downstream',
}[location]
# `'|'.join(class_names)` is a regex which matches *any* class in
# `class_names`.
class_name_regexp = '|'.join(class_names)
return cls(
class_names=class_names,
# Only include embeddings in the search corpus which have foreground
# ('label') and/or background labels ('bg_labels') for some class in
# `class_names`, which are encoded as space-separated species IDs/codes.
search_corpus_global_mask_expr=(
f'dataset_name == "{corpus_dataset_name}" and '
f'(label.str.contains("{class_name_regexp}") or '
f'bg_labels.str.contains("{class_name_regexp}"))'
),
# Ensure that target species' background vocalizations are not present
# in the 'xc_fg' corpus and vice versa.
search_corpus_classwise_mask_fn={
'xc_fg': lambda name: f'not bg_labels.str.contains("{name}")',
'xc_bg': lambda name: f'not label.str.contains("{name}")',
'birdclef': lambda _: 'label.str.contains("")',
}[corpus_type],
# Class representatives are drawn only from foreground-vocalizing
# species present in Xeno-Canto.
class_representative_global_mask_expr=(
f'dataset_name == "{class_representative_dataset_name}"'
),
class_representative_classwise_mask_fn=(
lambda name: f'label.str.contains("{name}")'
),
num_representatives_per_class=num_representatives_per_class,
)
@classmethod
def v2_specification(
cls: type[_T],
location: str,
corpus_type: str,
num_representatives_per_class: int,
) -> _T:
"""Instantiates an eval protocol v2 EvalSetSpecification.
Args:
location: Geographical location in {'ssw', 'coffee_farms', 'hawaii'}.
corpus_type: Corpus type in {'xc_fg', 'xc_bg', 'soundscapes'}.
num_representatives_per_class: Number of class representatives to sample.
If -1, all representatives are used.
Returns:
The EvalSetSpecification.
"""
downstream_class_names = (
namespace_db.load_db().class_lists['downstream_species_v2'].classes
)
class_names = {}
for region in _EVAL_REGIONS:
if region == 'ssw':
# Filter recordings with 'unknown' species label.
ssw = 'artificially_rare_species_v2'
species = [
c
for c in namespace_db.load_db().class_lists[ssw].classes
if c not in _MISMATCHED_SPECIES_CODES
]
elif region in ('peru', 'high_sierras', 'sierras_kahl'):
species = [
c
for c in namespace_db.load_db().class_lists[region].classes
if c not in _MISMATCHED_SPECIES_CODES
]
else:
# Keep recordings which map to downstream class species.
species = [
c
for c in namespace_db.load_db().class_lists[region].classes
if c in downstream_class_names
and c not in _MISMATCHED_SPECIES_CODES
]
class_names[region] = species
class_names = class_names[location]
# The name of the dataset to draw embeddings from to form the corpus.
corpus_dataset_name = (
f'soundscapes_{location}'
if corpus_type == 'soundscapes'
else 'xc_downstream'
)
class_representative_dataset_name = 'xc_class_reps'
class_name_regexp = '|'.join(class_names)
return cls(
class_names=class_names,
# Only include embeddings in the search corpus which have foreground
# ('label') and/or background labels ('bg_labels') for some class in
# `class_names`, which are encoded as space-separated species IDs/codes.
search_corpus_global_mask_expr=(
f'dataset_name == "{corpus_dataset_name}" and '
f'(label.str.contains("{class_name_regexp}") or '
f'bg_labels.str.contains("{class_name_regexp}"))'
),
# Ensure that target species' background vocalizations are not present
# in the 'xc_fg' corpus and vice versa.
search_corpus_classwise_mask_fn={
'xc_fg': lambda name: f'not bg_labels.str.contains("{name}")',
'xc_bg': lambda name: f'not label.str.contains("{name}")',
'soundscapes': lambda _: 'label.str.contains("")',
}[corpus_type],
# Class representatives are drawn from foreground-vocalizing species
# present in Xeno-Canto after applying peak-finding.
class_representative_global_mask_expr=(
f'dataset_name == "{class_representative_dataset_name}"'
),
class_representative_classwise_mask_fn=(
lambda name: f'label.str.contains("{name}")'
),
num_representatives_per_class=num_representatives_per_class,
)
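# Hypothetical usage sketch: EvalSetSpecification.v2_specification(
#     location='hawaii', corpus_type='xc_fg', num_representatives_per_class=4)
# selects Hawaiian downstream species, excludes each class's background
# vocalizations from the 'xc_fg' search corpus, and samples four class
# representatives per class from the 'xc_class_reps' dataset.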
@dataclasses.dataclass
class ClasswiseEvalSet:
class_name: str
class_representatives_df: pd.DataFrame
search_corpus_mask: pd.Series
@dataclasses.dataclass
class EvalSet:
name: str
search_corpus_df: pd.DataFrame
classwise_eval_sets: tuple[ClasswiseEvalSet, ...]
def _load_eval_dataset(dataset_config: ConfigDict) -> tf.data.Dataset:
"""Loads an evaluation dataset from its corresponding configuration dict."""
return data_utils.get_dataset(
split=dataset_config.split,
is_train=False,
dataset_directory=dataset_config.tfds_name,
tfds_data_dir=dataset_config.tfds_data_dir,
tf_data_service_address=None,
pipeline=dataset_config.pipeline,
)[0]
def load_eval_datasets(config: ConfigDict) -> dict[str, tf.data.Dataset]:
"""Loads all evaluation datasets for a given evaluation configuration dict.
Args:
config: the evaluation configuration dict.
Returns:
A dict mapping dataset names to evaluation datasets.
"""
return {
dataset_name: _load_eval_dataset(dataset_config)
for dataset_name, dataset_config in config.dataset_configs.items()
}
def get_embeddings(
dataset: tf.data.Dataset,
model_callback: Callable[[np.ndarray], np.ndarray],
batch_size: int,
) -> tf.data.Dataset:
"""Embeds the audio slice in each tf.Example across the input dataset.
Args:
dataset: A TF Dataset composed of tf.Examples.
model_callback: A Callable that takes a batched NumPy array and produces a
batched embedded NumPy array.
batch_size: The number of examples to embed in each batch.
Returns:
An updated TF Dataset with a new 'embedding' feature and deleted 'audio'
feature.
"""
def _map_func(example):
example['embedding'] = tf.numpy_function(
func=model_callback, inp=[example['audio']], Tout=tf.float32
)
del example['audio']
return example
# Use the 'audio' feature to produce a model embedding; delete the old 'audio'
# feature.
embedded_dataset = (
dataset.batch(batch_size, drop_remainder=False).prefetch(1).map(_map_func)
)
return embedded_dataset.unbatch()
def _get_class_representatives_df(
embeddings_df: pd.DataFrame,
class_representative_mask: pd.Series,
num_representatives_per_class: int,
rng_key: jax.random.KeyArray,
) -> pd.DataFrame:
"""Creates a class representatives DataFrame, possibly downsampling at random.
Args:
embeddings_df: The embeddings DataFrame.
class_representative_mask: A boolean mask indicating which embeddings to
consider for the class representatives.
num_representatives_per_class: Number of representatives per class to
select. If -1, all representatives are returned. When the number of
representatives indicated by `class_representative_mask` is greater than
`num_representatives_per_class`, they are downsampled at random to that
threshold.
rng_key: PRNG key used to perform the random downsampling operation.
Returns:
A DataFrame of class representatives.
"""
num_potential_class_representatives = class_representative_mask.sum()
if num_representatives_per_class >= 0:
# If needed, downsample to `num_representatives_per_class` at random.
if num_potential_class_representatives > num_representatives_per_class:
locations = sorted(
jax.random.choice(
rng_key,
num_potential_class_representatives,
shape=(num_representatives_per_class,),
replace=False,
).tolist()
)
# Set all other elements of the mask to False. Since
# `class_representative_mask` is a boolean series, indexing it with
# itself returns the True-valued rows. We can then use `locations` to
# subsample those rows and retrieve the resulting index subset.
index_subset = (
class_representative_mask[class_representative_mask]
.iloc[locations]
.index
)
class_representative_mask = class_representative_mask.copy()
class_representative_mask[
~class_representative_mask.index.isin(index_subset)
] = False
return embeddings_df[class_representative_mask]
@dataclasses.dataclass
class _HashedEmbeddingsDataFrame:
"""A hashable dataclass to encapsulate an embeddings DataFrame.
NOTE: The hash implementation relies on a unique object ID for the DataFrame,
which is determined at creation time. This is fast, but brittle. The
embeddings DataFrame should *never* be modified in-place; doing so would
result in a different DataFrame with the same hash.
"""
df: pd.DataFrame
def __hash__(self):
return id(self.df)
@functools.cache
def _df_eval(hashable_df: _HashedEmbeddingsDataFrame, expr: str) -> pd.Series:
return hashable_df.df.eval(expr, engine='python')
def _prepare_eval_set(
embeddings_df: _HashedEmbeddingsDataFrame,
eval_set_specification: EvalSetSpecification,
rng_key: jax.random.KeyArray,
) -> tuple[pd.DataFrame, tuple[ClasswiseEvalSet, ...]]:
"""Prepares a single eval set.
  This entails creating and returning a search corpus DataFrame and a tuple of
  ClasswiseEvalSet objects, one per class in the eval set specification. Each
  ClasswiseEvalSet holds a class name, a class representatives DataFrame, and a
  search corpus mask indicating which part of the search corpus should be
  ignored in the context of its corresponding class (e.g., because it overlaps
  with the chosen class representatives).
Args:
embeddings_df: A DataFrame containing all evaluation embeddings and their
relevant metadata.
eval_set_specification: The specification used to form the eval set.
rng_key: The PRNG key used to perform random subsampling of the class
representatives when necessary.
Returns:
    A (search_corpus_df, classwise_eval_sets) tuple, where classwise_eval_sets
    is a tuple of ClasswiseEvalSet objects.
"""
global_search_corpus_mask = _df_eval(
embeddings_df, eval_set_specification.search_corpus_global_mask_expr
)
global_class_representative_mask = _df_eval(
embeddings_df,
eval_set_specification.class_representative_global_mask_expr,
)
num_representatives_per_class = (
eval_set_specification.num_representatives_per_class
)
search_corpus_df = embeddings_df.df[global_search_corpus_mask]
classwise_eval_sets = []
for class_name in eval_set_specification.class_names:
choice_key, rng_key = jax.random.split(rng_key)
class_representative_mask = global_class_representative_mask & _df_eval(
embeddings_df,
eval_set_specification.class_representative_classwise_mask_fn(
class_name
),
)
# TODO(vdumoulin): fix the issue upstream to avoid having to skip
# classes in the first place.
if (
num_representatives_per_class >= 0
and class_representative_mask.sum() < num_representatives_per_class
):
logging.warning(
'Skipping %s as we cannot find enough representatives', class_name
)
continue
class_representatives_df = _get_class_representatives_df(
embeddings_df.df,
class_representative_mask,
num_representatives_per_class,
choice_key,
)
search_corpus_mask = (
global_search_corpus_mask
& _df_eval(
embeddings_df,
eval_set_specification.search_corpus_classwise_mask_fn(class_name),
)
# Exclude rows selected as class representatives.
& ~embeddings_df.df.index.isin(class_representatives_df.index)
)
search_corpus_mask = search_corpus_mask.loc[search_corpus_df.index]
# TODO(vdumoulin): fix the issue upstream to avoid having to skip classes
# in the first place.
if (
search_corpus_df['label'][search_corpus_mask].str.contains(class_name)
| search_corpus_df['bg_labels'][search_corpus_mask].str.contains(
class_name
)
).sum() == 0:
logging.warning(
'Skipping %s as the corpus contains no individual of that class',
class_name,
)
continue
classwise_eval_sets.append(
ClasswiseEvalSet(
class_name=class_name,
class_representatives_df=class_representatives_df,
search_corpus_mask=search_corpus_mask,
)
)
return search_corpus_df, tuple(classwise_eval_sets)
def _add_dataset_name(
features: dict[str, tf.Tensor], dataset_name: str
) -> dict[str, tf.Tensor]:
"""Adds a 'dataset_name' feature to a features dict.
Args:
features: The features dict.
dataset_name: The 'dataset_name' feature value to add.
Returns:
The features dict with the added 'dataset_name' feature.
"""
features['dataset_name'] = tf.constant(dataset_name)
if 'bg_labels' not in features:
features['bg_labels'] = tf.constant('')
return features
def _numpy_iterator_with_progress_logging(embedded_dataset):
for i, example in enumerate(embedded_dataset.as_numpy_iterator()):
yield example
logging.log_every_n(
logging.INFO,
'Converting concatenated embedded dataset to dataframe (%d done)...',
1000, # n=1000
i,
)
def _create_embeddings_dataframe(
embedded_datasets: dict[str, tf.data.Dataset], config: ConfigDict
) -> pd.DataFrame:
"""Builds a dataframe out of all embedded datasets.
The dataframe also contains upstream class representations (rows with the
'learned_representations' value for their 'dataset_name' column).
Args:
embedded_datasets: A mapping from dataset name to embedded dataset.
config: The evaluation configuration dict.
Returns:
The embeddings dataframe, with additional rows for upstream class
representations (accessible through `embeddings_df[
embeddings_df['dataset_name'] == 'learned_representations']`).
"""
# Concatenate all embedded datasets into one embeddings dataset.
it = iter(embedded_datasets.values())
embedded_dataset = next(it)
for dataset in it:
embedded_dataset = embedded_dataset.concatenate(dataset)
if config.debug.embedded_dataset_cache_path:
embedded_dataset = embedded_dataset.cache(
config.debug.embedded_dataset_cache_path
)
embeddings_df = pd.DataFrame(
_numpy_iterator_with_progress_logging(embedded_dataset)
)
# Encode 'label', 'bg_labels', 'dataset_name' column data as strings.
for column_name in ('label', 'bg_labels', 'dataset_name'):
embeddings_df[column_name] = (
embeddings_df[column_name].str.decode('utf-8').astype('string')
)
return embeddings_df
def prepare_eval_sets(
config: ConfigDict, embedded_datasets: dict[str, tf.data.Dataset]
) -> Iterator[EvalSet]:
"""Constructs and yields eval sets.
Args:
config: The evaluation configuration dict.
embedded_datasets: A mapping from dataset name to embedded dataset.
Yields:
    An EvalSet with fields (name, search_corpus_df, classwise_eval_sets). Each
    ClasswiseEvalSet in turn holds (class_name, class_representatives_df,
    search_corpus_mask); the search corpus mask indicates which part of the
    search corpus should be ignored in the context of its corresponding class
    (e.g., because it overlaps with the chosen class representatives). The
    DataFrame (`*_df`) objects have the following columns:
- embedding: numpy array of dtype float32.
- label: space-separated string of foreground labels.
- bg_labels: space-separated string of background labels.
- dataset_name: name of the dataset of origin for the embedding.
- recording_id: integer recording ID.
- segment_id: integer segment ID within the recording.
"""
# Add a 'dataset_name' feature to all embedded datasets.
embedded_datasets = {
dataset_name: dataset.map(
functools.partial(_add_dataset_name, dataset_name=dataset_name)
)
for dataset_name, dataset in embedded_datasets.items()
}
# Build a DataFrame out of all embedded datasets.
embeddings_df = _create_embeddings_dataframe(embedded_datasets, config)
logging.info(
'Preparing %d unique eval sets.', len(config.eval_set_specifications)
)
rng_key = jax.random.PRNGKey(config.rng_seed)
# Yield eval sets one by one.
for (
eval_set_name,
eval_set_specification,
) in config.eval_set_specifications.items():
rng_key, eval_set_key = jax.random.split(rng_key)
search_corpus_df, classwise_eval_sets = _prepare_eval_set(
embeddings_df=_HashedEmbeddingsDataFrame(embeddings_df),
eval_set_specification=eval_set_specification,
rng_key=eval_set_key,
)
yield EvalSet(
name=eval_set_name,
search_corpus_df=search_corpus_df,
classwise_eval_sets=classwise_eval_sets,
)
def search(
eval_set: EvalSet,
learned_representations: Mapping[str, np.ndarray],
create_species_query: Callable[[Sequence[np.ndarray]], np.ndarray],
search_score: Callable[[np.ndarray, np.ndarray], np.ndarray],
) -> Mapping[str, pd.DataFrame]:
"""Performs search over evaluation set examples and search corpus pairs.
Args:
eval_set: The evaluation set over which to perform search.
learned_representations: Mapping from class name to its learned
representation. If a key exists in the mapping, the corresponding
representation is used instead of calling `create_species_query` on the
class representatives.
create_species_query: A function callback provided by the user to construct
a search query from a collection of species vectors. Choice of methodology
left up to the user.
search_score: A function callback provided by the user to produce a score by
comparing two vectors (e.g. query and species representative/embedding).
Returns:
A mapping of query-species ID to a DataFrame of search results. The results
    DataFrame is structured as follows, with num(search_corpus) rows and three
    columns:
- each row corresponds to the results for a single search corpus example,
- column 1 contains a search score (float)
- column 2 contains an indicator of whether the eval and search species are
the same (bool).
- column 3 contains an indicator of whether to exclude the row for the
search corpus (bool).
"""
# A mapping from eval species class to a DataFrame of search results.
eval_search_results = dict()
# Gather all query vectors.
queries = np.stack(
[
learned_representations[ces.class_name]
if ces.class_name in learned_representations
else create_species_query(ces.class_representatives_df['embedding'])
for ces in eval_set.classwise_eval_sets
]
)
# Perform a matrix-matrix scoring using stacked queries and search corpus
# embeddings.
scores = search_score(
queries, np.stack(eval_set.search_corpus_df[_EMBEDDING_KEY].tolist())
)
for score, ces in zip(scores, eval_set.classwise_eval_sets):
species_scores = _make_species_scores_df(
score=pd.Series(score.tolist(), index=eval_set.search_corpus_df.index),
species_id=ces.class_name,
search_corpus=eval_set.search_corpus_df,
search_corpus_mask=ces.search_corpus_mask,
)
eval_search_results[ces.class_name] = species_scores
return eval_search_results
def _make_species_scores_df(
score: pd.Series,
species_id: str,
search_corpus: pd.DataFrame,
search_corpus_mask: pd.Series,
) -> pd.DataFrame:
"""Creates a DataFrame of scores and other metric-relevant information.
Args:
score: A Series of scores (with respect to a query for species `species_id`)
for each embedding in the search corpus.
species_id: The species ID of the query.
search_corpus: A DataFrame containing rows of search examples.
search_corpus_mask: A boolean Series indicating which part of the search
corpus should be ignored in the context of its corresponding class (e.g.,
because it overlaps with the chosen class representatives).
Returns:
A DataFrame where each row corresponds to the results on a single search
    exemplar, with columns for i) a numeric score, ii) a species match (bool)
    checked between the query species ID and the search corpus exemplar's
foreground and background species labels, and iii) a label mask (bool)
indicating whether the row should be ignored when computing metrics.
"""
search_species_scores = pd.DataFrame()
search_species_scores['score'] = score
fg_species_match = (
search_corpus[_LABEL_KEY]
.apply(lambda x: species_id in x.split(' '))
.astype(np.int16)
)
bg_species_match = (
search_corpus[_BACKGROUND_KEY]
.apply(lambda x: species_id in x.split(' '))
.astype(np.int16)
)
search_species_scores['species_match'] = fg_species_match | bg_species_match
search_species_scores['label_mask'] = search_corpus_mask
return search_species_scores
def compute_metrics(
eval_set_name: str,
eval_set_results: Mapping[str, pd.DataFrame],
sort_descending: bool = True,
):
"""Computes roc-auc & average precision on provided eval results DataFrame.
Args:
eval_set_name: The name of the evaluation set.
eval_set_results: A mapping from species ID to a DataFrame of the search
results for that species (with columns 'score', 'species_match', and
'label_mask').
sort_descending: An indicator if the search result ordering is in descending
order to be used post-search by average-precision based metrics. Sorts in
descending order by default.
Returns:
    A list of (eval species name, average_precision, roc_auc, num_pos_match,
    num_neg_match, eval_set_name) tuples, one per species in the given eval
    set.
"""
# TODO(hamer): consider moving eval_set_name metadata (i.e. # exemplars, seed)
# to separate columns in the metric results.
species_metric_eval_set = list()
for eval_species, eval_results in eval_set_results.items():
eval_scores = eval_results['score'].to_numpy()
species_label_match = eval_results['species_match'].to_numpy()
label_mask = eval_results['label_mask'].to_numpy().astype(np.int64)
roc_auc = metrics.roc_auc(
logits=eval_scores.reshape(-1, 1),
labels=species_label_match.reshape(-1, 1),
label_mask=label_mask.reshape(-1, 1),
sort_descending=sort_descending,
)[
'macro'
] # Dictionary of macro, geometric, individual & individual_var.
average_precision = metrics.average_precision(
eval_scores,
species_label_match,
label_mask=label_mask,
sort_descending=sort_descending,
)
num_pos_match = sum(species_label_match == 1)
num_neg_match = sum(species_label_match == 0)
species_metric_eval_set.append((
eval_species,
average_precision,
roc_auc,
num_pos_match,
num_neg_match,
eval_set_name,
))
return species_metric_eval_set
def write_results_to_csv(
    metric_results: Sequence[tuple[str, float, float, int, int, str]],
write_results_dir: str,
write_filename: str | None,
):
"""Write evaluation metric results to csv.
Writes a csv file where each row corresponds to a particular evaluation
example's search task performance. If the provided write_results_dir doesn't
exist, it is created. If an evaluation results file already exists, it is
overwritten.
Args:
    metric_results: A sequence of (eval species name, average_precision,
      roc_auc [arithmetic mean], num_pos_match, num_neg_match, evaluation set
      name) tuples to write to csv. The first row encodes the column names.
write_results_dir: The path to write the computed metrics to file.
write_filename: A specified name for the eval results file.
"""
write_results_path = os.path.join(write_results_dir, write_filename)
results_df = pd.DataFrame(metric_results[1:], columns=metric_results[0])
# Check if the specified directory exists; if not, create & write to csv.
if write_results_dir.find('cns') == 0:
if not os.path.exists(write_results_dir):
os.makedirs(write_results_dir)
results_df.to_csv(write_results_path, index=False)
# TODO(bringingjoy): update return type to a Sequence of
# np.ndarrays when extending create_species_query to support returning multiple
# queries for a given eval species.
def create_averaged_query(
species_representatives: Sequence[np.ndarray],
) -> np.ndarray:
"""Creates a search query from representatives by averaging embeddings.
Args:
species_representatives: a collection of vectors representing species
vocalizations.
Returns:
An element-wise average of the vectors to serve as a search query.
"""
query = np.mean(species_representatives, axis=0)
return query
def cosine_similarity(vector_a: np.ndarray, vector_b: np.ndarray) -> np.ndarray:
"""Computes cosine similarity between two vectors and returns the score.
Args:
vector_a: an n-dimensional vector of floats.
vector_b: an n-dimensional vector of floats.
Returns:
The cosine similarity score between two vectors A and B, where increasing
score corresponds to vector similarity.
Note:
    Definition: (A dot B) / (||A|| * ||B||). Scores
    close to -1 mean A and B are 'opposite' vectors,
    close to 0 mean A and B are 'orthogonal' vectors, and
    close to 1 mean A and B are very similar vectors.
"""
dot_prod = vector_a @ vector_b.T
norm_prod = (
np.linalg.norm(vector_a, axis=-1, keepdims=True)
* np.linalg.norm(vector_b, axis=-1, keepdims=True).T
)
return dot_prod / norm_prod
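# Minimal usage sketch (vector values are made up and not part of the eval
# pipeline): a query is formed by averaging class-representative embeddings,
# then scored against a matrix of search-corpus embeddings.
#
#   reps = [np.array([1.0, 0.0]), np.array([0.0, 1.0])]
#   query = create_averaged_query(reps)            # -> array([0.5, 0.5])
#   corpus = np.array([[1.0, 0.0], [-1.0, -1.0]])
#   cosine_similarity(query, corpus)               # -> approx. [[0.71, -1.0]]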
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model callbacks library."""
import dataclasses
from typing import cast, Sequence
from absl import logging
from chirp.eval import eval_lib
from chirp.inference import interface
from chirp.inference import models as inference_models
from chirp.taxonomy import namespace
from chirp.taxonomy import namespace_db
from chirp.train import classifier
from chirp.train import hubert
from chirp.train import separator
from clu import checkpoint
from etils import epath
import jax
from jax import numpy as jnp
import ml_collections
import numpy as np
import tensorflow as tf
ConfigDict = ml_collections.ConfigDict
def pmap_with_remainder(
model_callable: eval_lib.EvalModelCallable,
) -> eval_lib.EvalModelCallable:
"""Run a model callback in a multi-device setting.
Since the model can be called with a variable batch size, this has to be
handled in a multi-device environment. We do this by splitting the batch
across the devices and hosts using `pmap`. If there is a remainder to the
batch, then we process this in a separate call which is done on each host.
Args:
model_callable: A model callable (must be a JAX function).
Returns:
A model callable with the same signature but which uses data parallelism.
"""
model_callable_pmap = jax.pmap(model_callable)
model_callable_jit = jax.jit(model_callable)
def parallel_model_callable(inputs: np.ndarray) -> np.ndarray:
# Split the batch across devices
n, m = jax.local_device_count(), inputs.shape[0]
if m < n:
return model_callable_jit(inputs)
batch = jnp.reshape(inputs[: n * (m // n)], (n, m // n) + inputs.shape[1:])
outputs = model_callable_pmap(batch)
outputs = jnp.reshape(outputs, (n * (m // n),) + outputs.shape[2:])
# Check if there is a remainder to the batch
r = m - n * (m // n)
if r == 0:
return outputs
    else:
      # Otherwise, run the remaining examples on each host and concatenate.
batch = inputs[n * (m // n) :]
remainder = model_callable_jit(batch)
return jnp.concatenate([outputs, remainder])
return parallel_model_callable
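# Worked sketch of pmap_with_remainder's batch-splitting arithmetic
# (hypothetical numbers): with n = 8 local devices and a batch of m = 35
# examples, the first n * (m // n) = 32 examples are reshaped to (8, 4, ...)
# and run via pmap, while the remaining r = 3 examples go through the jitted
# callable on each host and are concatenated back on, preserving order.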
@dataclasses.dataclass
class TaxonomyModelCallback:
"""A model callback implementation for TaxonomyModel checkpoints.
Attributes:
init_config: TaxonomyModel configuration.
workdir: path to the model checkpoint.
use_learned_representations: If True, use the model's output weights as a
learned representation for species seen during training. If False, reverts
to the default behavior of using all embedded upstream recordings for
artificially rare species to form search queries.
    learned_representation_blocklist: Species codes for learned representations
      which should *not* appear in the `learned_representations` mapping. This
      is analogous in result to having an allowlist for which species codes use
      the `learned_representations`. Defaults to an empty list (no species are
      excluded); blocklisted species instead use embedded class representatives
      to form their species queries.
model_callback: the fprop function used as part of the model callback,
created automatically post-initialization.
learned_representations: mapping from class name to its learned
representation, created automatically post-initialization. If
`use_learned_representations` is False, it is left empty, which results in
the evaluation protocol relying instead on embedded upstream recordings to
form search queries.
"""
init_config: ConfigDict
workdir: str
use_learned_representations: bool = False
learned_representation_blocklist: Sequence[str] = dataclasses.field(
default_factory=list
)
# The following are populated during init.
model_callback: eval_lib.EvalModelCallable = dataclasses.field(init=False)
learned_representations: dict[str, np.ndarray] = dataclasses.field(
init=False, default_factory=dict
)
def __post_init__(self):
model_bundle, train_state = classifier.initialize_model(
workdir=self.workdir, **self.init_config
)
# All hosts should load the same checkpoint
multihost_ckpt = cast(checkpoint.MultihostCheckpoint, model_bundle.ckpt)
ckpt = checkpoint.Checkpoint(multihost_ckpt.multihost_base_directory + '-0')
train_state = ckpt.restore(train_state)
variables = {'params': train_state.params, **train_state.model_state}
def fprop(inputs):
return model_bundle.model.apply(variables, inputs, train=False).embedding
self.model_callback = pmap_with_remainder(fprop)
if self.use_learned_representations:
class_list = (
namespace_db.load_db()
.class_lists[self.init_config.target_class_list]
.classes
)
head_index = list(model_bundle.model.num_classes.keys()).index('label')
output_weights = train_state.params[f'Dense_{head_index}']['kernel'].T
self.learned_representations.update(
{
n: w
for n, w in zip(class_list, output_weights)
if n not in self.learned_representation_blocklist
}
)
def __call__(self, inputs: np.ndarray) -> np.ndarray:
return np.asarray(self.model_callback(inputs))
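  # Construction sketch (paths and inputs below are placeholders): in the eval
  # configs this callback is typically built via config_utils.callable_config,
  # but it can also be instantiated directly, e.g.
  #
  #   callback = TaxonomyModelCallback(
  #       init_config=taxonomy_init_config,   # a TaxonomyModel init ConfigDict
  #       workdir='/path/to/checkpoint_dir',  # placeholder checkpoint path
  #   )
  #   embeddings = callback(audio_batch)      # np.ndarray of audio windows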
@dataclasses.dataclass
class SeparatorTFCallback:
"""An eval model callback the embedding from an audio separator."""
model_path: str
use_learned_representations: bool = False
learned_representation_blocklist: Sequence[str] = dataclasses.field(
default_factory=list
)
frame_size: int = 32000
# The following are populated during init.
model_callback: eval_lib.EvalModelCallable = dataclasses.field(init=False)
learned_representations: dict[str, np.ndarray] = dataclasses.field(
init=False, default_factory=dict
)
def _load_learned_representations(self):
"""Loads classifier output weights from the separator."""
label_csv_path = epath.Path(self.model_path) / 'label.csv'
with label_csv_path.open('r') as f:
class_list = namespace.ClassList.from_csv(f)
# Load the output layer weights.
variables_path = (
epath.Path(self.model_path) / 'savedmodel/variables/variables'
).as_posix()
variables = tf.train.list_variables(variables_path)
candidates = []
for v, v_shape in variables:
# The classifier output layer is a 1D convolution with kernel size
# (1, embedding_dim, num_classes).
if (
len(v_shape) == 3
and v_shape[0] == 1
and v_shape[-1] == len(class_list.classes)
):
candidates.append(v)
if not candidates:
raise ValueError('Could not locate output weights layer.')
elif len(candidates) > 1:
raise ValueError(
'Found multiple layers which could be the output weights layer (%s).'
% candidates
)
else:
output_weights = tf.train.load_variable(variables_path, candidates[0])
output_weights = np.squeeze(output_weights)
self.learned_representations.update(
{
n: w
for n, w in zip(class_list.classes, output_weights)
if n not in self.learned_representation_blocklist
}
)
def __post_init__(self):
logging.info('Loading separation model...')
separation_model = tf.saved_model.load(
epath.Path(self.model_path) / 'savedmodel'
)
def fprop(inputs):
framed_inputs = np.reshape(
inputs,
[
inputs.shape[0],
inputs.shape[1] // self.frame_size,
self.frame_size,
],
)
# Outputs are separated audio, logits, and embeddings.
_, _, embeddings = separation_model.infer_tf(framed_inputs)
# Embeddings have shape [B, T, D]; we need to aggregate over time.
# For separation models, the mid-point embedding is usually best.
midpt = embeddings.shape[1] // 2
embeddings = embeddings[:, midpt, :]
return embeddings
self.model_callback = fprop
if self.use_learned_representations:
logging.info('Loading learned representations...')
self._load_learned_representations()
logging.info('Model loaded.')
def __call__(self, inputs: np.ndarray) -> np.ndarray:
return np.asarray(self.model_callback(inputs))
@dataclasses.dataclass
class EmbeddingModelCallback:
"""A general callback implementation for inference.EmbeddingModel wrappers.
Attributes:
model_key: Key for the model. See chirp.inference.models.model_class_map.
model_config: Config dict for the target model.
time_pooling: Named method for reducing embeddings over the time dimension.
See chirp.inference.interface.InferenceOutputs.pooled_embeddings.
    channel_pooling: Named method for reducing the embeddings' channel
      dimension. See chirp.inference.interface.InferenceOutputs.pooled_embeddings.
loaded_model: The instantiated interface.EmbeddingModel.
model_callback: Eval callback.
    learned_representations: Empty learned_representations map.
"""
model_key: str
model_config: ConfigDict
time_pooling: str = 'mean'
channel_pooling: str = 'squeeze'
# The following are populated during init.
loaded_model: interface.EmbeddingModel = dataclasses.field(init=False)
model_callback: eval_lib.EvalModelCallable = dataclasses.field(init=False)
# We don't use learned_representations with the simple wrapper, but need to
# provide an empty mapping for the API.
learned_representations: dict[str, np.ndarray] = dataclasses.field(
init=True, default_factory=dict
)
def __post_init__(self):
    logging.info('Loading embedding model...')
model_class = inference_models.model_class_map()[self.model_key]
self.loaded_model = model_class(**self.model_config)
# Set the object's call method as the model_callback.
self.model_callback = self.__call__
def __call__(self, inputs: np.ndarray) -> np.ndarray:
model_outputs = self.loaded_model.batch_embed(inputs)
# Batched model outputs have shape [B, T, C, D], but we require [B, D].
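    # The named reductions are applied by pooled_embeddings: time_pooling
    # ('mean' here) is expected to collapse the time axis and channel_pooling
    # ('squeeze') to drop the singleton channel axis, leaving shape [B, D].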
return model_outputs.pooled_embeddings(
self.time_pooling, self.channel_pooling
)
@dataclasses.dataclass
class HuBERTModelCallback:
"""A model callback implementation for HuBERTModel checkpoints.
Attributes:
init_config: TaxonomyModel configuration.
workdir: path to the model checkpoint.
embedding_index: index of the embedding vector to retrieve in the list of
embeddings output by the model.
model_callback: the fprop function used as part of the model callback,
created automatically post-initialization.
learned_representations: mapping from class name to its learned
representation, created automatically post-initialization and left empty
(because HuBERT is self-supervised).
"""
init_config: ConfigDict
workdir: str
embedding_index: int
model_callback: eval_lib.EvalModelCallable = dataclasses.field(init=False)
learned_representations: dict[str, np.ndarray] = dataclasses.field(
init=False, default_factory=dict
)
def __post_init__(self):
model_bundle, train_state, _ = hubert.initialize_model(
workdir=self.workdir, num_train_steps=1, **self.init_config
)
train_state = model_bundle.ckpt.restore(train_state)
variables = {'params': train_state.params, **train_state.model_state}
@jax.jit
def fprop(inputs):
model_outputs = model_bundle.model.apply(
variables, inputs, train=False, mask_key=None
)
return model_outputs.embedding[self.embedding_index].mean(axis=-2)
self.model_callback = fprop
def __call__(self, inputs: np.ndarray) -> np.ndarray:
return np.asarray(self.model_callback(inputs))
@dataclasses.dataclass
class SeparationModelCallback:
"""A model callback implementation for SeparationModel checkpoints.
Attributes:
init_config: SeparationModel configuration.
workdir: path to the model checkpoint.
model_callback: the fprop function used as part of the model callback,
created automatically post-initialization.
learned_representations: mapping from class name to its learned
representation, created automatically post-initialization. If
`use_learned_representations` is False, it is left empty, which results in
the evaluation protocol relying instead on embedded upstream recordings to
form search queries.
"""
init_config: ConfigDict
workdir: str
model_callback: eval_lib.EvalModelCallable = dataclasses.field(init=False)
learned_representations: dict[str, np.ndarray] = dataclasses.field(
init=False, default_factory=dict
)
def __post_init__(self):
model_bundle, train_state = separator.initialize_model(
workdir=self.workdir, **self.init_config
)
train_state = model_bundle.ckpt.restore_or_initialize(train_state)
variables = {'params': train_state.params, **train_state.model_state}
@jax.jit
def fprop(inputs):
return model_bundle.model.apply(
variables, inputs, train=False
).embedding.mean(axis=-2)
self.model_callback = fprop
def __call__(self, inputs: np.ndarray) -> np.ndarray:
return np.asarray(self.model_callback(inputs))
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate a trained model."""
from collections.abc import Sequence
import os
from absl import app
from absl import flags
from absl import logging
from chirp import config_utils
from chirp.configs import config_globals
from chirp.eval import eval_lib
import jax
from ml_collections.config_flags import config_flags
_CONFIG = config_flags.DEFINE_config_file('config')
_EVAL_RESULTS_HEADER = (
'eval_species',
'average_precision',
'roc_auc',
'num_pos_match',
'num_neg_match',
'eval_set_name',
)
flags.mark_flags_as_required(['config'])
def _main():
"""Main function."""
logging.info(_CONFIG.value)
# We need to set Jax and TF GPU options before any other jax/tf calls.
# Since calls can potentially happen in parse_config, we'll handle GPU options
# before parsing the config.
if hasattr(_CONFIG.value, 'jax_mem_frac'):
    os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = str(_CONFIG.value.jax_mem_frac)
if hasattr(_CONFIG.value, 'tf_gpu_growth') and _CONFIG.value.tf_gpu_growth:
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
config = config_utils.parse_config(
_CONFIG.value, config_globals.get_globals()
)
# Check that the required user-specified fields are set in the config.
  if config.create_species_query is None:
    raise ValueError(
        'eval.py requires `config.create_species_query` to be set '
        'to a callable (e.g. eval_lib.create_averaged_query) in the '
        'passed config. Please update your config file and run again.'
    )
  if config.score_search is None:
    raise ValueError(
        'eval.py requires `config.score_search` to be set to a '
        'callable (e.g. eval_lib.cosine_similarity) in the passed config. '
        'Please update your config file and run again.'
    )
if config.sort_descending is None:
raise ValueError(
'eval.py requires `sort_descending` to be set to a '
'boolean value (True or False) in the passed config. '
'Please update your config file and run again.'
)
# Ensure that every evaluation script includes an instantiation of a Pipeline
# object with any desired data processing ops.
for dataset_config in config.dataset_configs.values():
if dataset_config.pipeline is None:
raise ValueError(
'eval.py requires each dataset_config in `config.dataset_configs` to '
'have a `pipeline` attribute set to a '
'`config_utils.callable_config` object with any desired data '
'processing operations (ops).'
)
eval_datasets = eval_lib.load_eval_datasets(config)
embedded_datasets = dict()
for dataset_name, dataset in eval_datasets.items():
logging.info('%s:\n%s', dataset_name, dataset)
embedded_datasets[dataset_name] = eval_lib.get_embeddings(
dataset, config.model_callback, config.batch_size
)
eval_set_search_results = dict()
for eval_set in eval_lib.prepare_eval_sets(config, embedded_datasets):
logging.info(eval_set.name)
search_results = eval_lib.search(
eval_set,
config.model_callback.learned_representations,
config.create_species_query,
config.score_search,
)
eval_set_search_results[eval_set.name] = search_results
# Collect eval set species performance results as a list of tuples.
eval_metrics = [_EVAL_RESULTS_HEADER]
for eval_set_name, eval_set_results in eval_set_search_results.items():
eval_metrics.extend(
eval_lib.compute_metrics(
eval_set_name, eval_set_results, config.sort_descending
)
)
# In a multi-host setting, only the first host should write results
if jax.process_index() == 0:
eval_lib.write_results_to_csv(
eval_metrics, config.write_results_dir, config.write_filename
) # pytype: disable=wrong-arg-types # jax-ndarray
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
with jax.default_matmul_precision('float32'):
_main()
if __name__ == '__main__':
app.run(main)
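# Usage sketch (module path and config path below are placeholders): the
# evaluation binary is driven entirely by a config file passed through
# ml_collections' config_flags, e.g.
#
#   python -m chirp.eval.eval -- --config=path/to/eval_protocol_config.py
#
# The config is expected to define model_callback, create_species_query,
# score_search, sort_descending, batch_size, dataset_configs (each with a
# pipeline), write_results_dir and write_filename, as checked in _main() above.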
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for subsampling dataframes."""
import bisect
import collections
import copy
import hashlib
from absl import logging
import jax
import numpy as np
import pandas as pd
# The way a recording is represented in the DFS carried out in
# `sample_recordings_under_constraints`. First element is its foreground
# species. Second element is its list of background species. Final element is
# its index in the dataframe.
_RECORDING = tuple[str, list[str], int]
def sample_recordings(
df: pd.DataFrame,
target_fg: dict[str, int],
prng_seed: int,
):
"""Subsamples recordings from df.
Args:
df: The dataframe to subsample.
    target_fg: A dictionary mapping each species to its required number of
foreground recordings to be subsampled.
prng_seed: The PRNG seed to use for random sampling.
Returns:
The subsampled df such that there are exactly target_fg[species] foreground
labels of each species.
"""
key = jax.random.PRNGKey(prng_seed)
def _subsample(group_df):
(species_code,) = group_df['species_code'].unique()
indices = sorted(
jax.random.choice(
# Create a unique key derived from the global key and the species
# code.
jax.random.fold_in(
key,
int(
hashlib.md5(species_code.encode()).hexdigest()[:8], base=16
),
),
len(group_df),
shape=(target_fg[species_code],),
replace=False,
).tolist()
)
return group_df.iloc[indices]
return (
df[df['species_code'].isin(target_fg.keys())]
.groupby('species_code', group_keys=False)
.apply(_subsample)
)
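# Minimal usage sketch of sample_recordings (species codes and counts are made
# up): sample exactly two foreground recordings for each requested species,
# deterministically for a fixed prng_seed.
#
#   target = {'amecro': 2, 'norcar': 2}
#   subsampled = sample_recordings(df, target_fg=target, prng_seed=1234)
#   # `subsampled` contains exactly 2 rows per species_code in `target`.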
def hit_target(count_dic: dict[str, int]) -> bool:
  """Returns True if every remaining count in count_dic has reached zero."""
  return np.all(np.array(list(count_dic.values())) == 0)
def find_valid_subset(
remaining_fg: collections.OrderedDict[str, int],
remaining_bg: collections.OrderedDict[str, int],
chosen: list[_RECORDING],
seen: dict[tuple[_RECORDING], bool],
candidates: list[_RECORDING],
) -> list[_RECORDING] | None:
"""Function that tries to find a valid solution to sampling under constraints.
This function performs a DFS to find a subset of recordings that satisfies
the constraints. The list of `chosen` recordings defines the current path
in the tree. Because randomly searching for solutions is generally
intractable, we guide the search by only solving the constraints for one
  species at a time. The order in which species are addressed is implicitly
  defined by the order of the keys of remaining_fg and remaining_bg (which
  should coincide).
Args:
remaining_fg: For each species, the number of foreground recordings left to
find. Species (keys) should ideally be sorted from hardest to easiest.
remaining_bg: For each species, the number of background recordings left to
find. Species (keys) should ideally be sorted from hardest to easiest.
chosen: The recordings chosen so far.
seen: The branches of the tree already visited.
candidates: The pool of recordings to pick from.
Returns:
The solution (=list of recordings) if it finds any, None otherwise.
"""
# Check if we hit the target
if hit_target(remaining_fg) and hit_target(remaining_bg):
return chosen
# Check that we still have candidates
if not candidates:
return None
# Check that we haven't already been there. `chosen` needs to be sorted
# for this to work. This is ensured when we construct `chosen`.
if seen[tuple(chosen)]:
return None
# Else we continue visiting. We focus on a single species at a time. We
# fetch the first species for which the constraints are not yet satisfied.
# In the event that remaining_bg's keys are sorted by decreasing difficulty,
# this corresponds to fetching the most difficult species not yet satisfied.
for s in remaining_bg:
if remaining_bg[s] > 0 or remaining_fg[s] > 0:
current_species = s
break
for index, recording in enumerate(candidates):
if valid_recording(recording, remaining_fg, remaining_bg, current_species):
updated_fg = copy.copy(remaining_fg)
updated_bg = copy.copy(remaining_bg)
if recording[0] in updated_fg:
updated_fg[recording[0]] -= 1
      # In background, the same species may appear twice, so we take set() to
      # remove duplicates.
for bg_rec in set(recording[1]):
if bg_rec in updated_bg:
updated_bg[bg_rec] -= 1
new_chosen = copy.copy(chosen)
bisect.insort(new_chosen, recording)
res = find_valid_subset(
updated_fg,
updated_bg,
new_chosen,
seen,
[x for i, x in enumerate(candidates) if i != index],
)
if res is not None:
return res
seen[tuple(chosen)] = True
return None
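# Usage sketch of find_valid_subset (hypothetical species codes): `seen` is
# assumed to behave like collections.defaultdict(bool) so that unvisited
# branches read as False, and background labels are given as tuples so that
# the keys of `seen` remain hashable.
#
#   candidates = [('amecro', ('norcar',), 0), ('norcar', (), 1)]
#   solution = find_valid_subset(
#       remaining_fg=collections.OrderedDict([('norcar', 1), ('amecro', 1)]),
#       remaining_bg=collections.OrderedDict([('norcar', 1), ('amecro', 0)]),
#       chosen=[],
#       seen=collections.defaultdict(bool),
#       candidates=candidates,
#   )
#   # -> both recordings: 1 fg 'amecro', 1 fg 'norcar', and 1 bg 'norcar'.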
def valid_recording(
recording: _RECORDING,
remaining_fg: collections.OrderedDict[str, int],
remaining_bg: collections.OrderedDict[str, int],
current_species: str,
) -> bool:
"""Decides whether a child (=recording) should be explored next.
The function checks whether (i) The recording contains the species we are
currently addressing (ii) if yes, whether adding this recording to 'chosen'
wouldn't violate any constraint.
Args:
recording: The recording whose relevance we want to assess.
remaining_fg: For each species, the number of foreground recordings left to
find. Species (keys) should ideally be sorted from hardest to easiest.
remaining_bg: For each species, the number of background recordings left to
find. Species (keys) should ideally be sorted from hardest to easiest.
current_species: The current species the search is focused on satisfying.
Returns:
True if the recording should be explored, False otherwise.
"""
# Ensure the current_species is in this recording.
if (
remaining_fg[current_species] > 0 and recording[0] == current_species
) or (remaining_bg[current_species] > 0 and current_species in recording[1]):
# Ensure it doesn't violate any constraint.
violates_fg = (
recording[0] in remaining_fg and remaining_fg[recording[0]] == 0
)
violates_bg = any(
[x in remaining_bg and remaining_bg[x] == 0 for x in recording[1]]
)
if not violates_fg and not violates_bg:
return True
return False
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to filter/scrub data."""
import enum
import functools
from typing import Any, Dict, NamedTuple, Sequence, Union
from chirp.data import sampling_utils as su
from chirp.taxonomy import namespace_db
import numpy as np
import pandas as pd
class MergeStrategy(enum.Enum):
"""Strategy used to merge the results of parallel queries in QueryParallel."""
OR = 'or'
AND = 'and'
CONCAT_NO_DUPLICATES = 'concat_no_duplicates'
class MaskOp(enum.Enum):
"""Operations used for selecting samples.
Takes as input a dataframe and returns boolean pd.Series corresponding to the
selected samples.
"""
NOT_IN = 'not_in'
CONTAINS_NO = 'contains_no'
CONTAINS_ANY = 'contains_any'
IN = 'in'
class TransformOp(enum.Enum):
"""Operations that transform the dataFrame.
Take as input a dataframe, and return an updated version of this dataframe.
"""
SCRUB = 'scrub'
SCRUB_ALL_BUT = 'scrub_all_but'
FILTER = 'filter'
SAMPLE = 'sample'
APPEND = 'append'
SerializableType = list[int | str | bytes] | MaskOp | TransformOp | Dict
class Query(NamedTuple):
"""The main interface for processing operations.
A query is serializable.
It contains an operation (op), along with its kwargs. Additionally,
for 'masking query' (when the op is a MaskOp), a complement option can be
activated to return the complement of what the original query would have
returned. Combined with consistent PRNG seeding, this feature makes it easy to
partition data for training and evaluation.
"""
op: MaskOp | TransformOp
kwargs: dict[str, SerializableType]
class QuerySequence(NamedTuple):
"""A sequence of Queries to be applied sequentially.
Contains a sequence of Query to be applied sequentially on a dataframe.
This sequence can be targeted to a subpopulation of samples through specifying
a mask_query (i.e. a Query whose op is a MaskOp), for instance only
scrubbing bg_labels from a specific subset of species.
"""
queries: Sequence[Union[Query, 'QuerySequence', 'QueryParallel']]
mask_query: Union[Query, 'QueryParallel'] | None = None
class QueryParallel(NamedTuple):
"""A sequence of Queries to be applied in parallel.
Contains a sequence of Query to be applied in parallel from a given dataframe.
Once all queries have been independently executed, we merge the resulting df
using the merge_strategy defined.
"""
queries: Sequence[Union[Query, QuerySequence, 'QueryParallel']]
merge_strategy: MergeStrategy
class QueryComplement(NamedTuple):
"""Applies the complement of a query.
The unique_key is used to uniquely identify samples. Therefore, the values
at that field must remain **unchanged** throughout the application of query.
"""
query: Query | QuerySequence
unique_key: str
def apply_complement(
df: pd.DataFrame, query_complement: QueryComplement
) -> pd.DataFrame:
"""Applies a QueryComplement.
If the query transforms the df into a boolean Series, we just return the
complement of the mask. For transform operations, we compare the values
at query_complement.unique_key of samples initially present, minus those
remaining after the application of query_complement.query. This assumes that
(i) values in df[query_complement.unique_key] bijectively map to recordings.
(ii) query_complement.query **does not** modify in-place this mapping.
Args:
df: The dataframe to apply the QueryComplement on.
query_complement: The QueryComplement to apply.
  Returns:
    The dataframe (or boolean Series) resulting from applying the complement of
    the query.
Raises:
ValueError: Some values in df[query_complement.unique_key] are duplicates,
which violates condition (i) above.
"""
updated_df = APPLY_FN[type(query_complement.query)](
df, query_complement.query
)
# If the query used a MaskOp (yields a boolean Series), we return the
# complement of this boolean Series.
  if isinstance(query_complement.query, Query) and isinstance(
      query_complement.query.op, MaskOp
  ):
return ~updated_df
# For other transformations, we use the unique_key to return the complement.
else:
key = query_complement.unique_key
if df[key].duplicated().any():
raise ValueError(
          f'The values at {key} should uniquely define each '
          'recording. Currently, some recordings share the same '
          'value.'
)
complement_values = set(df[key]) - set(updated_df[key])
comp_mask = df[key].apply(lambda v: v in complement_values)
return df[comp_mask]
def apply_query(
df: pd.DataFrame,
query: Query,
) -> pd.DataFrame | pd.Series:
"""Applies a query on a DataFrame.
Args:
df: The dataframe on which the query is applied.
query: The query to apply.
Returns:
The new version of the dataFrame (or Series) after applying the query.
"""
return OPS[query.op](df, **query.kwargs)
def apply_sequence(
df: pd.DataFrame,
query_sequence: QuerySequence,
) -> pd.DataFrame | pd.Series:
"""Applies a QuerySequence to a DataFrame.
Args:
df: The DataFrame on which to apply the query.
query_sequence: The QuerySequence to apply to df.
Returns:
    The updated version of the df, where all the queries in
    query_sequence.queries have been sequentially applied in the specified
    order.
"""
if query_sequence.mask_query is not None:
mask = APPLY_FN[type(query_sequence.mask_query)](
df, query_sequence.mask_query
)
assert mask.dtype == bool
modifiable_df = df[mask]
frozen_df = df[~mask]
for query in query_sequence.queries:
modifiable_df = APPLY_FN[type(query)](modifiable_df, query)
return pd.concat([frozen_df, modifiable_df])
else:
for query in query_sequence.queries:
df = APPLY_FN[type(query)](df, query)
return df
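# Usage sketch of apply_sequence (column names are illustrative): scrub a
# background label only for recordings whose foreground species is 'norcar',
# leaving all other rows untouched.
#
#   query_sequence = QuerySequence(
#       queries=[Query(op=TransformOp.SCRUB,
#                      kwargs={'key': 'bg_labels', 'values': ['amecro']})],
#       mask_query=Query(op=MaskOp.IN,
#                        kwargs={'key': 'species_code', 'values': ['norcar']}),
#   )
#   new_df = apply_sequence(df, query_sequence)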
def apply_parallel(
df: pd.DataFrame,
query_parallel: QueryParallel,
) -> pd.DataFrame | pd.Series:
"""Applies a QueryParallel to a DataFrame.
Args:
df: The DataFrame on which to apply the query.
query_parallel: The QueryParallel to apply to df.
  Returns:
    The updated version of the df, where each query in query_parallel.queries
    has been applied independently to df and the results merged according to
    query_parallel.merge_strategy.
"""
all_dfs = []
for query in query_parallel.queries:
all_dfs.append(APPLY_FN[type(query)](df, query))
final_df = MERGE_FN[query_parallel.merge_strategy](all_dfs)
return final_df
def is_in(
df: pd.DataFrame, key: str, values: list[SerializableType]
) -> pd.Series:
"""Builds a binary mask of whether `df[key]` is in `values`.
Useful for filtering.
Args:
df: The DataFrame.
key: The column used for filtering.
values: Values to look out for.
Returns:
A boolean Series representing whether `df[key]` is in `values`.
Raises:
ValueError: 'key' does not exist in df.
TypeError: inconsistent types in df[key] and values.
"""
if key not in df:
raise ValueError(
        f'{key} is not a correct field. Please choose among {list(df.columns)}'
)
values_types = set(type(v) for v in values)
df_column_types = set(df[key].map(type).unique())
if len(values_types.union(df_column_types)) != 1:
raise TypeError("Inconsistent types between df['{key}'] and values")
return df[key].isin(values)
def contains_any(df: pd.DataFrame, key: str, values: list[str]) -> pd.Series:
"""Builds a binary mask of whether `df[key]` contains any of `values`.
Args:
df: The DataFrame. Note that `df[key]` must be a Sequence, e.g. the
background labels.
key: The column used for filtering.
values: Values to look out for.
Returns:
A boolean Series representing whether `df[key]` contains any of `values`.
Raises:
ValueError: key does not exist in df.
ValueError: inconsistent types in df[key] and values.
"""
if key not in df:
raise ValueError(
        f'{key} is not a correct field. Please choose among {list(df.columns)}'
)
values_types = set(type(v) for v in values)
df_column_types = set().union(
*df[key].map(lambda xs: set(type(x) for x in xs))
)
if len(values_types.union(df_column_types)) != 1:
raise ValueError("Inconsistent types between df['{key}'] and values")
return df[key].map(' '.join).str.contains('|'.join(values))
def contains_no(df: pd.DataFrame, key: str, values: list[str]) -> pd.Series:
"""Builds a binary mask of whether `df[key]` contains none of `values`.
Args:
df: The DataFrame. Note that `df[key]` must be a Sequence, e.g. the
background labels.
key: The column used for filtering.
values: Values to look out for.
Returns:
A boolean Series representing whether `df[key]` contains none of `values`.
Raises:
ValueError: key does not exist in df.
"""
return ~contains_any(df, key, values)
def is_not_in(
df: pd.DataFrame, key: str, values: list[SerializableType]
) -> pd.Series:
return ~is_in(df, key, values)
def append(df: pd.DataFrame, row: dict[str, Any]) -> pd.DataFrame:
  """Appends a single row, given as a dict, to the dataframe."""
  if set(row.keys()) != set(df.columns):
    raise ValueError(
        'The appended row must define exactly the columns of the dataframe: '
        f'{sorted(df.columns)}.'
    )
  # Wrap the row dict in a list so it becomes a single-row DataFrame.
  new_df = pd.concat([df, pd.DataFrame([row])], ignore_index=True)
  return new_df
def scrub(
feature_dict: dict[str, Any],
key: str,
values: Sequence[SerializableType],
all_but: bool = False,
replace_value: SerializableType | None = None,
) -> dict[str, Any]:
"""Removes any occurence of any value in values from feature_dict[key].
Args:
feature_dict: A dictionary that represents the row (=recording) to be
potentially scrubbed in a DataFrame.
key: The field from feature_dict used for scrubbing.
values: The values that will be scrubbed from feature_dict[key].
all_but: If activated, will scrub every value, except those specified.
replace_value: If specified, used as a placeholder wherever a value was
scrubbed.
Returns:
A copy of feature_dict, where all values at key have been scrubbed.
Raises:
ValueError: 'key' does not exist in df.
TypeError: any element of 'values' has a type different from the type at
df[key], or feature_dict[key] is not a str, list or np.ndarray.
"""
if key not in feature_dict:
raise ValueError(
f'{key} is not a correct field.'
f'Please choose among {list(feature_dict.keys())}'
)
if type(feature_dict[key]) not in [list, np.ndarray, str]:
raise TypeError(
'Can only scrub values from str/lists/ndarrays. Current column'
'is of type {}'.format(type(feature_dict[key]))
)
# Using this 'dirty' syntax because values and feature_dict[key] could be
# list or ndarray -> using the 'not values' to check emptiness does not work.
if len(values) == 0 or len(feature_dict[key]) == 0: # pylint: disable=g-explicit-length-test
return feature_dict
field_type = type(feature_dict[key][0])
for index, val in enumerate(values):
if not isinstance(val, field_type):
raise TypeError(
'Values[{}] has type {}, while values in feature_dict[{}] have'
' type {}'.format(index, type(val), key, field_type)
)
# Avoid changing the feature_dict in-place.
new_feature_dict = feature_dict.copy()
key_type = type(new_feature_dict[key])
if key_type == str:
new_feature_dict[key] = new_feature_dict[key].split(' ')
  scrub_mask = [x in values for x in new_feature_dict[key]]
if all_but:
scrub_mask = [not x for x in scrub_mask]
if replace_value is None:
new_feature_dict[key] = [
x for x, scrub in zip(new_feature_dict[key], scrub_mask) if not scrub
]
else:
new_feature_dict[key] = [
x if not scrub else replace_value
for x, scrub in zip(new_feature_dict[key], scrub_mask)
]
if key_type == str:
new_feature_dict[key] = ' '.join(new_feature_dict[key])
return new_feature_dict
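# Minimal usage sketch of scrub() on a single row (field names illustrative):
#
#   row = {'label': 'amecro norcar', 'bg_labels': ['amecro', 'unknown']}
#   scrub(row, key='bg_labels', values=['unknown'])
#   # -> {'label': 'amecro norcar', 'bg_labels': ['amecro']}
#   scrub(row, key='label', values=['amecro'], replace_value='scrubbed')
#   # -> {'label': 'scrubbed norcar', 'bg_labels': ['amecro', 'unknown']}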
def filter_df(
df: pd.DataFrame, mask_op: MaskOp, op_kwargs: dict[str, SerializableType]
):
"""Filters a dataframe based on the output of the mask_op.
Args:
df: The dataframe to be filtered.
mask_op: The operation that generates the binary mask used for filtering.
op_kwargs: kwargs to be passed to the mask_op.
Returns:
The filtered dataframe
"""
mask_query = Query(op=mask_op, kwargs=op_kwargs)
return df[APPLY_FN[type(mask_query)](df, mask_query)]
def or_series(series_list: list[pd.Series]) -> pd.Series:
"""Performs an OR operation on a list of boolean pd.Series.
Args:
series_list: List of boolean pd.Series to perform OR on.
Returns:
The result of s_1 or ... or s_N, for s_i in series_list.
Raises:
    TypeError: Some series in series_list has non-boolean values.
RuntimeError: The series's indexes in series_list don't match, potentially
meaning that series don't describe the same recordings.
"""
reference_indexes = series_list[0].index
if any(
[not series.index.equals(reference_indexes) for series in series_list]
):
raise RuntimeError('OR operation expects consistent Series as input')
if any([series.dtype != bool for series in series_list]):
raise TypeError('OR operation expects boolean Series as input.')
return functools.reduce(lambda s1, s2: s1.add(s2), series_list)
def and_series(series_list: list[pd.Series]) -> pd.Series:
"""Performs an AND operation on a list of boolean pd.Series.
Args:
series_list: List of boolean pd.Series to perform AND on.
Returns:
The result of s_1 and ... and s_N, for s_i in series_list.
Raises:
    TypeError: Some series in series_list has non-boolean values.
RuntimeError: The series's indexes in series_list don't match, potentially
meaning that series don't describe the same recordings.
"""
reference_indexes = series_list[0].index
if any(
[not series.index.equals(reference_indexes) for series in series_list]
):
raise RuntimeError('AND operation expects consistent Series as input')
if any([series.dtype != bool for series in series_list]):
    raise TypeError('AND operation expects boolean Series as input.')
return functools.reduce(lambda s1, s2: s1 * s2, series_list)
def concat_no_duplicates(df_list: list[pd.DataFrame]) -> pd.DataFrame:
"""Concatenates dataframes in df_list, then removes duplicates examples.
Args:
df_list: The list of dataframes to concatenate.
Returns:
The concatenated dataframe, where potential duplicated rows have been
dropped.
Raises:
RuntimeError: Some series in series_list don't share the same columns.
"""
reference_columns = set(df_list[0].columns)
if any([set(df.columns) != reference_columns for df in df_list]):
raise RuntimeError(
'Concatenation expects dataframes to share the exact '
'same set of columns.'
)
concat_df = pd.concat(df_list)
# List and np.ndarray are not hashable, therefore the method
# .duplicated() will raise an error if any of the value is of this type.
# Instead convert to tuples for the sake of duplicate verification.
duplicated = concat_df.applymap(
lambda e: tuple(e) if type(e) in [list, np.ndarray] else e
).duplicated()
return concat_df[~duplicated]
def filter_in_class_list(key: str, class_list_name: str) -> Query:
"""Creates a query filtering out labels not in the target class list.
Args:
key: Key for labels to filter. (eg, 'label'.)
class_list_name: Name of class list to draw labels from.
Returns:
Query for filtering.
"""
db = namespace_db.load_db()
classes = list(db.class_lists[class_list_name].classes)
return Query(
op=TransformOp.FILTER,
kwargs={
'mask_op': MaskOp.IN,
'op_kwargs': {
'key': key,
'values': classes,
},
},
)
def filter_not_in_class_list(key: str, class_list_name: str) -> Query:
"""Creates a query filtering out labels in the target class list.
Args:
key: Key for labels to filter. (eg, 'label'.)
class_list_name: Name of class list to draw labels from.
Returns:
Query for filtering.
"""
db = namespace_db.load_db()
classes = list(db.class_lists[class_list_name].classes)
return Query(
op=TransformOp.FILTER,
kwargs={
'mask_op': MaskOp.NOT_IN,
'op_kwargs': {
'key': key,
'values': classes,
},
},
)
def filter_contains_no_class_list(key: str, class_list_name: str) -> Query:
"""Creates a query filtering out labels not contains in the target class list.
Args:
key: The column used for filtering. (eg, 'label'.) Note that `df[key]` must
be a Sequence
class_list_name: Name of class list to remove labels from.
Returns:
Query for filtering.
"""
db = namespace_db.load_db()
classes = list(db.class_lists[class_list_name].classes)
return Query(
op=TransformOp.FILTER,
kwargs={
'mask_op': MaskOp.CONTAINS_NO,
'op_kwargs': {
'key': key,
'values': classes,
},
},
)
def filter_contains_any_class_list(key: str, class_list_name: str) -> Query:
"""Creates a query filtering out labels which contain any of class list.
Args:
key: The column used for filtering. (eg, 'label'.) Note that `df[key]` must
be a Sequence
class_list_name: Name of class list to remove labels from.
Returns:
Query for filtering.
"""
db = namespace_db.load_db()
classes = list(db.class_lists[class_list_name].classes)
return Query(
op=TransformOp.FILTER,
kwargs={
'mask_op': MaskOp.CONTAINS_ANY,
'op_kwargs': {
'key': key,
'values': classes,
},
},
)
def scrub_all_but_class_list(key: str, class_list_name: str) -> Query:
"""Scrub everything outside the chosen class list.
Args:
key: Key for labels to filter. (eg, 'label'.)
class_list_name: Name of class list containing labels to keep.
Returns:
Query for scrub operation.
"""
db = namespace_db.load_db()
classes = list(db.class_lists[class_list_name].classes)
return Query(
op=TransformOp.SCRUB_ALL_BUT,
kwargs={
'key': key,
'values': classes,
},
)
APPLY_FN = {
Query: apply_query,
QuerySequence: apply_sequence,
QueryComplement: apply_complement,
QueryParallel: apply_parallel,
}
MERGE_FN = {
MergeStrategy.OR: or_series,
MergeStrategy.AND: and_series,
MergeStrategy.CONCAT_NO_DUPLICATES: concat_no_duplicates,
}
OPS = {
# pylint: disable=g-long-lambda
MaskOp.IN: is_in,
MaskOp.CONTAINS_NO: contains_no,
MaskOp.CONTAINS_ANY: contains_any,
MaskOp.NOT_IN: is_not_in,
TransformOp.SAMPLE: su.sample_recordings,
TransformOp.SCRUB: lambda df, **kwargs: df.apply(
functools.partial(scrub, **kwargs), axis=1, result_type='expand'
),
TransformOp.SCRUB_ALL_BUT: lambda df, **kwargs: df.apply(
functools.partial(functools.partial(scrub, all_but=True), **kwargs),
axis=1,
result_type='expand',
),
TransformOp.FILTER: filter_df,
TransformOp.APPEND: append,
}
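# End-to-end sketch (dataframe columns and the class list name are
# illustrative): queries compose through APPLY_FN, so a filter built by
# filter_in_class_list can be applied directly to a dataframe.
#
#   query = filter_in_class_list('species_code', 'some_class_list')
#   filtered_df = APPLY_FN[type(query)](df, query)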
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chirp custom TFDS Features."""
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
class Int16AsFloatTensor(tfds.features.Audio):
"""An int16 tfds.features.Tensor represented as a float32 in [-1, 1).
Examples are stored as int16 tensors but encoded from and decoded into float32
tensors in the [-1, 1) range (1 is excluded because we divide the
[-2**15, 2**15 - 1] interval by 2**15).
"""
INT16_SCALE = float(1 << 15)
ALIASES = ['chirp.data.bird_taxonomy.bird_taxonomy.Int16AsFloatTensor']
def __init__(
self,
*,
file_format: str | None = None,
shape: tfds.typing.Shape,
dtype: tf.dtypes.DType = tf.float32,
sample_rate: tfds.typing.Dim,
encoding: str | tfds.features.Encoding = tfds.features.Encoding.NONE,
doc: tfds.features.DocArg = None,
lazy_decode: bool = False,
):
del file_format
del dtype
self._int16_tensor_feature = tfds.features.Tensor(
shape=shape, dtype=tf.int16, encoding=encoding
)
if lazy_decode:
raise ValueError('lazy decoding not supported')
super().__init__(
file_format=None,
shape=shape,
dtype=tf.float32,
sample_rate=sample_rate,
encoding=encoding,
doc=doc,
lazy_decode=lazy_decode,
)
def get_serialized_info(self):
return self._int16_tensor_feature.get_serialized_info()
def encode_example(self, example_data):
if not isinstance(example_data, np.ndarray):
example_data = np.array(example_data, dtype=np.float32)
if example_data.dtype != np.float32:
raise ValueError('dtype should be float32')
if example_data.min() < -1.0 or example_data.max() > 1.0 - (
1.0 / self.INT16_SCALE
):
raise ValueError('values should be in [-1, 1)')
return self._int16_tensor_feature.encode_example(
(example_data * self.INT16_SCALE).astype(np.int16)
)
def decode_example(self, tfexample_data):
int16_scale = tf.constant(self.INT16_SCALE, dtype=tf.float32)
decoded_data = tf.cast(
self._int16_tensor_feature.decode_example(tfexample_data), tf.float32
)
return decoded_data / int16_scale
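# Round-trip sketch (values chosen arbitrarily): floats in [-1, 1) are scaled
# by 2**15 and stored as int16, then decoded by dividing by the same scale, so
# the round trip is exact for multiples of 1 / 2**15.
#
#   feature = Int16AsFloatTensor(shape=[3], sample_rate=32000)
#   feature.encode_example(np.array([0.0, 0.5, -1.0], np.float32))
#   # stored as the int16 values [0, 16384, -32768]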
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for data processing."""
import hashlib
import os.path
from typing import Any, Iterable
import chirp.data.bird_taxonomy # pylint: disable=unused-import
import chirp.data.soundscapes # pylint: disable=unused-import
from chirp.preprocessing import pipeline as pipeline_
import tensorflow as tf
import tensorflow_datasets as tfds
# Import bird_taxonomy and soundscapes to register the datasets with TFDS.
_DEFAULT_DATASET_DIR = None
_DEFAULT_TFDS_DATADIR = None
_DEFAULT_PIPELINE = None
def get_dataset(
split: str,
is_train: bool = False,
dataset_directory: str | Iterable[str] = _DEFAULT_DATASET_DIR,
tfds_data_dir: str | None = _DEFAULT_TFDS_DATADIR,
tf_data_service_address: Any | None = None,
pipeline: pipeline_.Pipeline | None = _DEFAULT_PIPELINE,
) -> tuple[tf.data.Dataset, tfds.core.DatasetInfo]:
"""Returns the placeholder dataset.
Args:
split: data split, e.g. 'train', 'test', 'train[:80%]', etc.
is_train: If the dataset will be used for training. This only affects
whether data will be distributed or not in case tf_data_service_address is
provided.
dataset_directory: dataset directory. If multiple are passed, then samples
are uniformly taken from each dataset. When multiple datasets are loaded,
      only a single dataset info (that of the last dataset loaded) is returned.
tfds_data_dir: If provided, uses tfds.add_data_dir, and then tfds.load,
instead of using the tfds.builder_from_directory.
tf_data_service_address: Address for TFDataService. Only used if is_train is
set.
pipeline: (required) A preprocessing pipeline to apply to the data.
Returns:
The placeholder dataset.
Raises:
ValueError: If no initialized Pipeline is passed.
RuntimeError: If no datasets are loaded.
"""
if isinstance(dataset_directory, str):
dataset_directory = [dataset_directory]
if pipeline is None:
raise ValueError(
'data_utils.get_dataset() requires a valid initialized Pipeline object '
'to be specified.'
)
read_config = tfds.ReadConfig(add_tfds_id=True)
datasets = []
dataset_info = None
for dataset_dir in dataset_directory:
if tfds_data_dir:
tfds.core.add_data_dir(tfds_data_dir)
ds, dataset_info = tfds.load(
dataset_dir,
split=split,
data_dir=tfds_data_dir,
with_info=True,
read_config=read_config,
shuffle_files=is_train,
)
else:
builder = tfds.builder_from_directory(dataset_dir)
ds = builder.as_dataset(
split=split, read_config=read_config, shuffle_files=is_train
)
dataset_info = builder.info
datasets.append(pipeline(ds, dataset_info))
if len(datasets) > 1:
ds = tf.data.Dataset.sample_from_datasets(datasets)
else:
ds = datasets[0]
if is_train and tf_data_service_address:
ds = ds.apply(
tf.data.experimental.service.distribute(
processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
service=tf_data_service_address,
job_name='chirp_job',
)
)
ds = ds.prefetch(2)
if dataset_info is None:
raise RuntimeError('No datasets loaded.')
return ds, dataset_info
def xeno_canto_filename(filename: str, id_: int) -> tuple[str, str]:
"""Determine a filename for a Xeno-Canto recording.
We can't use the original filename since some of those are not valid Unix
filenames (e.g., they contain slashes). Hence the files are named using just
their ID. There are some files with spaces in the extension, so that is
handled here as well.
We also return the first two characters of the MD5 hash of the filename. This
can be used to evenly distribute files across 256 directories in a
deterministic manner.
Args:
filename: The original filename (used to determine the extension).
id_: The Xeno-Canto ID of the recording.
Returns:
    A tuple where the first element is the filename to save this recording to
and the second element is a two-character subdirectory name in which to
save the file.
"""
# Two files have the extension ". mp3"
ext = os.path.splitext(filename)[1].lower().replace(' ', '')
filename = f'XC{id_}{ext}'
# Results in ~2900 files per directory
subdir = hashlib.md5(filename.encode()).hexdigest()[:2]
return filename, subdir
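# Example (the two-character hash prefix shown is schematic, not computed):
#
#   xeno_canto_filename('original upload. mp3', 123456)
#   # -> ('XC123456.mp3', <first two hex chars of md5('XC123456.mp3')>)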
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Datasets extensions for common acoustic archive formats."""
import abc
import dataclasses
import logging
from typing import Any, Iterable
from chirp import audio_utils
from chirp.data import tfds_features
from etils import epath
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
@dataclasses.dataclass
class WavDirectoryBuilderConfig(tfds.core.BuilderConfig):
sample_rate_hz: int = 16_000
interval_length_s: float = 6.0
max_peaks: int | None = None
@property
def context_duration_samples(self) -> int:
return int(round(self.interval_length_s * self.sample_rate_hz))
def _generate_context_windows(
wav_path: epath.Path, config: WavDirectoryBuilderConfig
) -> Iterable[tuple[str, dict[str, Any]]]:
"""Generates audio context window feature dicts from a single mono WAV file.
Args:
wav_path: A path readable by scipy.io.wavfile.
config: Desired properties of the extracted context windows.
Yields:
Key-values, where the key is {filename}:{start_time}ms and the value is a
feature dict conforming to WavDirectoryBuilder._info.
"""
wavfile = tfds.core.lazy_imports.scipy.io.wavfile
filename = str(wav_path)
try:
with wav_path.open('rb') as f:
sample_rate, samples = wavfile.read(f)
except ValueError as e:
# One case: a file with name ending in .wav starts with several 0 bytes.
logging.warning('skipped %s due to read() error: %s', wav_path, e)
return
assert len(samples.shape) == 1 # following code assumes mono
samples = samples.astype(np.float32) / -np.iinfo(np.int16).min
context_duration = config.context_duration_samples
segment_starts = set()
max_peaks = config.max_peaks
if max_peaks:
peak_indices = audio_utils.find_peaks_from_audio(
samples, sample_rate, max_peaks
)
peak_indices = np.asarray(peak_indices)
for midpoint in peak_indices:
segment_start = max(0, midpoint - context_duration // 2)
segment_starts.add(segment_start)
else:
segment_starts.update(range(0, len(samples), context_duration))
# Assertion failures saying "two examples share the same hashed key" have
# been observed from full-scale data. Here we'll guard against that by
# explicitly ensuring no duplicate keys are emitted from a single file.
keys_emitted = set()
for segment_start in sorted(segment_starts):
segment_end = segment_start + context_duration
if segment_end > len(samples):
break
context_window = samples[segment_start:segment_end]
start_ms = int(round(segment_start / config.sample_rate_hz * 1000))
key = f'{wav_path}:{start_ms:010d}ms'
if key in keys_emitted:
logging.warning('skipped yielding features for duplicate key: %s', key)
continue
yield key, {
'audio': context_window,
'segment_start': segment_start,
'segment_end': segment_end,
'filename': filename,
}
keys_emitted.add(key)
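# Context-window arithmetic for _generate_context_windows: with the default
# config (16 kHz sample rate, 6.0 s intervals), context_duration_samples is
# 96_000, so an unfiltered 20-second mono file yields windows starting at
# samples 0, 96_000 and 192_000 (the trailing partial window is dropped),
# keyed as '<path>:0000000000ms', '<path>:0000006000ms' and
# '<path>:0000012000ms'.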
class WavDirectoryBuilder(tfds.core.GeneratorBasedBuilder):
"""Abstract base class for reading a nested directory of mono WAV files.
This provides the WAV reading, slicing into context windows, and a
configuration that filters to only windows with peaks. Concrete subclasses
  should set VERSION and RELEASE_NOTES and implement _description and
_citation.
"""
BUILDER_CONFIGS = [
# pylint: disable=unexpected-keyword-arg
WavDirectoryBuilderConfig(
name='unfiltered',
description=(
'Context windows covering the entire dataset with no overlap.'
),
),
WavDirectoryBuilderConfig(
name='slice_peaked',
description=(
'Context windows filtered to five peaks per original file.'
),
max_peaks=5,
)
# pylint: enable=unexpected-keyword-arg
]
MANUAL_DOWNLOAD_INSTRUCTIONS = """
Copy, into tensorflow_datasets/downloads/manual, a nested directory
structure containing the .wav files to be ingested.
"""
@abc.abstractmethod
def _description(self) -> str:
raise NotImplementedError()
@abc.abstractmethod
def _citation(self) -> str:
raise NotImplementedError()
def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
return tfds.core.DatasetInfo(
builder=self,
description=self._description(),
features=tfds.features.FeaturesDict({
'audio': tfds_features.Int16AsFloatTensor(
shape=[self.builder_config.context_duration_samples],
sample_rate=self.builder_config.sample_rate_hz,
encoding=tfds.features.Encoding.ZLIB,
),
'segment_start': tfds.features.Scalar(dtype=tf.uint64),
'segment_end': tfds.features.Scalar(dtype=tf.uint64),
'filename': tfds.features.Text(),
}),
supervised_keys=None,
citation=self._citation(),
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
return {
'train': self._generate_examples(dl_manager.manual_dir),
}
def _generate_examples(self, root_dir: epath.PathLike):
"""Walks a directory and generates fixed duration slices of all WAV files.
Args:
root_dir: Directory path from which to read WAV files.
Returns:
    PTransform from a WAV file path to a generator of key-value pairs
[filename:start_millis, Example dict].
"""
beam = tfds.core.lazy_imports.apache_beam
wav_paths = []
def _walk(wav_dir: epath.Path):
"""Manually walks the tree under root_dir, collecting WAV paths."""
# needed because epath intentionally does not implement recursive glob.
for entry in wav_dir.iterdir():
if entry.is_file() and (entry.suffix in ['.wav', '.WAV']):
wav_paths.append(entry)
if entry.is_dir():
_walk(entry)
_walk(epath.Path(root_dir))
return beam.Create(wav_paths) | beam.ParDo(
_generate_context_windows, config=self.builder_config
)
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for manipulating soundscape data and annotations."""
import dataclasses
import os
from typing import Any, Callable, Iterator, Set
from absl import logging
from chirp.taxonomy import namespace
from chirp.taxonomy import namespace_db
from etils import epath
from jax import numpy as jnp
import numpy as np
import pandas as pd
import tensorflow_datasets as tfds
import tqdm
_AUDIO_EXTENSIONS = ['.flac', '.wav']
LocalizationFn = Callable[[Any, int, float, int], jnp.ndarray]
MAX_INTERVALS_PER_FILE = 200
UNKNOWN_LABEL = namespace.UNKNOWN_LABEL
@dataclasses.dataclass
class MetadataFeature:
"""Data for handling a metadata feature.
  Attributes:
source_key: Key used for the metadata in the original dataset.
target_key: New key used for the feature in the output dataset.
convert_fn: Function for parsing metadata feature from the original dataset.
(For example, to convert strings in a CSV file to floats.)
feature_type: TFDS feature type, which is used in the TFDS FeatureDict.
"""
source_key: str
target_key: str
convert_fn: Callable[[str], Any]
feature_type: tfds.features.tensor_feature.Tensor
MetadataLoaderType = Callable[
[epath.Path, dict[str, MetadataFeature]], pd.DataFrame
]
def load_class_list(
class_list_name: str, keep_unknown_annotation: bool
) -> namespace.ClassList:
"""Loads the target class list, possibly adding an unknown label.
Args:
class_list_name: Name of the class list to load.
keep_unknown_annotation: If True, add an 'unknown' class to the ClassList.
Returns:
The desired ClassList.
"""
db = namespace_db.load_db()
dataset_class_list = db.class_lists[class_list_name]
if (
keep_unknown_annotation
and UNKNOWN_LABEL not in dataset_class_list.classes
):
# Create a new class list which includes the 'unknown' class.
dataset_class_list = namespace.ClassList(
dataset_class_list.namespace,
(UNKNOWN_LABEL,) + dataset_class_list.classes,
)
return dataset_class_list
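# Editor's usage sketch for load_class_list above; 'caples' is one of the
# class lists used by the soundscape configs and is assumed to exist in the
# namespace database:
#
#   class_list = load_class_list('caples', keep_unknown_annotation=True)
#   assert UNKNOWN_LABEL in class_list.classes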
def create_segments_df(
all_audio_filepaths: Iterator[epath.Path],
annotations_df: pd.DataFrame | None,
supervised: bool,
metadata_dir: epath.Path,
metadata_fields: dict[str, MetadataFeature],
metadata_load_fn: MetadataLoaderType | None,
) -> pd.DataFrame:
"""Create the dataframe of segments with annotations and audio urls.
Args:
all_audio_filepaths: Iterator for audio sources.
annotations_df: DataFrame of annotations.TimeWindowAnnotation.
supervised: Whether this is a supervised dataset.
metadata_dir: Directory containing the dataset's metadata. Only considered
if metadata_load_fn is provided.
metadata_fields: Dictionary describing handling of metadata features.
metadata_load_fn: Function for loading metadata.
Returns:
DataFrame of dataset annotations with metadata.
"""
if supervised:
    # Combine segments with additional metadata (e.g. Country).
segments = combine_annotations_with_metadata(
annotations_df, metadata_dir, metadata_fields, metadata_load_fn
)
logging.info('starting with %d annotations...', len(segments))
segments = add_annotated_urls(segments, all_audio_filepaths)
else:
# For unsupervised data, we have access to a set of non-annotated audio
# files. Therefore, we collect them, and attach an "unknown" labelled
# segment to each of the audio files.
segments = pd.DataFrame(all_audio_filepaths, columns=['url'])
segments['filename'] = segments['url'].apply(lambda x: x.stem)
# For compatibility, we add an "unknown" annotation to the recording
# dataframe that goes from start to end. That ensures that any interval
# detected as signal by our localization function will appear in the
# final audio set, with the 'unknown' annotation.
segments['start_time_s'] = 0
segments['end_time_s'] = -1
segments['label'] = [['unknown'] for _ in range(len(segments))]
logging.info('%s annotated segments detected', len(segments))
return segments
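# Editor's note on the unsupervised branch of create_segments_df: each audio
# file becomes a single row spanning the whole recording, using the
# end_time_s == -1 sentinel that downstream code maps to the last sample.
# A row looks roughly like the following (paths are hypothetical):
#
#   {'url': epath.Path('/data/site1/REC_001.flac'), 'filename': 'REC_001',
#    'start_time_s': 0, 'end_time_s': -1, 'label': ['unknown']}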
def combine_annotations_with_metadata(
segments: pd.DataFrame,
metadata_dir: epath.Path,
metadata_fields: dict[str, MetadataFeature],
metadata_load_fn: MetadataLoaderType | None,
metadata_df: pd.DataFrame | None = None,
) -> pd.DataFrame:
"""Combine segments with whatever metadata is available for this dataset.
Args:
segments: DataFrame of annotations.TimeWindowAnnotation
metadata_dir: Directory containing the dataset's metadata. Only considered
if metadata_load_fn is provided.
metadata_fields: Dictionary describing handling of metadata features.
metadata_load_fn: Function for loading metadata.
metadata_df: DataFrame of pre-loaded metadata. (testing convenience.)
Returns:
DataFrame of joined annotations and metadata.
"""
if metadata_load_fn is None:
return segments
if metadata_df is None:
# Load the dataframe containing the metadata. Each row describes some audio
# file, and the dataframe should contain the 'filename' column, which acts
# as the key to match with segments.
metadata_df = metadata_load_fn(metadata_dir, metadata_fields)
fid_to_metadata_index = metadata_df.groupby('filename').groups
combined_segments = []
bar = tqdm.tqdm(segments.iterrows(), total=len(segments))
  bar.set_description('Combining segments with full metadata.')
for _, segment in bar:
fid = segment['filename']
segment_metadata = metadata_df.loc[fid_to_metadata_index[fid]]
if segment_metadata.empty:
      logging.warning('MediaId %s not found in metadata', fid)
for field in metadata_fields.values():
if field.target_key == 'filename':
# filename is special and we don't want to overwrite it.
continue
segment[field.target_key] = field.convert_fn(
segment_metadata[field.target_key].iloc[0]
)
combined_segments.append(segment)
concat_segments = pd.concat(combined_segments, axis=1).T
return concat_segments
def add_annotated_urls(
segments: pd.DataFrame, all_audio_filepaths: Iterator[epath.Path]
) -> pd.DataFrame:
"""Creates URLs for annotated segments, matching them to audio files.
Args:
segments: DataFrame of annotations and metadata.
all_audio_filepaths: Iterator for audio sources.
Returns:
    Updated segments DataFrame with URLs for existing audio sources.
Raises:
ValueError if no URLs are found.
"""
# Our strategy is to match file stems, while checking that there
# are no collisions. This works for all known soundscape datasets,
# which typically have very structured filenames even if there are
# multiple levels of file organization.
stem_to_filepath = {}
for fp in all_audio_filepaths:
stem = fp.stem.split('.')[0]
if stem in stem_to_filepath:
raise ValueError(
'Found two files (%s vs %s) with the same stem.'
% (fp, stem_to_filepath[stem])
)
stem_to_filepath[stem] = fp
segments['stem'] = segments['filename'].apply(
lambda filename: os.path.basename(filename).split('.')[0]
)
# Log all segments that could not be matched to an actual audio file.
audio_not_found = segments[
segments['stem'].apply(lambda stem: stem not in stem_to_filepath)
]
logging.info(
'Audios that could not be found: %s.', audio_not_found['stem'].unique()
)
segments['url'] = segments.apply(
lambda rec: stem_to_filepath.get(rec['stem'], ''), axis=1
)
# Filter segments without urls.
segments = segments[segments['url'].apply(lambda url: url != '')] # pylint: disable=g-explicit-bool-comparison
if segments.empty:
raise ValueError(
'No segments found. Likely a problem matching '
'annotation filenames to audio.'
)
segments = segments.drop('stem', axis=1)
return segments
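# Editor's illustration of the stem-matching strategy in add_annotated_urls;
# the filenames below are hypothetical. Both the annotation's filename and the
# audio path reduce to the same stem, so the annotation row gets that URL:
#
#   os.path.basename('site1/REC_001.Table.1.txt').split('.')[0]  # -> 'REC_001'
#   epath.Path('/data/site1/REC_001.flac').stem.split('.')[0]    # -> 'REC_001'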
def _has_overlap(start1, end1, start2, end2):
"""Check whether two time windows overlap."""
# no overlap, interval < anno
if end1 < start2:
return False
# no overlap, interval > anno
if end2 < start1:
return False
return True
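# Editor's note: the comparisons in _has_overlap are strict, so windows that
# merely touch at an endpoint still count as overlapping, e.g.
#
#   _has_overlap(0, 5, 5, 10)  # -> True (end1 == start2)
#   _has_overlap(0, 5, 6, 10)  # -> False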
def get_full_length_annotations(
audio: np.ndarray,
file_segments: pd.DataFrame,
class_list: namespace.ClassList,
sample_rate_hz: int,
unknown_guard: bool = False,
) -> pd.DataFrame:
"""Extracts annotations from file segments for full-length recordings.
`file_segments` corresponds to the segments annotated by recordists.
Args:
audio: The full audio file, already loaded.
file_segments: The annotated segments for this audio. Each row (=segment)
must minimally contain the following fields: ['label', 'start_time_s',
'end_time_s'].
class_list: List of labels which will appear in the processed dataset.
sample_rate_hz: Sample rate of audio.
unknown_guard: If True, add an "unknown" annotation from the beginning of
the recording to the beginning of the first annotation and another
"unknown" annotation from the end of the last annotation to the end of the
recording.
Returns:
annotations: A DataFrame of annotations with the same columns as
`file_segments`.
"""
logging.info('Found %d annotations for target file.', len(file_segments))
annotations = file_segments.copy()
beam = tfds.core.lazy_imports.apache_beam
# Convert start and end times into array indices.
to_index = lambda t: audio.shape[-1] if t == -1 else int(t * sample_rate_hz)
annotations['annotation_start'] = annotations['start_time_s'].map(to_index)
annotations['annotation_end'] = annotations['end_time_s'].map(to_index)
# Discard malformed segments, i.e., segments for which the end time is
# anterior to the start time.
malformed_segment = (
annotations['annotation_end'] < annotations['annotation_start']
)
if malformed_segment.sum() > 0:
logging.warning(
(
'Skipping %d annotated segment(s) because end time is anterior '
'to start time.'
),
malformed_segment.sum(),
)
beam.metrics.Metrics.counter('soundscapes', 'dropped_malformed').inc(
malformed_segment.sum()
)
# Split multi-label annotations into multiple single-label annotations.
annotations = annotations.explode(column='label')
# Discard annotations with labels not in the class list.
is_in_class_list = annotations['label'].isin(class_list.classes)
if (~is_in_class_list).sum() > 0:
logging.info(
(
'Skipping %d annotated segment(s) because the corresponding label'
' is not in the class list.'
),
(~is_in_class_list).sum(),
)
for label in annotations[~is_in_class_list]['label']:
beam.metrics.Metrics.counter('soundscapes', f'dropped_{label}').inc()
annotations = annotations[~malformed_segment & is_in_class_list]
if unknown_guard:
prefix_annotation = annotations.iloc[:1].copy()
prefix_annotation['label'] = UNKNOWN_LABEL
prefix_annotation['annotation_start'] = 0
prefix_annotation['annotation_end'] = annotations['annotation_start'].min()
prefix_annotation['start_time_s'] = 0.0
prefix_annotation['end_time_s'] = annotations['start_time_s'].min()
suffix_annotation = annotations.iloc[-1:].copy()
suffix_annotation['label'] = UNKNOWN_LABEL
suffix_annotation['annotation_start'] = annotations['annotation_end'].max()
suffix_annotation['annotation_end'] = audio.shape[-1]
    suffix_annotation['start_time_s'] = annotations['end_time_s'].max()
    suffix_annotation['end_time_s'] = audio.shape[-1] / sample_rate_hz
annotations = pd.concat(
[prefix_annotation, annotations, suffix_annotation],
axis='rows',
ignore_index=True,
)
return annotations
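# Editor's sketch of the unknown_guard behavior above, with illustrative
# numbers: for a 60 s recording whose annotations span 10 s to 50 s, an
# 'unknown' row covering [0 s, 10 s) is prepended and another covering
# [50 s, 60 s) is appended, so that detections outside the annotated span are
# not scored as false positives.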
def get_labeled_intervals(
audio: np.ndarray,
file_segments: pd.DataFrame,
class_list: namespace.ClassList,
sample_rate_hz: int,
interval_length_s: int,
localization_fn: LocalizationFn,
drop_unknown_segments: bool,
) -> dict[tuple[int, int], Set[str]]:
"""Slices the given audio, and produces labels intervals.
`file_segments` corresponds to the segments annotated by recordists. The
final intervals correspond to slices of the audio where actual signal
is observed (according to the `slice_peaked_audio` function), and the
corresponding labels correspond to the label from annotated segments which
overlap with the slice.
Args:
audio: The full audio file, already loaded.
file_segments: The annotated segments for this audio. Each row (=segment)
must minimally contain the following fields: ['label', 'start_time_s',
'end_time_s'].
class_list: List of labels which will appear in the processed dataset.
sample_rate_hz: Sample rate of audio.
interval_length_s: Window size to slice.
localization_fn: Function for selecting audio intervals.
drop_unknown_segments: If True, segments containing any UNKNOWN_LABEL will
be omitted from the dataset.
Returns:
labeled_intervals: A Dict mapping a (start, end) time of the recording to
the set of classes present in that interval.
"""
logging.info('Found %d annotations for target file.', len(file_segments))
beam = tfds.core.lazy_imports.apache_beam
# Slice the audio into intervals
# Returns `interval_length_s` long intervals.
audio_intervals = [
(int(st), int(end))
for (st, end) in localization_fn(
audio, sample_rate_hz, interval_length_s, MAX_INTERVALS_PER_FILE
)
]
interval_timestamps = sorted(audio_intervals)
def _start_end_key(seg):
if seg['end_time_s'] == -1:
end = audio.shape[-1]
else:
end = int(sample_rate_hz * seg['end_time_s'])
if seg['end_time_s'] < seg['start_time_s']:
logging.warning(
'Skipping annotated segment because end time is anterior to start '
'time.'
)
return ()
return (int(sample_rate_hz * seg['start_time_s']), end)
# Search for intervals with annotations.
segments_by_timestamp = {
_start_end_key(seg): seg
for _, seg in file_segments.iterrows()
if _start_end_key(seg)
}
labeled_intervals = {}
for st, end in interval_timestamps:
interval_labels = set([])
for (
current_annotation_start,
        current_annotation_end,
), seg in segments_by_timestamp.items():
if not _has_overlap(
          st, end, current_annotation_start, current_annotation_end
):
continue
# found an overlap!
for label in seg['label']:
if label in class_list.classes:
interval_labels.add(label)
else:
logging.info('dropping label not in class list: %s', str(label))
beam.metrics.Metrics.counter('soundscapes', f'dropped_{label}').inc()
if not interval_labels:
beam.metrics.Metrics.counter('soundscapes', 'no_interval_labels').inc()
continue
if drop_unknown_segments and UNKNOWN_LABEL in interval_labels:
beam.metrics.Metrics.counter('soundscapes', 'skipped_unknown').inc()
logging.info(
'skipping unknown segment with labels %s', str(interval_labels)
)
continue
beam.metrics.Metrics.counter('soundscapes', 'labeled_intervals').inc()
    beam.metrics.Metrics.counter('soundscapes', 'total_labels').inc(
len(interval_labels)
)
labeled_intervals[(st, end)] = interval_labels
return labeled_intervals
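# Editor's sketch of the structure returned by get_labeled_intervals: keys are
# (start, end) sample indices chosen by `localization_fn`, values are the sets
# of labels from annotated segments overlapping each window. With a 32 kHz
# sample rate and 5 s windows (and hypothetical species codes):
#
#   {(0, 160000): {'amecro'}, (480000, 640000): {'amecro', 'norcar'}}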
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""birdclef dataset."""
# pylint: disable=unused-import
from .soundscapes import Soundscapes
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config utils specific to BirdClef Soundscape datasets."""
import csv
import os
from chirp.data.soundscapes import soundscapes_lib
from chirp.taxonomy import annotations
from chirp.taxonomy import namespace_db
from etils import epath
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
import tqdm
_DEPRECATED2NEW = {
'mallar': 'mallar3',
'rufant1': 'rufant7',
}
def load_birdclef_metadata(
root: epath.Path,
metadata_feature_info: dict[str, soundscapes_lib.MetadataFeature],
) -> pd.DataFrame:
"""The `metadata_load_fn` for Birdclef2019-based configs.
Args:
root: Base dataset path.
metadata_feature_info: Dictionary describing the desired metadata features.
Returns:
DataFrame of metadata parsed from the dataset.
"""
metadata_path = root / 'birdclef2019' / 'metadata'
df = []
bar = tqdm.tqdm(metadata_path.iterdir())
bar.set_description('Loading BirdClef2019 metadata.')
for path in bar:
with path.open('rb') as f:
df.append(pd.read_json(f, typ='series'))
df = pd.concat(df, axis=1).T
for feature in metadata_feature_info.values():
df[feature.target_key] = df[feature.source_key].map(feature.convert_fn)
df = df.drop(feature.source_key, axis=1)
return df
def birdclef_metadata_features() -> dict[str, soundscapes_lib.MetadataFeature]:
"""Metadata features to join with BirdClef data."""
feature_types = {
'filename': soundscapes_lib.MetadataFeature(
'FileName', 'filename', str, tfds.features.Text()
),
'country': soundscapes_lib.MetadataFeature(
'Country', 'country', str, tfds.features.Text()
),
'longitude': soundscapes_lib.MetadataFeature(
'Longitude',
'longitude',
float,
tfds.features.Scalar(dtype=tf.float32),
),
'latitude': soundscapes_lib.MetadataFeature(
'Latitude', 'latitude', float, tfds.features.Scalar(dtype=tf.float32)
),
'elevation': soundscapes_lib.MetadataFeature(
'Elevation',
'elevation',
float,
tfds.features.Scalar(dtype=tf.float32),
),
'recordist': soundscapes_lib.MetadataFeature(
'AuthorID', 'recordist', str, tfds.features.Text()
),
}
return feature_types
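# Editor's sketch: these features are intended to be paired with
# load_birdclef_metadata when defining a BirdClef-based SoundscapesConfig in
# chirp.data.soundscapes.soundscapes (illustrative only; no such config is
# defined in this file):
#
#   metadata_load_fn=load_birdclef_metadata,
#   metadata_fields=birdclef_metadata_features(),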
def load_caples_annotations(annotations_path: epath.Path) -> pd.DataFrame:
"""Loads the dataframe of all caples annotations from annotation CSV.
Args:
annotations_path: Filepath for the annotations CSV.
Returns:
DataFrame of annotations.
"""
filename_fn = lambda _, row: row['fid'].strip()
start_time_fn = lambda row: float(row['start_time_s'])
end_time_fn = lambda row: float(row['end_time_s'])
# Get rid of the one bad label in the dataset...
filter_fn = lambda row: 'comros' in row['ebird_codes']
class_fn = lambda row: row['ebird_codes'].split(' ')
annos = annotations.read_dataset_annotations_csvs(
[annotations_path],
filename_fn=filename_fn,
namespace='ebird2021',
class_fn=class_fn,
start_time_fn=start_time_fn,
end_time_fn=end_time_fn,
filter_fn=filter_fn,
)
segments = annotations.annotations_to_dataframe(annos)
return segments
def load_cornell_annotations(annotations_path: epath.Path) -> pd.DataFrame:
"""Load the annotations from a Cornell Zenodo dataset."""
start_time_fn = lambda row: float(row['Start Time (s)'])
end_time_fn = lambda row: float(row['End Time (s)'])
filter_fn = lambda row: False
class_fn = lambda row: [ # pylint: disable=g-long-lambda
row['Species eBird Code'].strip().replace('????', 'unknown')
]
filename_fn = lambda filepath, row: row['Filename'].strip()
annos = annotations.read_dataset_annotations_csvs(
[annotations_path],
filename_fn=filename_fn,
namespace='ebird2021',
class_fn=class_fn,
start_time_fn=start_time_fn,
end_time_fn=end_time_fn,
filter_fn=filter_fn,
)
segments = annotations.annotations_to_dataframe(annos)
return segments
# TODO(tomdenton): Eliminate these 'combine' functions.
# Reading directly from the set of annotation files will be more direct and
# less error prone when updating datasets.
def combine_powdermill_annotations(
dataset_path: epath.Path, output_filepath: epath.Path
) -> None:
"""Combine all Powdermill dataset annotations into a single csv."""
tables = dataset_path.glob('*/*.txt')
fieldnames = [
'Selection',
'View',
'Channel',
'Begin Time (s)',
'End Time (s)',
'High Freq (Hz)',
'Low Freq (Hz)',
'Species',
]
rows = []
for table_fp in tables:
with table_fp.open('r') as f:
reader = csv.DictReader(f, delimiter='\t', fieldnames=fieldnames)
subdir_name = table_fp.parent.name
audio_filename = os.path.basename(table_fp).split('.')[0] + '.wav'
for row in reader:
# Some annotation files have a header, and some do not.
# So we skip the headers when present.
if row['View'] == 'View':
continue
# The filename in the row doesn't include the file's directory.
row['Filename'] = os.path.join(subdir_name, audio_filename)
rows.append(row)
with output_filepath.open('w') as f:
fieldnames.append('Filename')
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(rows)
def load_powdermill_annotations(annotations_path: epath.Path) -> pd.DataFrame:
"""Load the dataframe of all Powdermill annotations from annotation CSV."""
start_time_fn = lambda row: float(row['Begin Time (s)'])
end_time_fn = lambda row: float(row['End Time (s)'])
filter_fn = lambda row: False
# Convert dataset labels to ebird2021.
db = namespace_db.load_db()
ebird_mapping = db.mappings['ibp2019_to_ebird2021']
ebird_mapping_dict = ebird_mapping.mapped_pairs
class_fn = lambda row: [ # pylint: disable=g-long-lambda
ebird_mapping_dict.get(row['Species'].strip(), row['Species'].strip())
]
annotation_filepaths = [annotations_path]
filename_fn = lambda filepath, row: row['Filename'].strip()
annos = annotations.read_dataset_annotations_csvs(
annotation_filepaths,
filename_fn=filename_fn,
namespace=ebird_mapping.target_namespace,
class_fn=class_fn,
start_time_fn=start_time_fn,
end_time_fn=end_time_fn,
filter_fn=filter_fn,
)
segments = annotations.annotations_to_dataframe(annos)
return segments
def load_weldy_annotations(annotations_path: epath.Path) -> pd.DataFrame:
"""Loads a dataframe of all annotations from the Weldy Calltype dataset."""
filename_fn = lambda _, row: 'annotated_recordings/' + row['file'].strip()
start_time_fn = lambda row: float(row['start'])
end_time_fn = lambda row: float(row['end'])
filter_fn = lambda row: False
class_fn = lambda row: ( # pylint: disable=g-long-lambda
row['label']
.replace('unk', 'unknown')
.replace('impossible', 'unknown')
.replace('unknown_chip', 'unknown')
.split(' ')
)
annos = annotations.read_dataset_annotations_csvs(
[epath.Path(annotations_path)],
filename_fn=filename_fn,
namespace='weldy_calltype',
class_fn=class_fn,
start_time_fn=start_time_fn,
end_time_fn=end_time_fn,
filter_fn=filter_fn,
)
segments = annotations.annotations_to_dataframe(annos)
return segments
def load_anuraset_annotations(annotations_path: epath.Path) -> pd.DataFrame:
"""Loads a dataframe of all annotations."""
filename_fn = lambda _, row: os.path.join( # pylint: disable=g-long-lambda
row['filename'].split('_')[0], row['filename'].strip()
)
start_time_fn = lambda row: float(row['start_time_s'])
end_time_fn = lambda row: float(row['end_time_s'])
# There are a few SPECIES_LALSE labels which according to the authors should
# be ignored.
filter_fn = lambda row: '_LALSE' in row['label']
class_fn = lambda row: row['label'].split(' ')
annos = annotations.read_dataset_annotations_csvs(
[epath.Path(annotations_path)],
filename_fn=filename_fn,
namespace='anuraset',
class_fn=class_fn,
start_time_fn=start_time_fn,
end_time_fn=end_time_fn,
filter_fn=filter_fn,
)
segments = annotations.annotations_to_dataframe(annos)
return segments
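# Editor's usage sketch for the annotation loaders above: each takes the path
# to a combined annotations CSV and returns a DataFrame with per-segment rows
# (columns such as 'filename', 'start_time_s', 'end_time_s', 'label'). The
# path below is hypothetical:
#
#   segments = load_cornell_annotations(
#       epath.Path('/data/hawaii/annotations.csv'))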
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Soundscape datasets."""
import dataclasses
import tempfile
from typing import Any, Callable, Sequence
import warnings
from absl import logging
from chirp import audio_utils
from chirp.data import tfds_features
from chirp.data.bird_taxonomy import bird_taxonomy
from chirp.data.soundscapes import dataset_fns
from chirp.data.soundscapes import soundscapes_lib
from etils import epath
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
_DESCRIPTION = """
Soundscape datasets.
"""
_CITATION = """
@inproceedings{kahl2019overview,
title={Overview of BirdCLEF 2019: large-scale bird recognition in soundscapes},
author={Kahl, Stefan and St{\"o}ter, Fabian-Robert and Go{\"e}au, Herv{\'e} and Glotin, Herv{\'e} and Planque, Robert and Vellinga, Willem-Pier and Joly, Alexis},
booktitle={Working Notes of CLEF 2019-Conference and Labs of the Evaluation Forum},
number={2380},
pages={1--9},
year={2019},
organization={CEUR}
}
"""
@dataclasses.dataclass
class SoundscapesConfig(bird_taxonomy.BirdTaxonomyConfig):
"""Dataset configuration for Soundscape datasets.
Attributes:
audio_glob: Pattern to match to find audio files.
class_list_name: The name of the ClassList to use for labels. This is
typically a list of either all regionally feasible species in the area
(for fully-annotated datasets) or the list of all species annotated (if
only a subset has been labeled).
    metadata_load_fn: Because the metadata don't always appear in the same
      format, we specify for each config the way to load metadata. This function
outputs a dataframe, where each row contains the metadata for some audio
file. The column names of the dataframe should match the keys in
metadata_fields.
metadata_fields: Maps the fields of the metadata DataFrame to tfds.features
datatypes.
    annotation_filename: Filename for the annotations file. Defaults to None,
      in which case the filename is derived from the config's class_list_name.
annotation_load_fn: Because the annotations don't always appear in the same
format, we specify a function to load the annotations.
    keep_unknown_annotation: An "unknown" annotation appears in some datasets.
      This boolean decides whether to keep this annotation (and therefore add
      a species named "unknown" to the label space) or to scrub all "unknown"
      annotations.
full_length_unknown_guard: If True, add an "unknown" annotation from the
beginning of the recording to the beginning of the first annotation and
another "unknown" annotation from the end of the last annotation to the
end of the recording.
supervised: Whether this is a supervised dataset. If so, any segment which
overlaps an 'unknown' label will be dropped (to avoid downward bias on
eval stats).
audio_dir: Base directory for soundscapes data.
"""
audio_glob: str = ''
class_list_name: str = ''
metadata_load_fn: soundscapes_lib.MetadataLoaderType | None = None
metadata_fields: dict[str, soundscapes_lib.MetadataFeature] | None = None
annotation_filename: str | None = None
annotation_load_fn: Callable[[epath.Path], pd.DataFrame] | None = None
keep_unknown_annotation: bool = False
full_length_unknown_guard: bool = False
supervised: bool = True
audio_dir = epath.Path('gs://chirp-public-bucket/soundscapes')
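# Editor's note: whether a config yields fixed-length chunks or full-length
# recordings is determined by `localization_fn` (None means full-length). A
# minimal illustrative pair, mirroring the BUILDER_CONFIGS below (the names
# and globs are hypothetical):
#
#   chunked = SoundscapesConfig(
#       name='example', audio_glob='example/audio/*.flac',
#       class_list_name='example', interval_length_s=5.0,
#       localization_fn=audio_utils.slice_peaked_audio,
#       annotation_load_fn=dataset_fns.load_cornell_annotations,
#       annotation_filename='annotations.csv')
#   full_length = SoundscapesConfig(
#       name='example_full_length', audio_glob='example/audio/*.flac',
#       class_list_name='example',
#       annotation_load_fn=dataset_fns.load_cornell_annotations,
#       annotation_filename='annotations.csv')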
class Soundscapes(bird_taxonomy.BirdTaxonomy):
"""DatasetBuilder for soundscapes data."""
VERSION = tfds.core.Version('1.3.0')
RELEASE_NOTES = {
'1.0.0': (
'Initial release. The label set corresponds to the full '
'set of ~11 000 Xeno-Canto species.'
),
'1.0.1': (
          'The label set is now restricted to the species present in each '
'dataset.'
),
'1.0.2': (
'Streamlines data handling, and adds handling for a new '
'Sapsucker Woods dataset.'
),
'1.0.3': (
'Adds handling for the new Cornell Sierra Nevadas dataset and '
'the KitzesLab Powdermill dataset.'
),
'1.0.4': 'Adds a unique recording ID and a segment ID to all samples.',
'1.0.5': (
'Adds Peru dataset and moves to new version of SSW annotations. '
'Supervised segments with the "unknown" label are now dropped.'
),
'1.0.6': 'Updates the dataset following Int16AsFloatTensor refactoring.',
'1.0.7': 'Fix some dropped annotations in the Hawaii dataset.',
'1.1.0': 'Adds full-length variants.',
'1.2.0': 'Updated ebird2021 taxonomy.',
'1.3.0': (
'Switch to Zenodo versions of soundscape datasets from Cornell. '
'Add the Colombia+Costa Rica dataset, remove the Colombia-only '
'BirdCLEF dataset.'
),
}
BUILDER_CONFIGS = [
# pylint: disable=unexpected-keyword-arg
SoundscapesConfig(
name='caples', # TODO(mboudiaf) Try to interface caples metadata.
class_list_name='caples',
audio_glob='caples/audio/*',
interval_length_s=5.0,
localization_fn=audio_utils.slice_peaked_audio,
annotation_load_fn=dataset_fns.load_caples_annotations,
description='Annotated Caples recordings from 2018/2019.',
),
SoundscapesConfig(
name='caples_full_length',
class_list_name='caples',
audio_glob='caples/audio/*',
annotation_filename='caples.csv',
annotation_load_fn=dataset_fns.load_caples_annotations,
keep_unknown_annotation=True,
# Some recordings in Caples are only partially-annotated, so to avoid
# scoring legitimate model predictions as false positives we pad with
# "unknown" annotations before the first annotation and after the last
# annotation.
full_length_unknown_guard=True,
description='Full-length annotated Caples recordings from 2018/2019.',
),
SoundscapesConfig(
name='hawaii',
audio_glob='hawaii/audio/*.flac',
interval_length_s=5.0,
localization_fn=audio_utils.slice_peaked_audio,
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'Fully annotated Hawaii recordings. '
'https://zenodo.org/record/7078499'
),
class_list_name='hawaii',
),
SoundscapesConfig(
name='hawaii_full_length',
audio_glob='hawaii/audio/*.flac',
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'Full-length, fully annotated Hawaii recordings. '
'https://zenodo.org/record/7078499'
),
class_list_name='hawaii',
),
SoundscapesConfig(
name='ssw',
audio_glob='ssw/audio/*.flac',
interval_length_s=5.0,
localization_fn=audio_utils.slice_peaked_audio,
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
description=(
'Annotated Sapsucker Woods recordings. '
'https://zenodo.org/record/7018484'
),
class_list_name='ssw',
),
SoundscapesConfig(
name='ssw_full_length',
audio_glob='ssw/audio/*.flac',
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'Full-length, annotated Sapsucker Woods recordings. '
'https://zenodo.org/record/7018484'
),
class_list_name='ssw',
),
SoundscapesConfig(
name='coffee_farms',
audio_glob='coffee_farms/audio/*.flac',
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
interval_length_s=5.0,
localization_fn=audio_utils.slice_peaked_audio,
keep_unknown_annotation=True,
description=(
'Colombian and Costa Rican coffee farm recordings. '
'https://zenodo.org/record/7525349'
),
class_list_name='coffee_farms',
),
SoundscapesConfig(
name='coffee_farms_full_length',
audio_glob='coffee_farms/audio/*.flac',
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'Full-length Colombian and Costa Rican coffee farm recordings. '
'https://zenodo.org/record/7525349'
),
class_list_name='coffee_farms',
),
SoundscapesConfig(
name='high_sierras',
audio_glob='high_sierras/audio/*.flac',
interval_length_s=5.0,
localization_fn=audio_utils.slice_peaked_audio,
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'High Sierras recordings. https://zenodo.org/record/7525805'
),
class_list_name='high_sierras',
),
SoundscapesConfig(
name='high_sierras_full_length',
audio_glob='high_sierras/audio/*.flac',
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'Full-length High Sierras recordings. '
'https://zenodo.org/record/7525805'
),
class_list_name='high_sierras',
),
SoundscapesConfig(
name='sierras_kahl',
audio_glob='sierras_kahl/audio/*.flac',
interval_length_s=5.0,
localization_fn=audio_utils.slice_peaked_audio,
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'Sierra Nevada recordings. https://zenodo.org/record/7050014'
),
class_list_name='sierras_kahl',
),
SoundscapesConfig(
name='sierras_kahl_full_length',
audio_glob='sierras_kahl/audio/*.flac',
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'Full-length Sierra Nevada recordings. '
'https://zenodo.org/record/7050014'
),
class_list_name='sierras_kahl',
),
SoundscapesConfig(
name='powdermill',
audio_glob='powdermill/*/*.wav',
interval_length_s=5.0,
localization_fn=audio_utils.slice_peaked_audio,
annotation_load_fn=dataset_fns.load_powdermill_annotations,
description=(
'New England recordings from Powdermill Nature Reserve, '
'Rector, PA. https://doi.org/10.1002/ecy.3329'
),
class_list_name='powdermill',
),
SoundscapesConfig(
name='powdermill_full_length',
audio_glob='powdermill/*/*.wav',
annotation_filename='powdermill.csv',
annotation_load_fn=dataset_fns.load_powdermill_annotations,
keep_unknown_annotation=True,
description=(
'Full-length New England recordings from Powdermill '
'Nature Reserve, Rector, PA. '
'https://doi.org/10.1002/ecy.3329'
),
class_list_name='powdermill',
),
SoundscapesConfig(
name='peru',
audio_glob='peru/audio/*.flac',
interval_length_s=5.0,
localization_fn=audio_utils.slice_peaked_audio,
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'Soundscapes from the SW Amazon basin. '
'https://zenodo.org/record/7079124#.YypL8-xufhM'
),
class_list_name='peru',
),
SoundscapesConfig(
name='peru_full_length',
audio_glob='peru/audio/*.flac',
annotation_load_fn=dataset_fns.load_cornell_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'Full-length soundscapes from the SW Amazon basin. '
'https://zenodo.org/record/7079124#.YypL8-xufhM'
),
class_list_name='peru',
),
SoundscapesConfig(
name='weldy_calltype_full_length',
audio_glob='weldy_calltype/annotated_recordings/*.wav',
annotation_load_fn=dataset_fns.load_weldy_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'Full-length annotated bird call types from the PNW. '
'https://zenodo.org/record/8047850'
),
class_list_name='weldy_calltype',
),
SoundscapesConfig(
name='anuraset_full_length',
audio_glob='anuraset/raw_data/*/*.wav',
annotation_load_fn=dataset_fns.load_anuraset_annotations,
annotation_filename='annotations.csv',
keep_unknown_annotation=True,
description=(
'Full-length annotated frog vocalizations. '
'https://zenodo.org/record/8056090'
),
class_list_name='anuraset',
),
]
def _info(self) -> tfds.core.DatasetInfo:
dataset_class_list = soundscapes_lib.load_class_list(
self.builder_config.class_list_name,
self.builder_config.keep_unknown_annotation,
)
logging.info(
'Currently considering a total of %s species.',
len(dataset_class_list.classes),
)
full_length = self.builder_config.localization_fn is None
if full_length:
audio_feature_shape = [None]
else:
audio_feature_shape = [
int(
self.builder_config.sample_rate_hz
* self.builder_config.interval_length_s
)
]
common_features = {
'audio': tfds_features.Int16AsFloatTensor(
shape=audio_feature_shape,
sample_rate=self.builder_config.sample_rate_hz,
encoding=tfds.features.Encoding.ZLIB,
),
'label': tfds.features.Sequence(
tfds.features.ClassLabel(names=dataset_class_list.classes)
),
'filename': tfds.features.Text(),
'recording_id': tfds.features.Scalar(dtype=tf.uint64),
'segment_id': tfds.features.Scalar(dtype=tf.int64),
'segment_start': tfds.features.Scalar(dtype=tf.uint64),
'segment_end': tfds.features.Scalar(dtype=tf.uint64),
}
if full_length:
common_features.update({
'annotation_start': tfds.features.Sequence(
tfds.features.Scalar(dtype=tf.uint64)
),
'annotation_end': tfds.features.Sequence(
tfds.features.Scalar(dtype=tf.uint64)
),
})
if self.builder_config.metadata_load_fn is not None:
if self.builder_config.metadata_fields is None:
raise ValueError(
"If a 'metadata_load_fn' is specified, then the"
"'metadata_fields' mapping must also be specied."
)
additional_features = {
k.target_key: k.feature_type
for k in self.builder_config.metadata_fields.values()
}
common_features.update(additional_features)
return tfds.core.DatasetInfo(
builder=self,
features=tfds.features.FeaturesDict(common_features),
supervised_keys=('audio', 'label'),
homepage='https://github.com/google-research/chirp',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
# Defined as part of the tfds API for dividing the dataset into splits.
# https://www.tensorflow.org/datasets/add_dataset#specifying_dataset_splits
dl_manager._force_checksums_validation = ( # pylint: disable=protected-access
False
)
# Get the state from the dl_manager which we'll use to create segments.
all_audio_filenames = self.builder_config.audio_dir.glob(
self.builder_config.audio_glob
)
if self.builder_config.supervised:
# For supervised data, we first grab the annotated segments.
filename = (
self.builder_config.annotation_filename
or f'{self.builder_config.class_list_name}.csv'
)
annotations_path = dl_manager.download_and_extract({
'segments': (
self.builder_config.audio_dir
/ self.builder_config.class_list_name
/ filename
).as_posix(),
})['segments']
annotations_df = self.builder_config.annotation_load_fn(annotations_path)
else:
annotations_df = None
segments = soundscapes_lib.create_segments_df(
all_audio_filenames,
annotations_df,
self.builder_config.supervised,
self.builder_config.audio_dir,
self.builder_config.metadata_fields,
self.builder_config.metadata_load_fn,
)
return {
'train': self._generate_examples(segments=segments),
}
def _generate_examples(self, segments: pd.DataFrame):
"""Generate examples from the dataframe of segments.
Args:
segments: Dataframe of segments. Each row (=segment) must minimally
contain the following fields: ['filename', 'url', 'label',
'start_time_s', 'end_time_s'].
Returns:
List of valid segments.
"""
beam = tfds.core.lazy_imports.apache_beam
librosa = tfds.core.lazy_imports.librosa
info = self._info()
full_length = self.builder_config.localization_fn is None
# Drop any extraneous columns.
for k in segments.columns.values:
if k not in info.features and k not in [
'url',
'start_time_s',
'end_time_s',
]:
segments = segments.drop(k, axis=1)
def _process_group(
group: tuple[int, tuple[str, pd.DataFrame]]
) -> Sequence[tuple[str, dict[str, Any]]]:
# Each filename gets a unique ID
recording_id, (filename, segment_group) = group
# Each segment in segment_group will generate a tf.Example. A lot of
# fields, especially metadata ones will be shared between segments.
# Therefore, we create a template.
recording_template = segment_group.iloc[0].copy()
recording_template['recording_id'] = recording_id
url = recording_template['url']
# Remove all the fields we don't need from the recording_template. We
# set errors='ignore' as some fields to be dropped may already not
# exist.
recording_template = recording_template.drop(
['url', 'start_time_s', 'end_time_s'], errors='ignore'
).to_dict()
# Load the audio associated with this group of segments
with tempfile.NamedTemporaryFile(mode='w+b', suffix=url.suffix) as f:
f.write(url.read_bytes())
# librosa outputs lots of warnings which we can safely ignore when
# processing all Xeno-Canto files and PySoundFile is unavailable.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
sr = self.builder_config.sample_rate_hz
try:
audio, _ = librosa.load(
f.name, sr=sr, res_type=self.builder_config.resampling_method
)
except Exception as inst: # pylint: disable=broad-except
# We have no idea what can go wrong in librosa, so we catch a broad
# exception here.
logging.warning(
(
                    'The audio at %s could not be loaded. The following '
                    'exception occurred: %s'
),
url,
inst,
)
return []
if not full_length:
# We remove all short audios. These short audios are only observed
# among caples_2020 unlabelled recordings.
target_length = int(sr * self.builder_config.interval_length_s)
if len(audio) < target_length:
logging.warning('Skipping audio at %s because too short.', url)
return []
# Resampling can introduce artifacts that push the signal outside the
# [-1, 1) interval.
audio = np.clip(audio, -1.0, 1.0 - (1.0 / float(1 << 15)))
class_list = soundscapes_lib.load_class_list(
self.builder_config.class_list_name,
self.builder_config.keep_unknown_annotation,
)
if full_length:
annotations = soundscapes_lib.get_full_length_annotations(
audio,
segment_group,
class_list,
self.builder_config.sample_rate_hz,
unknown_guard=self.builder_config.full_length_unknown_guard,
)
if annotations.empty:
return []
beam.metrics.Metrics.counter('soundscapes', 'examples').inc()
return [(
filename,
{
**recording_template,
'label': annotations['label'].tolist(),
'audio': audio,
'segment_start': 0,
'segment_end': len(audio),
'segment_id': 0,
'annotation_start': annotations['annotation_start'].tolist(),
'annotation_end': annotations['annotation_end'].tolist(),
},
)]
else:
labeled_intervals = soundscapes_lib.get_labeled_intervals(
audio,
segment_group,
class_list,
self.builder_config.sample_rate_hz,
self.builder_config.interval_length_s,
self.builder_config.localization_fn,
self.builder_config.supervised,
)
# Create a tf.Example for every segment.
valid_segments = []
for index, ((start, end), segment_labels) in enumerate(
labeled_intervals.items()
):
key = f'{filename}_{index}'
valid_segments.append((
key,
{
**recording_template,
'label': list(segment_labels),
'audio': audio[start:end],
'segment_start': start,
'segment_end': end,
'segment_id': index,
},
))
beam.metrics.Metrics.counter('soundscapes', 'examples').inc()
return valid_segments
for group in enumerate(segments.groupby('filename')):
for key, example in _process_group(group):
yield key, example
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bird taxonomy dataset."""
# pylint: disable=unused-import
from .bird_taxonomy import BirdTaxonomy
from .bird_taxonomy import BirdTaxonomyConfig
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bird taxonomy dataset."""
import dataclasses
import functools
import resource
import tempfile
from typing import Any, Callable
import warnings
from chirp import audio_utils
from chirp.data import filter_scrub_utils as fsu
from chirp.data import tfds_features
from chirp.data.bird_taxonomy import premade_queries
from chirp.taxonomy import namespace_db
from etils import epath
import jax
from jax import numpy as jnp
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
_DESCRIPTION = """
Bird taxonomy dataset of Xeno-Canto audio files.
"""
_CITATION = """
@inproceedings{vellinga2015xeno,
title={The Xeno-canto Collection and its Relation to Sound Recognition and Classification.},
author={Vellinga, Willem-Pier and Planqu{\'e}, Robert},
booktitle={CLEF (Working Notes)},
year={2015}
}
Credit for individual audio recordings can be viewed by visiting
https://xeno-canto.org/{xeno_canto_id}, and a given example's Xeno-Canto ID can
be retrieved from the 'filename' feature: 'XC{xeno_canto_id}.mp3'.
"""
# The maximum audio sequence length to consider if a localization function is
# provided. This is 5 * 60 seconds = 5 minutes.
_MAX_LOCALIZATION_LENGTH_S = 5 * 60
LocalizationFn = Callable[[Any, int, float], jnp.ndarray]
@dataclasses.dataclass
class BirdTaxonomyConfig(tfds.core.BuilderConfig):
"""The config used to generate multiple versions of BirdTaxonomy.
  Special note on processing queries: Because some queries don't make sense
  when applied to the metadata dataframe (e.g. scrubbing), we make a distinction
between `data_processing_query` applied to the recordings' dataframe, and
`metadata_processing_query` applied to the metadata (used in _info()).
Checks are made downstream to ensure both dataframes encode consistent
label spaces.
"""
sample_rate_hz: int = 32_000
resampling_method: str = 'polyphase'
localization_fn: LocalizationFn | None = None
interval_length_s: float | None = None
data_processing_query: fsu.QuerySequence = fsu.QuerySequence(queries=[])
metadata_processing_query: fsu.QuerySequence = fsu.QuerySequence(queries=[])
class_list_name: str = 'xenocanto'
class BirdTaxonomy(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for the bird taxonomy dataset."""
VERSION = tfds.core.Version('2.1.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
'1.1.0': (
'Switched to higher sampling rate, added recording metadata '
'features, switched to log-scaling in slice_peaked_audio.'
),
'1.1.1': 'Added slice_peaked_tiny config.',
'1.1.2': (
          'Kept previous tiny_config as reference, but also added a tiny '
'version generated with queries.'
),
'1.2.0': 'Added upstream data config.',
'1.2.1': (
          'Added downstream data config. Fixed the upstream query. '
'Bumped the taxonomy_info to 2022-07-18.'
),
'1.2.2': (
          'Replacing any non-relevant foreground annotation in the '
          'downstream data with "ignore" class: downstream data only '
'contains relevant annotations + "ignore" class.'
),
'1.2.3': (
'Removing any non-relevant annotation from foreground or '
          'background in downstream data: downstream data only '
          'contains relevant annotations. Also removing order, family and '
          'genus metadata, as those will be added in the TF-based '
'processing pipeline.'
),
'1.2.4': 'Adds a unique recording ID and a segment ID to all samples.',
'1.2.5': 'Refactor Int16AsFloatTensor out of BirdTaxonomy.',
'1.3.0': (
'Added "upstream_full_length", "downstream_full_length", '
'"upstream_ar_only_slice_peaked", and '
'"upstream_ar_only_full_length" variants. Removed '
'"slice_peaked_tiny_reference" variant.'
),
'1.4.0': 'Added a seabird_sliced_peaked dataset.',
'1.5.0': 'Updated ebird2021 taxonomy.',
'2.0.0': (
"Updated the upstream split to align with Coffee Farms and Hawai'i."
),
'2.1.0': (
"Added a 'class_representatives_slice_peaked' variant which contains "
'recordings for High Sierras, Sierra Nevada, and Peru species in '
'addition to recordings for artificially-rare and downstream species.'
),
}
BUILDER_CONFIGS = [
# pylint: disable=unexpected-keyword-arg
BirdTaxonomyConfig(
name='slice_peaked',
localization_fn=audio_utils.slice_peaked_audio,
interval_length_s=6.0,
description=(
'Chunked audio sequences processed with '
'chirp.audio_utils.slice_peaked_audio.'
),
),
BirdTaxonomyConfig(
name='slice_peaked_tiny',
localization_fn=functools.partial(
audio_utils.slice_peaked_audio, max_intervals=1
),
interval_length_s=6.0,
description=(
'A tiny version of the slice_peaked dataset '
'containing only two species'
),
data_processing_query=fsu.QuerySequence([
fsu.filter_in_class_list('species_code', 'tiny_species'),
fsu.scrub_all_but_class_list('bg_species_codes', 'tiny_species'),
]),
metadata_processing_query=fsu.QuerySequence([
fsu.filter_in_class_list('species_code', 'tiny_species'),
]),
),
BirdTaxonomyConfig(
name='upstream_slice_peaked',
localization_fn=audio_utils.slice_peaked_audio,
interval_length_s=6.0,
data_processing_query=premade_queries.get_upstream_data_query(),
metadata_processing_query=premade_queries.get_upstream_metadata_query(),
description=(
'Upstream data version with chunked audio sequences '
'processed with chirp.audio_utils.slice_peaked_audio.'
),
),
BirdTaxonomyConfig(
name='upstream_ar_only_slice_peaked',
localization_fn=audio_utils.slice_peaked_audio,
interval_length_s=6.0,
data_processing_query=premade_queries.get_upstream_data_query(
ar_only=True
),
metadata_processing_query=premade_queries.get_upstream_metadata_query(),
description=(
'Upstream data version (AR-only) with chunked audio '
'sequences processed with '
'chirp.audio_utils.slice_peaked_audio.'
),
),
BirdTaxonomyConfig(
name='downstream_slice_peaked',
localization_fn=audio_utils.slice_peaked_audio,
interval_length_s=6.0,
data_processing_query=premade_queries.get_downstream_data_query(),
metadata_processing_query=premade_queries.get_downstream_metadata_query(),
description=(
'Downstream data version with chunked audio sequences '
'processed with chirp.audio_utils.slice_peaked_audio.'
),
),
BirdTaxonomyConfig(
name='class_representatives_slice_peaked',
localization_fn=audio_utils.slice_peaked_audio,
interval_length_s=6.0,
data_processing_query=(
premade_queries.get_class_representatives_data_query()
),
metadata_processing_query=(
premade_queries.get_class_representatives_metadata_query()
),
description=(
'All recordings available to be used as class representatives '
            '(namely recordings for artificially-rare, downstream, High '
'Sierras, Sierra Nevada, and Peru), processed with '
'chirp.audio_utils.slice_peaked_audio.'
),
),
BirdTaxonomyConfig(
name='full_length',
localization_fn=None,
description='Full-length audio sequences.',
),
BirdTaxonomyConfig(
name='upstream_full_length',
localization_fn=None,
data_processing_query=premade_queries.get_upstream_data_query(),
metadata_processing_query=premade_queries.get_upstream_metadata_query(),
description='Upstream data with full-length audio sequences.',
),
BirdTaxonomyConfig(
name='upstream_ar_only_full_length',
localization_fn=None,
data_processing_query=premade_queries.get_upstream_data_query(
ar_only=True
),
metadata_processing_query=premade_queries.get_upstream_metadata_query(),
description=(
'Upstream data (AR-only) with full-length audio sequences.'
),
),
BirdTaxonomyConfig(
name='downstream_full_length',
localization_fn=None,
data_processing_query=premade_queries.get_downstream_data_query(),
metadata_processing_query=premade_queries.get_downstream_metadata_query(),
description='Downstream data with full-length audio sequences.',
),
BirdTaxonomyConfig(
name='seabird_slice_peaked',
localization_fn=audio_utils.slice_peaked_audio,
interval_length_s=6.0,
description=(
'Seabird dataset consisting of data '
'with chunked audio sequences processed with '
'chirp.audio_utils.slice_peaked_audio.'
),
data_processing_query=fsu.QuerySequence([
fsu.filter_in_class_list(
'species_code', 'ebird2021_global_seabirds'
),
fsu.scrub_all_but_class_list(
'bg_species_codes', 'ebird2021_global_seabirds'
),
]),
metadata_processing_query=fsu.QuerySequence([
fsu.filter_in_class_list(
'species_code', 'ebird2021_global_seabirds'
),
]),
),
]
GCS_URL = epath.Path('gs://chirp-public-bucket/xeno-canto')
TAXONOMY_INFO_FILENAME = 'taxonomy_info_2022-07-18.json'
def _load_taxonomy_metadata(self, disable_filtering: bool = False):
"""Loads the taxonomy for the dataset."""
db = namespace_db.load_db()
dataset_classes = list(
db.class_lists[self.builder_config.class_list_name].classes
)
taxonomy_df = pd.DataFrame(dataset_classes, columns=['species_code'])
if not disable_filtering:
# We apply all the metadata processing queries
taxonomy_df = fsu.apply_sequence(
taxonomy_df, self.builder_config.metadata_processing_query
)
return taxonomy_df
def _info(self) -> tfds.core.DatasetInfo:
full_length = self.builder_config.localization_fn is None
audio_feature_shape = [
None
if full_length
else int(
self.builder_config.sample_rate_hz
* self.builder_config.interval_length_s
)
]
if tf.io.gfile.exists(self._data_dir):
# If this data exists on disk, load the labels from there
class_names = None
else:
# Load the class list relevant to the file
class_names = self._load_taxonomy_metadata()['species_code'].tolist()
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'audio': tfds_features.Int16AsFloatTensor(
shape=audio_feature_shape,
sample_rate=self.builder_config.sample_rate_hz,
encoding=tfds.features.Encoding.ZLIB,
),
'recording_id': tfds.features.Scalar(dtype=tf.uint64),
'segment_id': tfds.features.Scalar(dtype=tf.int64),
'segment_start': tfds.features.Scalar(dtype=tf.uint64),
'segment_end': tfds.features.Scalar(dtype=tf.uint64),
'label': tfds.features.Sequence(
tfds.features.ClassLabel(names=class_names)
),
'bg_labels': tfds.features.Sequence(
tfds.features.ClassLabel(names=class_names)
),
'filename': tfds.features.Text(),
'quality_score': tfds.features.Text(),
'license': tfds.features.Text(),
'altitude': tfds.features.Text(),
'length': tfds.features.Text(),
'bird_seen': tfds.features.Text(),
'country': tfds.features.Text(),
'latitude': tfds.features.Text(),
'longitude': tfds.features.Text(),
'playback_used': tfds.features.Text(),
'recordist': tfds.features.Text(),
'remarks': tfds.features.Text(),
'sound_type': tfds.features.Text(),
}),
supervised_keys=('audio', 'label'),
homepage='https://github.com/google-research/chirp',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
# Increase the file handle resource soft limit to the hard limit. The
# dataset is large enough that it causes TFDS to hit the soft limit.
_low, _high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (_high, _high))
# No checksum is found for the new taxonomy_info. dl_manager may raise
# an error when removing the line below.
dl_manager._force_checksums_validation = (
False # pylint: disable=protected-access
)
paths = dl_manager.download_and_extract({
'taxonomy_info': (
self.GCS_URL / self.TAXONOMY_INFO_FILENAME
).as_posix(),
})
# Load taxonomy_info, which is a superset of taxonomy_metadata that also
# includes information on the Xeno-Canto files associated with each
# species.
taxonomy_info = pd.read_json(paths['taxonomy_info'])
# Workaround for pandas<1.3.0's lack of multi-column explode. We set the
# index to the non-exploding columns before applying pd.Series.explode
# to the other columns and resetting the index.
source_info = (
taxonomy_info[
taxonomy_info['xeno_canto_ids'].map(
lambda xc_ids: bool(len(xc_ids))
)
]
.set_index([
'species_code',
'xeno_canto_query',
'scientific_name',
'species',
'genus',
'family',
'order',
'common_name',
])
.apply(pd.Series.explode, axis=0)
.reset_index()
)
# Rename columns to reflect the fact that they contain one value per row.
renames = {
'xeno_canto_ids': 'xeno_canto_id',
'altitudes': 'altitude',
'lengths': 'length',
'countries': 'country',
'file_formats': 'file_format',
'latitudes': 'latitude',
'licenses': 'license',
'longitudes': 'longitude',
'quality_scores': 'quality_score',
'recordists': 'recordist',
'sound_types': 'sound_type',
}
source_info = source_info.rename(renames, axis=1)
get_format = lambda s: s['file_format']
get_xc_id = lambda s: s['xeno_canto_id']
to_name = lambda s: f"{s['species_code']}/XC{get_xc_id(s)}.{get_format(s)}"
source_info['url'] = source_info.apply(
lambda s: self.GCS_URL / f'audio-data/{to_name(s)}', axis=1
)
# Apply all the processing queries.
source_info = fsu.apply_sequence(
source_info, self.builder_config.data_processing_query
)
# Remap '' and 'no score' scores to 'E' (the worst score).
source_info['quality_score'] = source_info['quality_score'].map(
lambda s: 'E' if s in ('', 'no score') else s
)
# Remap None to '' for the 'latitude' and 'longitude' columns.
for column in ['latitude', 'longitude']:
source_info[column] = source_info[column].map(lambda s: s or '')
return {
'train': self._generate_examples(source_info=source_info),
}
def _generate_examples(self, source_info: pd.DataFrame):
beam = tfds.core.lazy_imports.apache_beam
librosa = tfds.core.lazy_imports.librosa
def _process_example(row):
recording_id, source = row
with tempfile.NamedTemporaryFile(
mode='w+b', suffix=source['url'].suffix
) as f:
f.write(source['url'].read_bytes())
# librosa outputs lots of warnings which we can safely ignore when
# processing all Xeno-Canto files and PySoundFile is unavailable.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
audio, _ = librosa.load(
f.name,
sr=self.builder_config.sample_rate_hz,
res_type=self.builder_config.resampling_method,
)
# Resampling can introduce artifacts that push the signal outside the
# [-1, 1) interval.
audio = np.clip(audio, -1.0, 1.0 - (1.0 / float(1 << 15)))
# Skip empty audio files.
if audio.shape[0] == 0 or np.max(np.abs(audio)) == 0.0:
return None
# The scrubbed foreground annotations are replaced by ''. When this is the
# case, we translate this annotation into [] rather than [''].
foreground_label = (
[source['species_code']] if source['species_code'] else []
)
return source['xeno_canto_id'], {
'audio': audio,
'recording_id': recording_id,
'segment_id': -1,
'segment_start': 0,
'segment_end': len(audio),
'label': foreground_label,
'bg_labels': source['bg_species_codes'],
'filename': source['url'].name,
'quality_score': source['quality_score'],
'license': source['license'],
'altitude': source['altitude'],
'length': source['length'],
'bird_seen': source['bird_seen'],
'country': source['country'],
'latitude': source['latitude'],
'longitude': source['longitude'],
'playback_used': source['playback_used'],
'recordist': source['recordist'],
'remarks': source['remarks'],
'sound_type': source['sound_type'],
}
if self.builder_config.localization_fn:
def localize_intervals_fn(args):
key, example = args
sample_rate_hz = self.builder_config.sample_rate_hz
interval_length_s = self.builder_config.interval_length_s
target_length = int(sample_rate_hz * interval_length_s)
audio = example['audio']
# We limit audio sequence length to _MAX_LOCALIZATION_LENGTH_S when
# localizing intervals because the localization function can result in
# very large memory consumption for long audio sequences.
max_length = sample_rate_hz * _MAX_LOCALIZATION_LENGTH_S
if audio.shape[0] > max_length:
audio = audio[:max_length]
audio = audio_utils.pad_to_length_if_shorter(audio, target_length)
# Pass padded audio to avoid localization_fn having to pad again
audio_intervals = self.builder_config.localization_fn(
audio, sample_rate_hz, interval_length_s
).tolist()
if not audio_intervals:
# If no peaks were found, we take the first segment of the
# recording to avoid discarding it entirely
audio_intervals = [(0, target_length)]
interval_examples = []
for i, (start, end) in enumerate(audio_intervals):
interval_examples.append((
f'{key}_{i}',
{
**example,
'audio': audio[start:end],
'segment_id': i,
'segment_start': start,
'segment_end': end,
},
))
return interval_examples
else:
localize_intervals_fn = None
for i, key_and_example in enumerate(
map(_process_example, source_info.iterrows())
):
# Since the audio files have variable length, the JAX compilation cache
# can use up a large amount of memory after a while.
if i % 100 == 0:
jax.clear_caches()
# Skip empty audio files.
if key_and_example is None:
continue
if localize_intervals_fn:
for key_and_example in localize_intervals_fn(key_and_example):
yield key_and_example
else:
yield key_and_example
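# Illustrative sketch (not part of the original builder): when a localization
# function is configured, each recording key expands into one keyed example per
# detected interval. For example, a recording with xeno_canto_id '123456' and
# two detected peaks yields ('123456_0', {...}) and ('123456_1', {...}), each
# with its own 'segment_id', 'segment_start' and 'segment_end'.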
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of premade queries to generate stable data configs."""
from chirp.data import filter_scrub_utils as fsu
from chirp.taxonomy import namespace_db
# SSW_STATS_PATH contains useful statistics for SSW species, used to guide our
# DFS search in chirp.data.sampling_utils.sample_recordings_under_constraints.
# Those statistics were computed after removing all recordings with foreground
# and background labels belonging to downstream_species.txt
# (mimicking the conditions under which the sampling happens).
DOWNSTREAM_SPECIES_PATH = "data/bird_taxonomy/metadata/downstream_species.txt"
SSW_STATS_PATH = "data/bird_taxonomy/metadata/ssw_stats.json"
DOWNSTREAM_CLASS_LIST = "downstream_species_v2"
AR_CLASS_LIST = "artificially_rare_species_v2"
AR_SAMPLING_PRNG_SEED = 2023 + 5 + 11
def get_upstream_metadata_query() -> fsu.QuerySequence:
db = namespace_db.load_db()
downstream_species = list(db.class_lists[DOWNSTREAM_CLASS_LIST].classes)
return fsu.QuerySequence(
[
fsu.Query(
op=fsu.TransformOp.FILTER,
kwargs={
"mask_op": fsu.MaskOp.NOT_IN,
"op_kwargs": {
"key": "species_code",
"values": downstream_species,
},
},
)
]
)
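# Illustrative sketch (assumes a hypothetical metadata DataFrame `df` with a
# 'species_code' column, as used by the queries above): applying the metadata
# query keeps only rows whose species code is not a downstream species.
#
#   from chirp.data import filter_scrub_utils as fsu
#   upstream_metadata_df = fsu.apply_sequence(df, get_upstream_metadata_query())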
def get_upstream_data_query(ar_only: bool = False) -> fsu.QuerySequence:
"""Produces the QuerySequence to generate upstream data.
Args:
ar_only: if True, only include recordings with artificially rare species
annotations.
Returns:
The QuerySequence to apply.
"""
db = namespace_db.load_db()
downstream_species = list(db.class_lists[DOWNSTREAM_CLASS_LIST].classes)
# NOTE: Artificially rare species are the subset of SSW species which do not
# intersect with the downstream species.
ar_species = list(db.class_lists[AR_CLASS_LIST].classes)
queries = [
# Filter all samples from downstream species
fsu.Query(
op=fsu.TransformOp.FILTER,
kwargs={
"mask_op": fsu.MaskOp.NOT_IN,
"op_kwargs": {
"key": "species_code",
"values": downstream_species,
},
},
),
# Sample recordings from artificially rare (AR) species.
fsu.QuerySequence(
mask_query=fsu.Query(
fsu.MaskOp.IN,
{"key": "species_code", "values": ar_species},
),
queries=[
# Recall that recordings containing downstream_species in the background
# (scrubbed for upstream training) are seen during both training and
# testing. At the same time, several recordings have both SSW species and
# downstream species in the background. Therefore, if such a recording were
# a sampling candidate and ended up being chosen, the same AR recording
# would appear in both the upstream and downstream data, which we want to
# avoid. Hence the filtering op below. Note that ssw_stats contains the
# statistics of recordings for each AR species **after** filtering out all
# recordings that contain any downstream species' annotation.
fsu.Query(
op=fsu.TransformOp.FILTER,
kwargs={
"mask_op": fsu.MaskOp.CONTAINS_NO,
"op_kwargs": {
"key": "bg_species_codes",
"values": downstream_species,
},
},
),
fsu.Query(
fsu.TransformOp.SAMPLE,
{
"target_fg": {k: 10 for k in ar_species},
"prng_seed": AR_SAMPLING_PRNG_SEED,
},
),
],
),
# Scrub background annotations of downstream and artificially rare species.
fsu.Query(
op=fsu.TransformOp.SCRUB,
kwargs={
"key": "bg_species_codes",
"values": downstream_species + ar_species,
},
),
]
if ar_only:
queries.append(
fsu.Query(
op=fsu.TransformOp.FILTER,
kwargs={
"mask_op": fsu.MaskOp.IN,
"op_kwargs": {
"key": "species_code",
"values": ar_species,
},
},
)
)
return fsu.QuerySequence(queries)
def get_downstream_metadata_query() -> fsu.QuerySequence:
db = namespace_db.load_db()
downstream_species = list(db.class_lists[DOWNSTREAM_CLASS_LIST].classes)
# NOTE: Artificially rare species are the subset of SSW species which do not
# intersect with the downstream species.
ar_species = list(db.class_lists[AR_CLASS_LIST].classes)
return fsu.QuerySequence([
fsu.Query(
op=fsu.TransformOp.FILTER,
kwargs={
"mask_op": fsu.MaskOp.IN,
"op_kwargs": {
"key": "species_code",
"values": downstream_species + ar_species,
},
},
),
])
def get_downstream_data_query() -> fsu.QuerySequence:
"""Produces the QuerySequence to generate downstream data.
Returns:
The QuerySequence to apply.
"""
db = namespace_db.load_db()
downstream_species = list(db.class_lists[DOWNSTREAM_CLASS_LIST].classes)
# NOTE: Artificially rare species are the subset of SSW species which do not
# intersect with the downstream species.
ar_species = list(db.class_lists[AR_CLASS_LIST].classes)
upstream_query = get_upstream_data_query()
return fsu.QuerySequence([
fsu.QueryComplement(upstream_query, "xeno_canto_id"),
# Annotations of species that are not part of the downstream evaluation
# are scrubbed if they appear in the background or foreground.
# Therefore, we're only left with relevant species annotated.
fsu.Query(
op=fsu.TransformOp.SCRUB_ALL_BUT,
kwargs={
"key": "bg_species_codes",
"values": downstream_species + ar_species,
},
),
fsu.Query(
op=fsu.TransformOp.SCRUB_ALL_BUT,
kwargs={
"key": "species_code",
"values": downstream_species + ar_species,
},
),
])
def get_class_representatives_metadata_query() -> fsu.QuerySequence:
db = namespace_db.load_db()
species = list(
set(
list(db.class_lists[DOWNSTREAM_CLASS_LIST].classes)
+ list(db.class_lists[AR_CLASS_LIST].classes)
+ list(db.class_lists["high_sierras"].classes)
+ list(db.class_lists["sierras_kahl"].classes)
+ list(db.class_lists["peru"].classes)
)
)
return fsu.QuerySequence(
[
fsu.Query(
op=fsu.TransformOp.FILTER,
kwargs={
"mask_op": fsu.MaskOp.IN,
"op_kwargs": {
"key": "species_code",
"values": species,
},
},
)
]
)
def get_class_representatives_data_query() -> fsu.QuerySequence:
"""Produces the QuerySequence to generate class representatives data."""
db = namespace_db.load_db()
species = list(
set(
list(db.class_lists[DOWNSTREAM_CLASS_LIST].classes)
+ list(db.class_lists["high_sierras"].classes)
+ list(db.class_lists["sierras_kahl"].classes)
+ list(db.class_lists["peru"].classes)
)
)
species_no_ar = [
s for s in species if s not in db.class_lists[AR_CLASS_LIST].classes
]
return fsu.QuerySequence([
fsu.QueryParallel(
queries=[
fsu.Query(
op=fsu.TransformOp.FILTER,
kwargs={
"mask_op": fsu.MaskOp.IN,
"op_kwargs": {
"key": "species_code",
"values": species_no_ar,
},
},
),
get_upstream_data_query(ar_only=True),
],
merge_strategy=fsu.MergeStrategy.CONCAT_NO_DUPLICATES,
),
# This scrubs all background labels except those with values in `species`.
fsu.Query(
op=fsu.TransformOp.SCRUB_ALL_BUT,
kwargs={
"key": "bg_species_codes",
"values": species,
},
),
])
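# Illustrative sketch (not part of the original module): upstream and
# downstream recordings are complementary by construction, since the
# downstream query starts from QueryComplement(upstream_query,
# "xeno_canto_id"). Assuming `df` is the full recording metadata DataFrame:
#
#   upstream_df = fsu.apply_sequence(df, get_upstream_data_query())
#   downstream_df = fsu.apply_sequence(df, get_downstream_data_query())
#   assert set(upstream_df["xeno_canto_id"]).isdisjoint(
#       downstream_df["xeno_canto_id"])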
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""soundevents dataset."""
import dataclasses
import tempfile
from typing import Any, Callable, cast
import warnings
from absl import logging
from chirp import audio_utils
from chirp.data import filter_scrub_utils as fsu
from chirp.data import tfds_features
from chirp.taxonomy import namespace_db
from etils import epath
from jax import numpy as jnp
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
_DESCRIPTION = """
Sound events dataset built from the FSD50K dataset.
Freesound Dataset 50k (or FSD50K for short) is an open dataset of human-labeled
sound events containing 51,197 Freesound clips unequally distributed in 200
classes drawn from the AudioSet Ontology.
(https://zenodo.org/record/4060432#.Y4PE5-xKjzc)
We use a slightly different format than AudioSet for the naming of class labels
in order to avoid potential problems with spaces, commas, etc.
Example: Accelerating_and_revving_and_vroom instead of the original
Accelerating, revving, vroom. You can go back to the original AudioSet naming
using the information provided in vocabulary.csv (class label and mid for the
200 classes of FSD50K) and the AudioSet Ontology specification.
Audioset consists of an expanding ontology of 632 audio event classes and a
collection of 2,084,320 human-labeled 10-second sound clips drawn from
YouTube videos (https://research.google.com/audioset/index.html)
The AudioSet ontology is a collection of sound events organized in a hierarchy.
https://research.google.com/audioset/ontology/index.html
The AudioSet dataset is a large-scale collection of human-labeled 10-second
sound clips drawn from YouTube videos. To collect all our data we worked with
human annotators who verified the presence of sounds they heard within
YouTube segments. (https://research.google.com/audioset/dataset/index.html)
"""
_CITATION = """
@article{fonseca2022FSD50K,
title={{FSD50K}: an open dataset of human-labeled sound events},
author={Fonseca, Eduardo and Favory, Xavier and Pons, Jordi and Font, Frederic
and Serra, Xavier},
journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing},
volume={30},
pages={829--852},
year={2022},
publisher={IEEE}
}
"""
LocalizationFn = Callable[[Any, int, float], jnp.ndarray]
@dataclasses.dataclass
class SoundeventsConfig(tfds.core.BuilderConfig):
"""The config to generate multiple versions of Sound Events from FSD50K."""
sample_rate_hz: int = 32_000
resampling_method: str = 'polyphase'
localization_fn: LocalizationFn | None = None
interval_length_s: float | None = None
data_processing_query: fsu.QuerySequence = fsu.QuerySequence(queries=[])
metadata_processing_query: fsu.QuerySequence = fsu.QuerySequence(queries=[])
class_list_name: str = 'fsd50k'
class Soundevents(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for soundevents dataset."""
VERSION = tfds.core.Version('1.0.1')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
'1.0.1': 'Added a config to filter out bird classes.',
}
BUILDER_CONFIGS = [
# pylint: disable=unexpected-keyword-arg
SoundeventsConfig(
name='fsd50k_full_length',
localization_fn=None,
class_list_name='fsd50k',
description='Full-length audio sequences (no peak localization applied).',
),
SoundeventsConfig(
name='fsd50k_slice_peaked',
localization_fn=audio_utils.slice_peaked_audio,
interval_length_s=6.0,
class_list_name='fsd50k',
description=(
'Chunked audio sequences processed with '
'chirp.audio_utils.slice_peaked_audio.'
),
),
SoundeventsConfig(
name='fsd50k_no_bird_slice_peaked',
localization_fn=audio_utils.slice_peaked_audio,
interval_length_s=6.0,
class_list_name='fsd50k',
description=(
'FSD50K dataset excluding bird classes; '
'chunked audio sequences processed with '
'chirp.audio_utils.slice_peaked_audio.'
),
data_processing_query=fsu.QuerySequence(
[fsu.filter_contains_no_class_list('class_code', 'fsd50k_birds')]
),
),
]
GCS_URL = epath.Path('gs://chirp-public-bucket/soundevents/fsd50k')
DATASET_CONFIG = {
'dev': {
'ground_truth_file': GCS_URL / 'FSD50K.ground_truth/dev.csv',
'audio_dir': GCS_URL / 'dev_audio',
},
'eval': {
'ground_truth_file': GCS_URL / 'FSD50K.ground_truth/eval.csv',
'audio_dir': GCS_URL / 'eval_audio',
},
}
def _info(self) -> tfds.core.DatasetInfo:
db = namespace_db.load_db()
dataset_class_list = db.class_lists[self.builder_config.class_list_name]
logging.info(
'Currently considering a total of %s sound event classes.',
len(dataset_class_list.classes),
)
full_length = self.builder_config.localization_fn is None
audio_feature_shape = [
None
if full_length
else int(
self.builder_config.sample_rate_hz
* self.builder_config.interval_length_s
)
]
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'audio': tfds_features.Int16AsFloatTensor(
shape=audio_feature_shape,
sample_rate=self.builder_config.sample_rate_hz,
encoding=tfds.features.Encoding.ZLIB,
),
'recording_id': tfds.features.Scalar(dtype=tf.uint64),
'segment_id': tfds.features.Scalar(dtype=tf.int64),
'segment_start': tfds.features.Scalar(dtype=tf.uint64),
'segment_end': tfds.features.Scalar(dtype=tf.uint64),
'label': tfds.features.Sequence(
tfds.features.ClassLabel(names=dataset_class_list.classes)
),
'filename': tfds.features.Text(),
'class_name': tfds.features.Text(),
}),
supervised_keys=('audio', 'label'),
homepage='https://github.com/google-research/chirp',
citation=_CITATION,
)
def _load_dataset(
self, dl_manager: tfds.download.DownloadManager, dataset_type: str
) -> pd.DataFrame:
"""Loading FSD50k train or test dataset from bucket.
'dataset_type' is eaither dev or eval used to seperate train and test
dataset in FSD50K dataset. This dowload and process ground truth file
generate source_infor for dataset
Args:
dl_manager: Download Manager
dataset_type: 'train' or 'eval' dataset type. Corresponding ground truth
files are slightly different format in FSD50K dataset. Dowloading and
preparing source_infromation
Returns:
source_info: A dataframe contain our source infromation for each data
element
"""
dl_manager._force_checksums_validation = (
False # pylint: disable=protected-access
)
# Get the ground truth files for the dev set (which includes dev and val
# examples) and for the eval set.
paths = dl_manager.download_and_extract({
'dataset_info_dev': (
self.DATASET_CONFIG['dev']['ground_truth_file']
).as_posix(),
'dataset_info_eval': (
self.DATASET_CONFIG['eval']['ground_truth_file']
).as_posix(),
})
source_info = pd.read_csv(paths[f'dataset_info_{dataset_type}'])
if dataset_type == 'eval':
source_info.columns = ['fname', 'labels', 'mids']
source_info['split'] = 'test'
# get_split = lambda s: s['split']
get_labels = lambda s: s['labels'].split(',')
get_class_codes = lambda s: s['mids'].split(',')
get_filename = lambda s: f"{s['fname']}.wav"
audio_dir = self.DATASET_CONFIG[dataset_type]['audio_dir']
source_info['url'] = source_info.apply(
lambda s: audio_dir / f'{get_filename(s)}', axis=1
)
source_info['label'] = source_info.apply(get_labels, axis=1)
source_info['class_code'] = source_info.apply(get_class_codes, axis=1)
source_info = source_info.drop(['mids', 'labels'], axis=1)
# Apply all the processing queries.
# TODO(haritaoglu) need to test processing queries
source_info = fsu.apply_sequence(
source_info, self.builder_config.data_processing_query
)
if isinstance(source_info, pd.Series):
source_info = source_info.to_frame()
else:
assert isinstance(source_info, pd.DataFrame)
source_info = cast(pd.DataFrame, source_info)
return source_info
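# Illustrative note (hypothetical row, following the FSD50K ground-truth
# format): a raw row such as fname=12345, labels='Electric_guitar,Guitar',
# mids='/m/aaaaa,/m/bbbbb' becomes, after the processing above,
# label=['Electric_guitar', 'Guitar'], class_code=['/m/aaaaa', '/m/bbbbb'],
# and url=<audio_dir>/12345.wav.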
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
# Defined as part of the tfds API for dividing the dataset into splits.
# https://www.tensorflow.org/datasets/add_dataset#specifying_dataset_splits
train_source_info = self._load_dataset(dl_manager, 'dev')
eval_source_info = self._load_dataset(dl_manager, 'eval')
return {
'train': self._generate_examples(source_info=train_source_info),
'test': self._generate_examples(source_info=eval_source_info),
}
def _generate_examples(self, source_info: pd.DataFrame):
beam = tfds.core.lazy_imports.apache_beam
librosa = tfds.core.lazy_imports.librosa
def _process_example(row):
recording_id, source = row
with tempfile.NamedTemporaryFile(
mode='w+b', suffix=source['url'].suffix
) as f:
f.write(source['url'].read_bytes())
# librosa emits many warnings when PySoundFile is unavailable; these are safe
# to ignore when processing the sound event files.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
audio, _ = librosa.load(
f.name,
sr=self.builder_config.sample_rate_hz,
res_type=self.builder_config.resampling_method,
)
# Resampling can introduce artifacts that push the signal outside the
# [-1, 1) interval.
audio = np.clip(audio, -1.0, 1.0 - (1.0 / float(1 << 15)))
label = source['class_code']
return source['fname'], {
'audio': audio,
'recording_id': recording_id,
'segment_id': -1,
'segment_start': 0,
'segment_end': len(audio),
'label': label,
'class_name': source['label'][0],
'filename': source['url'].as_posix(),
}
pipeline = beam.Create(source_info.iterrows()) | beam.Map(_process_example)
if self.builder_config.localization_fn:
logging.info('Adding localization function.')
def _localize_intervals(args):
key, example = args
sample_rate_hz = self.builder_config.sample_rate_hz
interval_length_s = self.builder_config.interval_length_s
target_length = int(sample_rate_hz * interval_length_s)
audio = audio_utils.pad_to_length_if_shorter(
example['audio'], target_length
)
# Pass padded audio to avoid localization_fn having to pad again
audio_intervals = self.builder_config.localization_fn(
audio, sample_rate_hz, interval_length_s
).tolist()
if not audio_intervals:
# If no peaks were found, we take the first segment of the
# recording to avoid discarding it entirely
audio_intervals = [(0, target_length)]
interval_examples = []
for i, (start, end) in enumerate(audio_intervals):
interval_examples.append((
f'{key}_{i}',
{
**example,
'audio': audio[start:end],
'segment_id': i,
'segment_start': start,
'segment_end': end,
},
))
logging.debug('Interval examples: %s', interval_examples)
return interval_examples
pipeline = pipeline | beam.FlatMap(_localize_intervals)
return pipeline
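# Illustrative usage sketch (assumes a TFDS build environment with read access
# to the GCS bucket above; not part of the original module):
#
#   builder = Soundevents(config='fsd50k_slice_peaked')
#   builder.download_and_prepare()
#   train_ds = builder.as_dataset(split='train')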
|
# coding=utf-8
# Copyright 2023 The Chirp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""soundevents dataset."""
from .soundevents import Soundevents
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install BigBird."""
import setuptools
# Get install requirements from the REQUIREMENTS file.
with open('requirements.txt') as fp:
_REQUIREMENTS = fp.read().splitlines()
# Get the long description from the README file.
with open('README.md') as fp:
_LONG_DESCRIPTION = fp.read()
setuptools.setup(
name='bigbird',
version='0.0.1',
description='Big Bird: Transformers for Long Sequences',
long_description=_LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author='Google Inc.',
author_email='[email protected]',
url='http://github.com/google-research/bigbird',
license='Apache 2.0',
packages=[
'bigbird', 'bigbird.core', 'bigbird.classifier',
'bigbird.pretrain', 'bigbird.summarization'
],
package_data={'bigbird': ['vocab/*']},
scripts=[],
install_requires=_REQUIREMENTS,
keywords='deeplearning machinelearning nlp classifier qa summarization transformer pretraining',
)
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BigBird Decoder Layers."""
from bigbird.core import attention
from bigbird.core import beam_search
from bigbird.core import recompute_grad
from bigbird.core import utils
import tensorflow.compat.v2 as tf
class PrenormDecoderLayer(tf.keras.layers.Layer):
"""Decoder layer of a transformer in Pegasus style.
The layer_norm is taken before self-attention.
"""
def __init__(self,
hidden_size=768,
intermediate_size=3072,
intermediate_act_fn=utils.gelu,
attention_probs_dropout_prob=0.0,
hidden_dropout_prob=0.1,
initializer_range=0.02,
num_attention_heads=12,
use_bias=True,
name=None):
"""Constructor of a decoder layer of a transformer in Pegasus style.
Args:
hidden_size: (optional) int. Size of hidden dimension.
intermediate_size: (optional) int. Size of intermediate dimension.
intermediate_act_fn: (optional) Activation function for intermediate layer.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
hidden_dropout_prob: (optional) float. Dropout probability of the
attention.
initializer_range: (optional) float. Range of the weight initializer.
num_attention_heads: (optional) int. Number of attention heads.
use_bias: (optional) bool. Whether key/query/value uses a bias vector.
name: The name scope of this layer.
"""
super(PrenormDecoderLayer, self).__init__(name=name)
with tf.compat.v1.variable_scope(name):
attention_head_size = hidden_size // num_attention_heads
with tf.compat.v1.variable_scope("attention"):
# Pre-Normalization layer
with tf.compat.v1.variable_scope("self"):
self.first_layer_norm = utils.NormLayer(hidden_size)
# Self-Attention layer
self.self_attn_layer = attention.MultiHeadedAttentionLayer(
"original_full", use_bias=use_bias, name="self",
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
initializer_range=initializer_range,
attention_probs_dropout_prob=attention_probs_dropout_prob)
# Feedforward layer
with tf.compat.v1.variable_scope("output"):
self.self_proj_layer = utils.Dense3dProjLayer(
num_attention_heads, attention_head_size,
utils.create_initializer(initializer_range), None,
"dense", use_bias)
# Dropout
self.self_attn_dropout = recompute_grad.RecomputingDropout(
hidden_dropout_prob)
# Pre-Normalization layer
with tf.compat.v1.variable_scope("encdec"):
self.second_layer_norm = utils.NormLayer(hidden_size)
# Cross-Attention layer
self.cross_attn_layer = attention.MultiHeadedAttentionLayer(
"original_full", use_bias=use_bias, name="encdec",
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
initializer_range=initializer_range,
attention_probs_dropout_prob=attention_probs_dropout_prob)
# Feedforward layer
with tf.compat.v1.variable_scope("encdec_output"):
self.cross_proj_layer = utils.Dense3dProjLayer(
num_attention_heads, attention_head_size,
utils.create_initializer(initializer_range), None,
"dense", use_bias)
# Dropout
self.cross_attn_dropout = recompute_grad.RecomputingDropout(
hidden_dropout_prob)
with tf.compat.v1.variable_scope("intermediate"):
# Normalization layer
self.third_layer_norm = utils.NormLayer(hidden_size)
# Feedforward layer
self.expand_layer = utils.Dense2dLayer(
hidden_size, intermediate_size,
utils.create_initializer(initializer_range),
intermediate_act_fn, "dense")
with tf.compat.v1.variable_scope("output"):
# Feedforward layer
self.contract_layer = utils.Dense2dLayer(
intermediate_size, hidden_size,
utils.create_initializer(initializer_range),
None, "dense")
# Dropout
self.output_dropout = recompute_grad.RecomputingDropout(
hidden_dropout_prob)
def call(self,
layer_input,
encoder_outputs,
self_attention_mask,
attention_mask,
cache=None,
decode_i=None,
training=None):
"""Implements a decoder layer of a transformer in Pegasus style.
The layer_norm is taken before self-attention.
Args:
layer_input: float Tensor of shape [batch_size, seq_length, hidden_size].
encoder_outputs: tensors with shape [batch_size, input_length,
num_hidden_layers, hidden_size]
self_attention_mask: bias for decoder self-attention layer. [1, 1,
target_length, target_length]
attention_mask: bias for encoder-decoder attention layer. [batch_size, 1,
1, input_length]
cache: (Used during prediction) A dictionary with tensors containing
results of previous attentions. The dictionary must have the items:
{"k": tensor with shape
[batch_size, max_len, num_attention_heads, size_per_head],
"v": tensor with shape
[batch_size, max_len, num_attention_heads, size_per_head]}
decode_i: (Used during prediction) current location of decoding
training: Boolean indicating whether the call is training or inference.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size].
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
NotImplementedError: For unknown attention type.
"""
# self-attention
normalized_layer_input = self.first_layer_norm(layer_input)
self_attention_output = self.self_attn_layer(
normalized_layer_input, normalized_layer_input, [self_attention_mask],
cache=cache, decode_i=decode_i, training=training)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
self_attention_output = self.self_proj_layer(self_attention_output)
self_attention_output = self.self_attn_dropout(self_attention_output,
training=training)
self_attention_output = self_attention_output + layer_input
# Cross-attention
normalized_self_attention_output = self.second_layer_norm(
self_attention_output)
attention_output = self.cross_attn_layer(
normalized_self_attention_output, encoder_outputs, [attention_mask],
training=training)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
attention_output = self.cross_proj_layer(attention_output)
attention_output = self.cross_attn_dropout(attention_output,
training=training)
attention_output = attention_output + self_attention_output
# The activation is only applied to the "intermediate" hidden layer.
normalized_attention_output = self.third_layer_norm(attention_output)
intermediate_output = self.expand_layer(normalized_attention_output)
# Down-project back to `hidden_size` then add the residual.
layer_output = self.contract_layer(intermediate_output)
layer_output = self.output_dropout(layer_output, training=training)
layer_output = layer_output + attention_output
return layer_output
class PostnormDecoderLayer(tf.keras.layers.Layer):
"""Decoder layer of a transformer in BERT style.
The layer_norm is taken after self-attention.
"""
def __init__(self,
hidden_size=768,
intermediate_size=3072,
intermediate_act_fn=utils.gelu,
attention_probs_dropout_prob=0.0,
hidden_dropout_prob=0.1,
initializer_range=0.02,
num_attention_heads=12,
use_bias=True,
name=None):
"""Constructor of a decoder layer of a transformer in BERT style.
Args:
hidden_size: (optional) int. Size of hidden dimension.
intermediate_size: (optional) int. Size of intermediate dimension.
intermediate_act_fn: (optional) Activation function for intermediate layer.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
hidden_dropout_prob: (optional) float. Dropout probability of the
attention.
initializer_range: (optional) float. Range of the weight initializer.
num_attention_heads: (optional) int. Number of attention heads.
use_bias: (optional) bool. Whether key/query/value uses a bias vector.
name: The name scope of this layer.
"""
super(PostnormDecoderLayer, self).__init__(name=name)
with tf.compat.v1.variable_scope(name):
attention_head_size = hidden_size // num_attention_heads
with tf.compat.v1.variable_scope("attention"):
# Self-Attention layers
self.self_attn_layer = attention.MultiHeadedAttentionLayer(
"original_full", use_bias=use_bias, name="self",
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
initializer_range=initializer_range,
attention_probs_dropout_prob=attention_probs_dropout_prob)
with tf.compat.v1.variable_scope("output"):
# Feedforward layer
self.self_proj_layer = utils.Dense3dProjLayer(
num_attention_heads, attention_head_size,
utils.create_initializer(initializer_range), None,
"dense", use_bias)
# Post-Normalization layer
self.first_layer_norm = utils.NormLayer(hidden_size)
# Dropout
self.self_attn_dropout = recompute_grad.RecomputingDropout(
hidden_dropout_prob)
# Cross-Attention layers
self.cross_attn_layer = attention.MultiHeadedAttentionLayer(
"original_full", use_bias=use_bias, name="encdec",
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
initializer_range=initializer_range,
attention_probs_dropout_prob=attention_probs_dropout_prob)
with tf.compat.v1.variable_scope("encdec_output"):
# Feedforward layer
self.cross_proj_layer = utils.Dense3dProjLayer(
num_attention_heads, attention_head_size,
utils.create_initializer(initializer_range), None,
"dense", use_bias)
# Post-Normalization layer
self.second_layer_norm = utils.NormLayer(hidden_size)
# Dropout
self.cross_attn_dropout = recompute_grad.RecomputingDropout(
hidden_dropout_prob)
with tf.compat.v1.variable_scope("intermediate"):
# Feedforward layer
self.expand_layer = utils.Dense2dLayer(
hidden_size, intermediate_size,
utils.create_initializer(initializer_range),
intermediate_act_fn, "dense")
with tf.compat.v1.variable_scope("output"):
# Feedforward layer
self.contract_layer = utils.Dense2dLayer(
intermediate_size, hidden_size,
utils.create_initializer(initializer_range),
None, "dense")
# Normalization layer
self.third_layer_norm = utils.NormLayer(hidden_size)
# Dropout
self.output_dropout = recompute_grad.RecomputingDropout(
hidden_dropout_prob)
def call(self,
layer_input,
encoder_outputs,
self_attention_mask,
attention_mask,
cache=None,
decode_i=None,
training=None):
"""Implements a decoder layer of a transformer in BERT style.
The layer_norm is taken after self-attention.
Args:
layer_input: float Tensor of shape [batch_size, seq_length, hidden_size].
encoder_outputs: tensors with shape [batch_size, input_length,
num_hidden_layers, hidden_size]
self_attention_mask: bias for decoder self-attention layer. [1, 1,
target_length, target_length]
attention_mask: bias for encoder-decoder attention layer. [batch_size, 1,
1, input_length]
cache: (Used during prediction) A dictionary with tensors containing
results of previous attentions. The dictionary must have the items:
{"k": tensor with shape
[batch_size, max_len, num_attention_heads, size_per_head],
"v": tensor with shape
[batch_size, max_len, num_attention_heads, size_per_head]}
decode_i: (Used during prediction) current location of decoding
training: Boolean indicating whether the call is training or inference.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size].
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
NotImplementedError: For unknown attention type.
"""
# self-attention
self_attention_output = self.self_attn_layer(
layer_input, layer_input, [self_attention_mask],
cache=cache, decode_i=decode_i, training=training)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
self_attention_output = self.self_proj_layer(self_attention_output)
self_attention_output = self.self_attn_dropout(self_attention_output,
training=training)
self_attention_output = self.first_layer_norm(
self_attention_output + layer_input)
# cross-attention
attention_output = self.cross_attn_layer(
self_attention_output, encoder_outputs, [attention_mask],
training=training)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
attention_output = self.cross_proj_layer(attention_output)
attention_output = self.cross_attn_dropout(attention_output,
training=training)
attention_output = self.second_layer_norm(
attention_output + self_attention_output)
# The activation is only applied to the "intermediate" hidden layer.
intermediate_output = self.expand_layer(attention_output)
# Down-project back to `hidden_size` then add the residual.
layer_output = self.contract_layer(intermediate_output)
layer_output = self.output_dropout(layer_output, training=training)
layer_output = self.third_layer_norm(layer_output + attention_output)
return layer_output
def add_gradient_recomputation(original_class):
"""Creats a subclass which enables gradient checkpointing."""
class RecomputeLayer(original_class):
"""Transformer layer that recomputes the forward pass during backprop."""
def call(self,
layer_input,
encoder_outputs,
self_attention_mask,
attention_mask,
cache=None,
decode_i=None,
training=None):
def f(layer_input, encoder_outputs):
x = super(RecomputeLayer, self).call(
layer_input, encoder_outputs, self_attention_mask, attention_mask,
cache, decode_i, training=training)
return x
f = recompute_grad.recompute_grad(f)
return f(layer_input, encoder_outputs)
return RecomputeLayer
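# Illustrative usage sketch (mirrors how DecoderStack uses this helper below):
# wrapping a decoder layer class makes its forward pass be recomputed during
# backprop, trading extra compute for lower activation memory.
#
#   CheckpointedPrenormDecoderLayer = add_gradient_recomputation(
#       PrenormDecoderLayer)
#   layer = CheckpointedPrenormDecoderLayer(hidden_size=768,
#                                           num_attention_heads=12)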
class DecoderStack(tf.keras.layers.Layer):
"""Transformer decoder stack."""
def __init__(self, params):
if params["couple_encoder_decoder"]:
name = "encoder"
super(DecoderStack, self).__init__(name=name)
else:
name = "decoder"
super(DecoderStack, self).__init__(name=name)
self.params = params
if params["norm_type"] == "prenorm":
decoder_class = PrenormDecoderLayer
elif params["norm_type"] == "postnorm":
decoder_class = PostnormDecoderLayer
else:
raise NotImplementedError(
"Norm type {} is not implemented".format(params["norm_type"]))
if params["use_gradient_checkpointing"]:
decoder_class = add_gradient_recomputation(decoder_class)
if self.params.get("num_decoder_layers", None) is not None:
num_hidden_layers = self.params["num_decoder_layers"]
else:
num_hidden_layers = self.params["num_hidden_layers"]
with tf.compat.v1.variable_scope(name):
# Decoder layers
self.decoder_layers = [
decoder_class( # pylint: disable=g-complex-comprehension
self.params["hidden_size"],
self.params["intermediate_size"],
utils.get_activation(self.params["hidden_act"]),
self.params["attention_probs_dropout_prob"],
self.params["hidden_dropout_prob"],
self.params["initializer_range"],
self.params["num_attention_heads"],
self.params["use_bias"],
name="layer_%d" % layer_idx)
for layer_idx in range(num_hidden_layers)
]
# Normalization layer
self.layer_norm = utils.NormLayer(self.params["hidden_size"])
def call(self,
decoder_inputs,
self_attention_mask,
encoder_outputs,
encoder_mask,
cache=None,
decode_i=None,
training=None):
"""Return the output of the decoder layer stacks.
Args:
decoder_inputs: tensor with shape
[batch_size, target_length, hidden_size]
self_attention_mask: bias for decoder self-attention layer. [1, 1,
target_length, target_length]
encoder_outputs: tensors with shape [batch_size, input_length,
hidden_size]
encoder_mask: bias for encoder-decoder attention layer. [batch_size,
input_length]
cache: (Used during prediction) A dictionary with tensors containing
results of previous attentions. The dictionary must have the items:
{"k": tensor with shape
[batch_size, max_len, num_attention_heads, size_per_head],
"v": tensor with shape
[batch_size, max_len, num_attention_heads, size_per_head]}
decode_i: (Used during prediction) current location of decoding.
training: Boolean indicating whether the call is training or inference.
Returns:
Output of decoder layer stack. A float32 tensor with shape [batch_size,
target_length, hidden_size]
"""
# Expand encoder mask to broadcast over num heads and from_seq axis
attention_mask = tf.expand_dims(tf.expand_dims(encoder_mask, 1), 1)
attention_mask = tf.cast(attention_mask, tf.float32)
if self.params["norm_type"] == "postnorm":
decoder_inputs = self.layer_norm(decoder_inputs)
layer_output = decoder_inputs
for layer in self.decoder_layers:
layer_cache = cache[layer.name] if cache is not None else None
layer_output = layer(
layer_output, encoder_outputs, self_attention_mask, attention_mask,
layer_cache, decode_i, training=training)
if self.params["norm_type"] == "prenorm":
layer_output = self.layer_norm(layer_output)
return layer_output
def create_self_attention_mask(length):
with tf.name_scope("decoder_self_attention_mask"):
valid_locs = tf.linalg.band_part(tf.ones([length, length]), -1, 0)
valid_locs = tf.reshape(valid_locs, [1, 1, length, length])
return valid_locs
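# Illustrative example: for length=3 the causal mask (before reshaping to
# [1, 1, 3, 3]) is
#   [[1., 0., 0.],
#    [1., 1., 0.],
#    [1., 1., 1.]]
# so each decoding position may only attend to itself and earlier positions.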
def inplace_update_i(inp_tensor, updates, i):
"""Inplace update a tensor. B: batch_size, L: tensor length."""
batch_size = inp_tensor.shape[0]
indices = tf.stack([
tf.range(batch_size, dtype=tf.int32),
tf.fill([batch_size], tf.cast(i, tf.int32))
], axis=-1)
return tf.tensor_scatter_nd_update(inp_tensor, indices, updates)
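# Illustrative example: with inp_tensor=[[1, 2, 3], [4, 5, 6]], updates=[9, 8]
# and i=1, the scatter indices are [[0, 1], [1, 1]] and the result is
# [[1, 9, 3], [4, 8, 6]], i.e. column i is replaced for every batch element.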
# pylint: disable=invalid-name
def left2right_decode(symbols_to_logits_fn,
start_symbols,
context_BxU_dict,
batch_size,
max_decode_len,
vocab_size,
beam_size=1,
beam_start=5,
beam_alpha=0.6,
beam_min=0,
beam_max=-1,
eos_id=1):
"""left to right decode.
Notations:
B: batch_size, V: vocab_size, T: decode_len, U: undefined dimensions
Args:
symbols_to_logits_fn: logits = fn(decodes, context, i). Should take
[batch_size, decoded_ids] and return [batch_size, vocab_size].
start_symbols: starting ids [batch_size]
context_BxU_dict: dict of Tensors.
batch_size: int, decode batch size.
max_decode_len: int, maximum number of steps to decode.
vocab_size: int, output vocab size.
beam_size: Number of beams to decode.
beam_start: start length for scaling, default to 5.
beam_alpha: Length penalty for decoding. Should be between 0 (shorter) and 1
(longer), default to 0.6.
beam_min: Minimum beam search lengths.
beam_max: Maximum beam search lengths. Set -1 to use unlimited.
eos_id: end-of-sequence token id, defaults to 1.
Returns:
decodes: Tensor[batch, decode_len]
"""
dtype = tf.int32
start_symbols = tf.expand_dims(start_symbols, 1)
# When beam_size=1, beam_search does not behave exactly like greedy decoding.
# This is because grow_topk considers 2 * beam_size candidates and only the
# top beam_size ones that haven't reached EOS are kept alive.
# In this case, the alpha value for the length penalty will take effect.
if beam_size == 1:
def decode_loop(i, decodes_BxT, cache_BxU_dict):
logits_BxV = symbols_to_logits_fn(decodes_BxT, cache_BxU_dict, i)
decodes_BxT = inplace_update_i(
decodes_BxT, tf.argmax(logits_BxV, -1, output_type=tf.int32), i)
return i + 1, decodes_BxT, cache_BxU_dict
def loop_cond(i, decodes_BxT, unused_cache_BxU_dict):
finished_B = tf.reduce_any(tf.equal(decodes_BxT, eos_id), axis=1)
return tf.logical_and(i < max_decode_len,
tf.logical_not(tf.reduce_all(finished_B)))
init_dec_BxT = tf.concat([tf.cast(start_symbols, dtype=dtype),
tf.zeros([batch_size, max_decode_len-1],
dtype=dtype)], axis=1)
_, decodes, _ = tf.while_loop(
loop_cond, decode_loop,
[tf.constant(0, dtype=dtype), init_dec_BxT, context_BxU_dict])
return decodes
else:
def symbols_to_logits_fn_with_sampling(decodes_BxT, states_BxU_dict, i):
logits_BxV = symbols_to_logits_fn(decodes_BxT, states_BxU_dict, i)
return logits_BxV, states_BxU_dict
length_norm_fn = beam_search.length_normalization(beam_start, beam_alpha,
beam_min, beam_max, -1e3)
init_dec_BxT = tf.concat([tf.cast(start_symbols, dtype=tf.int32),
tf.zeros([batch_size, max_decode_len-1],
dtype=tf.int32)], axis=1)
beams, _ = beam_search.beam_search(
symbols_to_logits_fn_with_sampling,
init_dec_BxT,
context_BxU_dict, vocab_size, beam_size, length_norm_fn, eos_id)
return beams[:, 0, :]
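# Minimal greedy-decoding sketch (illustrative only; `_toy_symbols_to_logits_fn`
# is a hypothetical stand-in for a real decoder). The toy function always
# favours the EOS id (1), so decoding terminates almost immediately:
#
#   def _toy_symbols_to_logits_fn(decodes_BxT, context_BxU_dict, i):
#     batch = tf.shape(decodes_BxT)[0]
#     return 10.0 * tf.one_hot(tf.fill([batch], 1), depth=8)
#
#   ids = left2right_decode(
#       _toy_symbols_to_logits_fn, start_symbols=tf.zeros([2], dtype=tf.int32),
#       context_BxU_dict={}, batch_size=2, max_decode_len=4, vocab_size=8,
#       beam_size=1)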
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BigBird Attention Layers."""
from absl import logging
from bigbird.core import recompute_grad
from bigbird.core import utils
import numpy as np
import tensorflow.compat.v2 as tf
MAX_SEQ_LEN = 4096
def get_single_block_row_attention(block_id,
to_start_block_id,
to_end_block_id,
num_rand_blocks,
window_block_left=1,
window_block_right=1,
global_block_left=1,
global_block_right=1):
"""For a single row block get random row attention.
Args:
block_id: int. block id of row.
to_start_block_id: int. random attention column start id.
to_end_block_id: int. random attention column end id.
num_rand_blocks: int. number of random blocks to be selected.
window_block_left: int. number of blocks of window to left of a block.
window_block_right: int. number of blocks of window to right of a block.
global_block_left: int. Number of blocks globally used to the left.
global_block_right: int. Number of blocks globally used to the right.
Returns:
row containing the random attention vector of size num_rand_blocks.
"""
# list of to_blocks from which to choose random attention
to_block_list = np.arange(to_start_block_id, to_end_block_id,
dtype=np.int32)
# permute the blocks
perm_block = np.random.permutation(to_block_list)
# print(perm_block)
# illegal blocks for the current block id, using window
illegal_blocks = list(
range(block_id - window_block_left, block_id + window_block_right + 1))
# Add blocks at the start and at the end
illegal_blocks.extend(list(range(global_block_left)))
illegal_blocks.extend(
list(range(to_end_block_id - global_block_right, to_end_block_id)))
# The second from_block cannot choose random attention on second last to_block
if block_id == 1:
illegal_blocks.append(to_end_block_id-2)
# The second last from_block cannot choose random attention on second to_block
if block_id == to_end_block_id - 2:
illegal_blocks.append(1)
selected_random_blocks = []
for i in range(to_end_block_id - to_start_block_id):
if perm_block[i] not in illegal_blocks:
selected_random_blocks.append(perm_block[i])
if len(selected_random_blocks) == num_rand_blocks:
break
return np.array(selected_random_blocks, dtype=np.int32)
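# Illustrative example: with to_start_block_id=0, to_end_block_id=15,
# block_id=5 and the default window/global sizes of 1, the illegal blocks are
# {4, 5, 6} (window), {0} (global left) and {14} (global right); the random
# attention blocks for this row are then drawn from the remaining block ids.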
def bigbird_block_rand_mask_with_head(seq_length,
block_size,
num_heads,
plan_from_length,
plan_num_rand_blocks,
window_block_left=1,
window_block_right=1,
global_block_top=1,
global_block_bottom=1,
global_block_left=1,
global_block_right=1):
"""Create adjacency list of random attention.
Args:
seq_length: int. length of sequence.
block_size: int. size of block in sequence.
num_heads: int. total number of heads.
plan_from_length: list. plan from lengths from which num_rand blocks are chosen.
plan_num_rand_blocks: list. number of rand blocks within the plan.
window_block_left: int. number of blocks of window to left of a block.
window_block_right: int. number of blocks of window to right of a block.
global_block_top: int. number of blocks at the top.
global_block_bottom: int. number of blocks at the bottom.
global_block_left: int. Number of blocks globally used to the left.
global_block_right: int. Number of blocks globally used to the right.
Returns:
adjacency list of size num_heads where each element is of size
from_seq_length//from_block_size-2 by num_rand_blocks
"""
# Total number of blocks in the mask
num_blocks = seq_length//block_size
# Number of blocks per plan
plan_block_length = np.array(plan_from_length) // block_size
# till when to follow plan
max_plan_idx = plan_from_length.index(seq_length)
# Random attention adjacency list
rand_attn = [np.zeros((num_blocks,
np.sum(plan_num_rand_blocks[:max_plan_idx+1])),
dtype=np.int32) for i in range(num_heads)]
# We will go iteratively over the plan blocks and pick random number of
# Attention blocks from the legally allowed blocks
for plan_idx in range(max_plan_idx+1):
rnd_r_cnt = 0
if plan_idx > 0:
# set the row for all from_blocks starting from 0 to
# plan_block_length[plan_idx-1]
# column index starts from plan_block_length[plan_idx-1] and ends at
# plan_block_length[plan_idx]
if plan_num_rand_blocks[plan_idx] > 0:
rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
curr_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx+1]))
for blk_rw_idx in range(global_block_top,
plan_block_length[plan_idx-1]):
for h in range(num_heads):
# print("head", h, "blk_rw_idx", blk_rw_idx)
rand_attn[h][blk_rw_idx,
rnd_r_cnt:curr_r_cnt] = get_single_block_row_attention(
block_id=blk_rw_idx,
to_start_block_id=plan_block_length[plan_idx - 1],
to_end_block_id=plan_block_length[plan_idx],
num_rand_blocks=plan_num_rand_blocks[plan_idx],
window_block_left=window_block_left,
window_block_right=window_block_right,
global_block_left=global_block_left,
global_block_right=global_block_right)
for pl_id in range(plan_idx):
if plan_num_rand_blocks[pl_id] == 0:
continue
for blk_rw_idx in range(plan_block_length[plan_idx-1],
plan_block_length[plan_idx]):
rnd_r_cnt = 0
to_start_block_id = 0
if pl_id > 0:
rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))
to_start_block_id = plan_block_length[pl_id-1]
curr_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id+1]))
for h in range(num_heads):
# print("head", h, "blk_rw_idx", blk_rw_idx)
rand_attn[h][blk_rw_idx,
rnd_r_cnt:curr_r_cnt] = get_single_block_row_attention(
block_id=blk_rw_idx,
to_start_block_id=to_start_block_id,
to_end_block_id=plan_block_length[pl_id],
num_rand_blocks=plan_num_rand_blocks[pl_id],
window_block_left=window_block_left,
window_block_right=window_block_right,
global_block_left=global_block_left,
global_block_right=global_block_right)
if plan_num_rand_blocks[plan_idx] == 0:
continue
# print("Start from here")
curr_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx+1]))
from_start_block_id = global_block_top
to_start_block_id = 0
if plan_idx > 0:
rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
from_start_block_id = plan_block_length[plan_idx-1]
to_start_block_id = plan_block_length[plan_idx-1]
for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):
for h in range(num_heads):
# print("head", h, "blk_rw_idx", blk_rw_idx)
rand_attn[h][blk_rw_idx,
rnd_r_cnt:curr_r_cnt] = get_single_block_row_attention(
block_id=blk_rw_idx,
to_start_block_id=to_start_block_id,
to_end_block_id=plan_block_length[plan_idx],
num_rand_blocks=plan_num_rand_blocks[plan_idx],
window_block_left=window_block_left,
window_block_right=window_block_right,
global_block_left=global_block_left,
global_block_right=global_block_right)
for nh in range(num_heads):
rand_attn[nh] = rand_attn[nh][global_block_top:num_blocks -
global_block_bottom, :]
return rand_attn
def get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):
"""Gives the plan of where to put random attention.
Args:
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
num_rand_blocks: int. Number of random chunks per row.
Returns:
plan_from_length: ending location of from block
plan_num_rand_blocks: number of random ending location for each block
"""
# general plan
plan_from_length = []
plan_num_rand_blocks = []
if (2*num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((2*num_rand_blocks + 5)*from_block_size))
plan_num_rand_blocks.append(num_rand_blocks)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(0)
elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((num_rand_blocks + 5)*from_block_size))
plan_num_rand_blocks.append(num_rand_blocks//2)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks//2))
else:
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks)
return plan_from_length, plan_num_rand_blocks
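# Worked example (illustrative): with from_seq_length=4096, from_block_size=64
# and num_rand_blocks=3 there are 64 blocks and 2*3 + 5 = 11 < 64, so the first
# branch applies and the plan is plan_from_length=[704, 4096],
# plan_num_rand_blocks=[3, 0] (since 11 * 64 = 704).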
def bigbird_block_rand_mask(from_seq_length,
to_seq_length,
from_block_size,
to_block_size,
num_rand_blocks,
last_idx=-1):
"""Create adjacency list of random attention.
Args:
from_seq_length: int. length of from sequence.
to_seq_length: int. length of to sequence.
from_block_size: int. size of block in from sequence.
to_block_size: int. size of block in to sequence.
num_rand_blocks: int. Number of random chunks per row.
last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence,
if positive then num_rand_blocks blocks chosen only up to last_idx.
Returns:
adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks
"""
rand_attn = np.zeros(
(from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
last = to_seq_length // to_block_size - 1
if last_idx > (2 * to_block_size):
last = (last_idx // to_block_size) - 1
r = num_rand_blocks # shorthand
for i in range(1, from_seq_length // from_block_size-1):
start = i-2
end = i
if i == 1:
rand_attn[i-1, :] = np.random.permutation(middle_seq[2:last])[:r]
elif i == 2:
rand_attn[i-1, :] = np.random.permutation(middle_seq[3:last])[:r]
elif i == from_seq_length // from_block_size - 3:
rand_attn[i-1, :] = np.random.permutation(middle_seq[:last])[:r]
# Missing -3: should have been sliced till last-3
elif i == from_seq_length // from_block_size - 2:
rand_attn[i-1, :] = np.random.permutation(middle_seq[:last])[:r]
# Missing -4: should have been sliced till last-4
else:
if start > last:
start = last
rand_attn[i-1, :] = np.random.permutation(middle_seq[:start])[:r]
elif (end+1) == last:
rand_attn[i-1, :] = np.random.permutation(middle_seq[:start])[:r]
else:
rand_attn[i-1, :] = np.random.permutation(
np.concatenate((middle_seq[:start], middle_seq[end+1:last])))[:r]
return rand_attn
def full_bigbird_mask(from_seq_length,
to_seq_length,
from_block_size,
to_block_size,
rand_attn):
"""Calculate BigBird attention pattern as a full dense matrix.
Args:
from_seq_length: int. length of from sequence.
to_seq_length: int. length of to sequence.
from_block_size: int. size of block in from sequence.
to_block_size: int. size of block in to sequence.
rand_attn: adjacency matrix for random attention.
Returns:
attention mask matrix of shape [from_seq_length, to_seq_length]
"""
attn_mask = np.zeros((MAX_SEQ_LEN, MAX_SEQ_LEN), dtype=np.int32)
for i in range(1, (MAX_SEQ_LEN // from_block_size) - 1):
attn_mask[(i) * from_block_size:(i + 1) * from_block_size,
(i - 1) * to_block_size:(i + 2) * to_block_size] = 1
for j in rand_attn[i - 1, :]:
attn_mask[i * from_block_size:(i + 1) * from_block_size,
j * to_block_size:(j + 1) * to_block_size] = 1
attn_mask[:from_block_size, :] = 1
attn_mask[:, :to_block_size] = 1
attn_mask[:, -to_block_size:] = 1
attn_mask[-from_block_size:, :] = 1
clipped_attn_mask = attn_mask[:from_seq_length, :to_seq_length]
return np.array(clipped_attn_mask, dtype=bool)
def create_rand_mask_from_inputs(from_blocked_mask,
to_blocked_mask,
rand_attn,
num_attention_heads,
num_rand_blocks,
from_seq_length,
from_block_size):
"""Create 4D attention mask from a 3D tensor mask.
Args:
from_blocked_mask: 2D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
rand_attn: [batch_size, num_attention_heads,
from_seq_length//from_block_size-2, num_rand_blocks]
num_attention_heads: int. Number of attention heads.
num_rand_blocks: int. Number of random chunks per row.
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
Returns:
float Tensor of shape [batch_size, num_attention_heads,
from_seq_length//from_block_size-2,
from_block_size, num_rand_blocks*to_block_size].
"""
num_windows = from_seq_length // from_block_size - 2
rand_mask = tf.reshape(
tf.gather(to_blocked_mask, rand_attn, batch_dims=1), [
-1, num_attention_heads, num_windows,
num_rand_blocks * from_block_size
])
rand_mask = tf.einsum("BLQ,BHLK->BHLQK", from_blocked_mask[:, 1:-1],
rand_mask)
return rand_mask
def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
"""Create 4D attention mask from a 3D blocked tensor mask.
Args:
from_blocked_mask: 3D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: 3D Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
Returns:
float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4,
from_block_size, 3*to_block_size].
"""
exp_blocked_to_pad = tf.concat(
[to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2],
to_blocked_mask[:, 3:-1]], 2)
band_mask = tf.einsum(
"BLQ,BLK->BLQK", from_blocked_mask[:, 2:-2], exp_blocked_to_pad)
band_mask = tf.expand_dims(band_mask, 1)
return band_mask
def create_attention_mask_from_input_mask(from_mask, to_mask):
"""Create attention mask from a 2D tensor mask.
Args:
from_mask: float32 Tensor of shape [batch_size, from_seq_length].
to_mask: float32 Tensor of shape [batch_size, to_seq_length].
Returns:
float32 Tensor of shape [batch_size, 1, from_seq_length, to_seq_length].
"""
mask = tf.einsum("BF,BT->BFT", from_mask, to_mask)
# expand to create a slot for heads.
mask = tf.expand_dims(mask, 1)
return mask
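# Illustrative example: from_mask of shape [2, 5] and to_mask of shape [2, 7]
# produce a mask of shape [2, 1, 5, 7]; entry [b, 0, f, t] is
# from_mask[b, f] * to_mask[b, t].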
def bigbird_block_sparse_attention(query_layer,
key_layer,
value_layer,
band_mask,
from_mask,
to_mask,
from_blocked_mask,
to_blocked_mask,
rand_attn,
num_attention_heads,
size_per_head,
num_rand_blocks,
from_seq_length,
to_seq_length,
from_block_size,
to_block_size):
"""BigBird attention sparse calculation using blocks in linear time.
Assumes from_seq_length//from_block_size == to_seq_length//to_block_size.
A pure function with a long argument list to allow easy use outside our
framework.
Args:
query_layer: float Tensor of shape [batch_size, num_attention_heads,
from_seq_length, size_per_head]
key_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
value_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
band_mask: float32 Tensor of shape [batch_size, 1,
from_seq_length//from_block_size-4, from_block_size, 3*to_block_size].
The values should be 1 or 0. The attention scores will effectively be
set to -infinity for any positions in the mask that are 0, and will be
unchanged for positions that are 1.
from_mask: float32 Tensor of shape [batch_size, 1, from_seq_length, 1].
The values should be 1 or 0. The attention scores will effectively be set
to -infinity for any positions in the mask that are 0, and will be
unchanged for positions that are 1.
to_mask: float32 Tensor of shape [batch_size, 1, 1, to_seq_length].
The values should be 1 or 0. The attention scores will effectively be set
to -infinity for any positions in the mask that are 0, and will be
unchanged for positions that are 1.
from_blocked_mask: float32 Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
Same as from_mask, just reshaped.
to_blocked_mask: float32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
Same as to_mask, just reshaped.
rand_attn: int32 Tensor of shape [num_attention_heads,
from_seq_length//from_block_size-2, num_rand_blocks] specifying which
blocks to attend to for each from sequence block (except 2 global ones).
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
num_rand_blocks: int. Number of random chunks per row.
from_seq_length: int. length of from sequence.
to_seq_length: int. length of to sequence.
from_block_size: int. size of block in from sequence.
to_block_size: int. size of block in to sequence.
Returns:
float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
size_per_head].
"""
assert from_seq_length//from_block_size == to_seq_length//to_block_size
# repeat for batch size
batch_size = utils.get_shape_list(query_layer)[0]
rand_attn = tf.expand_dims(rand_attn, 0)
rand_attn = tf.repeat(rand_attn, batch_size, 0)
rand_mask = create_rand_mask_from_inputs(
from_blocked_mask, to_blocked_mask, rand_attn,
num_attention_heads, num_rand_blocks,
from_seq_length, from_block_size)
# Define shorthands
# b = batch_size
h = num_attention_heads
r = num_rand_blocks
d = size_per_head
m = from_seq_length
n = to_seq_length
wm = from_block_size
wn = to_block_size
blocked_query_matrix = tf.reshape(query_layer, (-1, h, m // wm, wm, d))
blocked_key_matrix = tf.reshape(key_layer, (-1, h, n // wn, wn, d))
blocked_value_matrix = tf.reshape(value_layer, (-1, h, n // wn, wn, d))
gathered_key = tf.reshape(
tf.gather(blocked_key_matrix, rand_attn, batch_dims=2, name="gather_key"),
(-1, h, m // wm - 2, r * wn, d)) # [b, h, n//wn-2, r, wn, -1]
gathered_value = tf.reshape(
tf.gather(
blocked_value_matrix, rand_attn, batch_dims=2, name="gather_value"),
(-1, h, m // wm - 2, r * wn, d)) # [b, h, n//wn-2, r, wn, -1]
first_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, 0],
key_layer) # [b, h, wm, -1] x [b, h, n, -1] ==> [b, h, wm, n]
first_product = tf.multiply(first_product, 1.0 / np.sqrt(d))
first_product += (1.0 - to_mask) * -10000.0
first_attn_weights = tf.nn.softmax(first_product) # [b, h, wm, n]
first_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", first_attn_weights,
value_layer) # [b, h, wm, n] x [b, h, n, -1] ==> [b, h, wm, -1]
first_context_layer = tf.expand_dims(first_context_layer, 2)
second_key_mat = tf.concat([
blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1],
blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1],
gathered_key[:, :, 0]], 2) # [b, h, (4+r)*wn, -1]
second_value_mat = tf.concat([
blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1],
blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1],
gathered_value[:, :, 0]], 2) # [b, h, (4+r)*wn, -1]
second_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, 1], second_key_mat
) # [b, h, wm, -1] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, (4+r)*wn]
second_seq_pad = tf.concat([
to_mask[:, :, :, :3 * wn], to_mask[:, :, :, -wn:],
tf.ones_like(rand_mask[:, :1, 0, :1])], 3)
second_rand_pad = tf.concat(
[tf.ones_like(second_product[:, :, :, :4 * wn]), rand_mask[:, :, 0]], 3)
second_product = tf.multiply(second_product, 1.0 / np.sqrt(d))
second_product += (1.0 -
tf.minimum(second_seq_pad, second_rand_pad)) * -10000.0
second_attn_weights = tf.nn.softmax(second_product) # [b , h, wm, (4+r)*wn]
second_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", second_attn_weights, second_value_mat
) # [b, h, wm, (4+r)*wn] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, -1]
second_context_layer = tf.expand_dims(second_context_layer, 2)
exp_blocked_key_matrix = tf.concat([
blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2],
blocked_key_matrix[:, :, 3:-1]], 3) # [b, h, m//wm-4, 3*wn, -1]
exp_blocked_value_matrix = tf.concat([
blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2],
blocked_value_matrix[:, :, 3:-1]], 3) # [b, h, m//wm-4, 3*wn, -1]
middle_query_matrix = blocked_query_matrix[:, :, 2:-2]
inner_band_product = tf.einsum(
"BHLQD,BHLKD->BHLQK", middle_query_matrix, exp_blocked_key_matrix
) # [b, h, m//wm-4, wm, -1] x [b, h, m//wm-4, 3*wn, -1]
# ==> [b, h, m//wm-4, wm, 3*wn]
inner_band_product = tf.multiply(inner_band_product, 1.0 / np.sqrt(d))
rand_band_product = tf.einsum(
"BHLQD,BHLKD->BHLQK", middle_query_matrix, gathered_key[:, :, 1:-1]
) # [b, h, m//wm-4, wm, -1] x [b, h, m//wm-4, r*wn, -1]
# ==> [b, h, m//wm-4, wm, r*wn]
rand_band_product = tf.multiply(rand_band_product, 1.0 / np.sqrt(d))
first_band_product = tf.einsum(
"BHLQD,BHKD->BHLQK", middle_query_matrix, blocked_key_matrix[:, :, 0]
) # [b, h, m//wm-4, wm, -1] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, wn]
first_band_product = tf.multiply(first_band_product, 1.0 / np.sqrt(d))
last_band_product = tf.einsum(
"BHLQD,BHKD->BHLQK", middle_query_matrix, blocked_key_matrix[:, :, -1]
) # [b, h, m//wm-4, wm, -1] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, wn]
last_band_product = tf.multiply(last_band_product, 1.0 / np.sqrt(d))
inner_band_product += (1.0 - band_mask) * -10000.0
first_band_product += (
1.0 - tf.expand_dims(to_mask[:, :, :, :wn], 3)) * -10000.0
last_band_product += (
1.0 - tf.expand_dims(to_mask[:, :, :, -wn:], 3)) * -10000.0
rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * -10000.0
band_product = tf.concat([
first_band_product, inner_band_product, rand_band_product,
last_band_product], -1) # [b, h, m//wm-4, wm, (5+r)*wn]
attn_weights = tf.nn.softmax(band_product) # [b, h, m//wm-4, wm, (5+r)*wn]
context_layer = tf.einsum(
"BHLQK,BHLKD->BHLQD", attn_weights[:, :, :, :, wn:4 * wn],
exp_blocked_value_matrix
) # [b, h, m//wm-4, wm, 3*wn] x [b, h, m//wm-4, 3*wn, -1]
# ==> [b, h, m//wm-4, wm, -1]
context_layer += tf.einsum(
"BHLQK,BHLKD->BHLQD", attn_weights[:, :, :, :, 4 * wn:-wn],
gathered_value[:, :, 1:-1]
) # [b, h, m//wm-4, wm, r*wn] x [b, h, m//wm-4, r*wn, -1]
# ==> [b, h, m//wm-4, wm, -1]
context_layer += tf.einsum(
"BHLQK,BHKD->BHLQD", attn_weights[:, :, :, :, :wn],
blocked_value_matrix[:, :, 0]
) # [b, h, m//wm-4, wm, wn] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, -1]
context_layer += tf.einsum(
"BHLQK,BHKD->BHLQD", attn_weights[:, :, :, :, -wn:],
blocked_value_matrix[:, :, -1]
) # [b, h, m//wm-4, wm, wn] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, -1]
second_last_key_mat = tf.concat([
blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3],
blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1],
gathered_key[:, :, -1]], 2) # [b, h, (4+r)*wn, -1]
second_last_value_mat = tf.concat([
blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3],
blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1],
gathered_value[:, :, -1]], 2) # [b, h, (4+r)*wn, -1]
second_last_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, -2], second_last_key_mat
) # [b, h, wm, -1] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, (4+r)*wn]
second_last_seq_pad = tf.concat([
to_mask[:, :, :, :wn], to_mask[:, :, :, -3 * wn:],
tf.ones_like(rand_mask[:, :1, 0, :1])], 3)
second_last_rand_pad = tf.concat(
[tf.ones_like(second_last_product[:, :, :, :4 * wn]),
rand_mask[:, :, -1]], 3)
second_last_product = tf.multiply(second_last_product, 1.0 / np.sqrt(d))
second_last_product += (
1.0 - tf.minimum(second_last_seq_pad, second_last_rand_pad)) * -10000.0
second_last_attn_weights = tf.nn.softmax(
second_last_product) # [b, h, wm, (4+r)*wn]
second_last_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", second_last_attn_weights, second_last_value_mat
) # [b, h, wm, (4+r)*wn] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, -1]
second_last_context_layer = tf.expand_dims(second_last_context_layer, 2)
last_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, -1],
key_layer) # [b, h, wm, -1] x [b, h, n, -1] ==> [b, h, wm, n]
last_product = tf.multiply(last_product, 1.0 / np.sqrt(d))
last_product += (1.0 - to_mask) * -10000.0
last_attn_weights = tf.nn.softmax(last_product) # [b, h, wm, n]
last_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", last_attn_weights,
value_layer) # [b, h, wm, n] x [b, h, n, -1] ==> [b, h, wm, -1]
last_context_layer = tf.expand_dims(last_context_layer, 2)
context_layer = tf.concat([
first_context_layer, second_context_layer, context_layer,
second_last_context_layer, last_context_layer
], 2)
context_layer = tf.reshape(context_layer, (-1, h, m, d)) * from_mask
context_layer = tf.transpose(context_layer, (0, 2, 1, 3))
return context_layer
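# Illustrative sketch (not part of the original module): building the mask
# family expected by bigbird_block_sparse_attention from a single 2D padding
# mask, mirroring what the encoder stack does. Sizes are arbitrary example
# values; tf refers to the module-level TensorFlow import.
def _example_block_sparse_mask_family():
  batch_size, seq_length, block_size = 2, 128, 16
  input_mask = tf.ones([batch_size, seq_length], dtype=tf.float32)
  blocked_mask = tf.reshape(
      input_mask, (batch_size, seq_length // block_size, block_size))
  from_mask = tf.reshape(input_mask, (batch_size, 1, seq_length, 1))
  to_mask = tf.reshape(input_mask, (batch_size, 1, 1, seq_length))
  band_mask = create_band_mask_from_inputs(blocked_mask, blocked_mask)
  # These five tensors (plus rand_attn) are the mask inputs of
  # bigbird_block_sparse_attention above.
  return band_mask, from_mask, to_mask, blocked_mask, blocked_mask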
class MultiHeadedAttentionLayer(tf.keras.layers.Layer):
"""A multi-headed attention layer.
It implements the following types of multi-headed attention:
- original_full attention from "Attention Is All You Need".
- simulated_sparse attention from BigBird with a full quadratic implementation.
- block_sparse attention from BigBird with a memory-efficient linear implementation.
"""
def __init__(self,
attention_type,
num_attention_heads=1,
size_per_head=512,
num_rand_blocks=3,
from_seq_length=1024,
to_seq_length=1024,
from_block_size=64,
to_block_size=64,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
use_bias=True,
seed=None,
query_act=None,
key_act=None,
value_act=None,
name=None):
"""Constructor for a multi-headed attention layer.
Args:
attention_type: Type of attention, needs to be one of ['original_full',
'simulated_sparse', 'block_sparse'].
num_attention_heads: (optional) int. Number of attention heads.
size_per_head: (optional) int. Size of each attention head.
num_rand_blocks: (optional) int. Number of random chunks per row.
from_seq_length: int. (optional) length of from sequence.
to_seq_length: int. (optional) length of to sequence.
from_block_size: (optional) int. size of block in from sequence.
to_block_size: (optional) int. size of block in to sequence.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: (optional) float. Range of the weight initializer.
use_bias: Whether the layer uses a bias vector.
seed: (optional) int. Random seed for generating the random mask.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
name: The name scope of this layer.
"""
super(MultiHeadedAttentionLayer, self).__init__(name=name)
self.num_attention_heads = num_attention_heads
self.size_per_head = size_per_head
self.num_rand_blocks = num_rand_blocks
self.from_seq_length = from_seq_length
self.to_seq_length = to_seq_length
self.from_block_size = from_block_size
self.to_block_size = to_block_size
self.seed = seed
with tf.compat.v1.variable_scope(name):
self.query_layer = utils.Dense3dLayer(
num_attention_heads, size_per_head,
utils.create_initializer(initializer_range), query_act,
"query", head_first=True, use_bias=use_bias)
self.key_layer = utils.Dense3dLayer(
num_attention_heads, size_per_head,
utils.create_initializer(initializer_range), key_act,
"key", head_first=True, use_bias=use_bias)
self.value_layer = utils.Dense3dLayer(
num_attention_heads, size_per_head,
utils.create_initializer(initializer_range), value_act,
"value", head_first=True, use_bias=use_bias)
if attention_type == "original_full":
logging.info("**** Using original full attention ****")
self.attention_dropout = recompute_grad.RecomputingDropout(
attention_probs_dropout_prob)
self.attn_impl = self.original_full_attention
elif attention_type == "simulated_sparse":
logging.info("**** Using simulated sparse attention ****")
self.attention_dropout = lambda x, training=None: x
self.rand_attn = self.generate_rand_attn_list()
self.rand_block_mask = self.convert_attn_list_to_mask(self.rand_attn)
self.attn_impl = self.bigbird_simulated_attention
elif attention_type == "block_sparse":
logging.info("**** Using block sparse attention ****")
assert from_seq_length//from_block_size == to_seq_length//to_block_size, (
"Error: the number of blocks needs to be the same!")
self.attention_dropout = None
self.rand_attn = self.generate_rand_attn_list()
self.attn_impl = self.bigbird_block_sparse_attention
else:
raise NotImplementedError(
"Attention type {} is not implemented".format(attention_type))
def generate_rand_attn_list(self):
# generate random attention and corresponding masks
if self.seed is not None:
np.random.seed(self.seed)
# old plans used in paper
if self.from_seq_length in [1024, 2048, 3072, 4096]:
rand_attn = [
bigbird_block_rand_mask( # pylint: disable=g-complex-comprehension
MAX_SEQ_LEN, MAX_SEQ_LEN,
self.from_block_size, self.to_block_size, self.num_rand_blocks,
last_idx=1024
)[:(self.from_seq_length // self.from_block_size - 2)]
for _ in range(self.num_attention_heads)
]
else:
plan_from_length, plan_num_rand_blocks = get_rand_attn_plan(
self.from_seq_length, self.from_block_size, self.num_rand_blocks)
rand_attn = bigbird_block_rand_mask_with_head(
seq_length=self.from_seq_length,
block_size=self.from_block_size,
num_heads=self.num_attention_heads,
plan_from_length=plan_from_length,
plan_num_rand_blocks=plan_num_rand_blocks)
rand_attn = np.stack(rand_attn, axis=0)
return tf.constant(rand_attn, dtype=tf.int32)
def convert_attn_list_to_mask(self, rand_attn):
temp_mask = [
full_bigbird_mask( # pylint: disable=g-complex-comprehension
self.from_seq_length, self.to_seq_length,
self.from_block_size, self.to_block_size,
rand_attn=rand_attn[i])
for i in range(self.num_attention_heads)
]
temp_mask = np.stack(temp_mask, axis=0)
temp_mask = np.array(temp_mask, dtype=bool)
rand_block_mask = tf.constant(temp_mask, dtype=tf.bool) # [N, F, T]
return tf.cast(rand_block_mask, tf.float32)
def original_full_attention(self,
query_layer,
key_layer,
value_layer,
masks,
training=None):
"""Full quadratic attention calculation.
Args:
query_layer: float Tensor of shape [batch_size, num_attention_heads,
from_seq_length, size_per_head]
key_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
value_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
masks: a list containing float32 Tensor representing attention_mask
of shape [batch_size, from_seq_length, to_seq_length].
The values should be 1 or 0. The attention scores will effectively be
set to -infinity for any positions in the mask that are 0, and
will be unchanged for positions that are 1.
training: Boolean indicating whether the call is training or inference.
Returns:
float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
size_per_head].
"""
attention_mask = masks[0]
# Directly take n^2 dot product between "query" and "key".
attention_scores = tf.einsum("BNFH,BNTH->BNFT", query_layer, key_layer)
attention_scores = tf.multiply(attention_scores,
1.0 / np.sqrt(float(self.size_per_head)))
if attention_mask is not None:
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - attention_mask) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.attention_dropout(attention_probs, training=training)
# `context_layer` = [B, F, N, H]
context_layer = tf.einsum("BNFT,BNTH->BFNH", attention_probs, value_layer)
return context_layer
def bigbird_simulated_attention(self,
query_layer,
key_layer,
value_layer,
masks,
training=None):
"""BigBird attention calculation using masks in quadratic time.
Args:
query_layer: float Tensor of shape [batch_size, num_attention_heads,
from_seq_length, size_per_head]
key_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
value_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
masks: a list containing float32 Tensor representing attention_mask
of shape [batch_size, from_seq_length, to_seq_length].
The values should be 1 or 0. The attention scores will effectively be
set to -infinity for any positions in the mask that are 0, and
will be unchanged for positions that are 1.
training: Boolean indicating whether the call is training or inference.
Returns:
float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
size_per_head].
"""
attention_mask = masks[0]
rand_block_mask = tf.expand_dims(self.rand_block_mask, 0) # [1, N, F, T]
if attention_mask is not None:
attention_mask = tf.minimum(attention_mask, rand_block_mask)
else:
attention_mask = rand_block_mask
return self.original_full_attention(
query_layer, key_layer, value_layer, [attention_mask],
training=training)
def bigbird_block_sparse_attention(self,
query_layer,
key_layer,
value_layer,
masks,
training=None):
"""BigBird attention sparse calculation using blocks in linear time.
Args:
query_layer: float Tensor of shape [batch_size, num_attention_heads,
from_seq_length, size_per_head]
key_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
value_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
masks: A list of six elements; positions 1 to 5 hold the masks used in
BigBird attention. Position 0 (first element) is not used and can be left
as None. In the masks,
the values should be 1 or 0. The attention scores will effectively
be set to -infinity for any positions in the mask that are 0,
and will be unchanged for positions that are 1.
"None": Not needed.
"band_mask": (optional) float32 Tensor of shape
[batch_size, 1, from_seq_length//from_block_size-4,
from_block_size, 3*to_block_size].
"from_mask": (optional) float32 Tensor of shape
[batch_size, 1, from_seq_length, 1].
"to_mask": (optional) float32 Tensor of shape
[batch_size, 1, 1, to_seq_length].
"from_blocked_mask": (optional) float32 Tensor of shape
[batch_size, from_seq_length//from_block_size, from_block_size].
Same as from_mask, just reshaped.
"to_blocked_mask": (optional) float32 Tensor of shape
[batch_size, to_seq_length//to_block_size, to_block_size].
Same as to_mask, just reshaped.
training: Boolean indicating whether the call is training or inference.
Returns:
float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
size_per_head].
"""
(_, band_mask, from_mask, to_mask,
from_blocked_mask, to_blocked_mask) = masks
return bigbird_block_sparse_attention(
query_layer, key_layer, value_layer,
band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask,
self.rand_attn, self.num_attention_heads, self.size_per_head,
self.num_rand_blocks, self.from_seq_length, self.to_seq_length,
self.from_block_size, self.to_block_size)
def call(self,
from_tensor,
to_tensor,
masks,
cache=None,
decode_i=None,
training=None):
"""Implements a multi-headed attention layer from from_tensor to to_tensor.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width]
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
masks: A list of masks used in different attention. Only relevant masks
need to be supplied and at other positions place None. In the mask,
the values should be 1 or 0. The attention scores will effectively
be set to -infinity for any positions in the mask that are 0,
and will be unchanged for positions that are 1.
"attention_mask": (optional) float32 Tensor of shape
[batch_size, from_seq_length, to_seq_length].
"band_mask": (optional) float32 Tensor of shape
[batch_size, 1, from_seq_length//from_block_size-4,
from_block_size, 3*to_block_size].
"from_mask": (optional) float32 Tensor of shape
[batch_size, 1, from_seq_length, 1].
"to_mask": (optional) float32 Tensor of shape
[batch_size, 1, 1, to_seq_length].
"from_blocked_mask": (optional) float32 Tensor of shape
[batch_size, from_seq_length//from_block_size, from_block_size].
Same as from_mask, just reshaped.
"to_blocked_mask": (optional) float32 Tensor of shape
[batch_size, to_seq_length//to_block_size, to_block_size].
Same as to_mask, just reshaped.
cache: (Used during prediction) A dictionary with tensors containing
results of previous attentions. The dictionary must have the items:
{"k": tensor with shape
[batch_size, max_len, num_attention_heads, size_per_head],
"v": tensor with shape
[batch_size, max_len, num_attention_heads, size_per_head]}
decode_i: (Used during prediction) current location of decoding
training: Boolean indicating whether the call is training or inference.
Returns:
float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
size_per_head].
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
NotImplementedError: For unknown attention type.
"""
# Scalar dimensions referenced here:
# b = batch size (number of sequences)
# m = `from_tensor` sequence length
# n = `to_tensor` sequence length
# h = `num_attention_heads`
# d = `size_per_head`
# `query` = [b, h, m, d]
query = self.query_layer(from_tensor)
# `key` = [b, h, n, d]
key = self.key_layer(to_tensor)
# `value_layer` = [b, h, n, d]
value = self.value_layer(to_tensor)
if cache is not None and decode_i is not None:
max_len = utils.get_shape_list(cache["k"])[2]
indices_select = tf.reshape(
tf.one_hot(decode_i, max_len, dtype=to_tensor.dtype),
[1, 1, max_len, 1])
key = cache["k"] + key * indices_select
value = cache["v"] + value * indices_select
cache["k"] = key
cache["v"] = value
contextual_output = self.attn_impl(
query, key, value, masks, training=training)
return contextual_output
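# Illustrative sketch (not part of the original module): instantiating the
# layer with full attention and running it on dummy inputs. Sizes are
# arbitrary example values; only the dense attention_mask is needed for
# 'original_full'.
def _example_multi_headed_attention():
  batch_size, seq_length, num_heads, size_per_head = 2, 16, 4, 8
  layer = MultiHeadedAttentionLayer(
      attention_type="original_full",
      num_attention_heads=num_heads,
      size_per_head=size_per_head,
      from_seq_length=seq_length,
      to_seq_length=seq_length,
      name="self")
  x = tf.random.normal([batch_size, seq_length, num_heads * size_per_head])
  input_mask = tf.ones([batch_size, seq_length], dtype=tf.float32)
  attention_mask = create_attention_mask_from_input_mask(input_mask, input_mask)
  # Output shape: [batch_size, seq_length, num_heads, size_per_head].
  return layer(x, x, [attention_mask], training=False)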
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common flag definitions."""
import json
import sys
from absl import flags
from absl import logging
import bigbird
import tensorflow.compat.v2 as tf
import sentencepiece as spm
# pylint: disable=g-import-not-at-top
if sys.version_info >= (3, 9):
import importlib.resources as importlib_resources
else:
import importlib_resources
############################### FLAGS UTILS ####################################
FLAGS = flags.FLAGS
DEFINE_bool = flags.DEFINE_bool
DEFINE_enum = flags.DEFINE_enum
DEFINE_float = flags.DEFINE_float
DEFINE_integer = flags.DEFINE_integer
DEFINE_string = flags.DEFINE_string
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
# Basic model config flags
flags.DEFINE_float(
"attention_probs_dropout_prob", 0.1,
"The dropout probability for attention coefficients when using "
"original_full attention.")
flags.DEFINE_string(
"hidden_act", "gelu",
"The non-linear activation function (function or string) in the encoder "
"and pooler.")
flags.DEFINE_float(
"hidden_dropout_prob", 0.1,
"The dropout probability for all fully connected layers in the embeddings, "
"encoder, decoder, and pooler.")
flags.DEFINE_integer(
"hidden_size", 768,
"Size of the transformer layers and the pooler layer.")
flags.DEFINE_float(
"initializer_range", 0.02,
"The stdev of the truncated_normal_initializer for initializing all "
"weight matrices.")
flags.DEFINE_integer(
"intermediate_size", 3072,
"The size of intermediate (i.e. feed-forward) layer in the Transformer.")
flags.DEFINE_integer(
"max_position_embeddings", 4096,
"The size of the position embeddings matrix, which dictates the maximum "
"sequence length for which the model can be run.")
flags.DEFINE_integer(
"num_attention_heads", 12,
"Number of attention heads for each attention layer in the Transformer.")
flags.DEFINE_integer(
"num_hidden_layers", 12,
"Number of hidden layers in the model (same for encoder and decoder).")
flags.DEFINE_integer(
"type_vocab_size", 2,
"The vocabulary size of the `token_type_ids`.")
flags.DEFINE_bool(
"use_bias", True,
"Whether to use bias for key/query/value.")
flags.DEFINE_bool(
"rescale_embedding", False,
"Whether to rescale word embedding by hidden dimensions.")
flags.DEFINE_bool(
"use_gradient_checkpointing", False,
"Whether to recompute encoder fwd pass during back prop for saving memory.")
flags.DEFINE_string(
"scope", "bert",
"Variable scope name.")
flags.DEFINE_string(
"vocab_model_file", "gpt2",
"The SentencePiece model file for the vocabulary. The standard "
"gpt2 and pegasus vocabs can be selected by name as shortcuts.")
# Simulated and Block attention settings
flags.DEFINE_enum(
"attention_type", "block_sparse",
["original_full", "simulated_sparse", "block_sparse"],
"Selecting attention implementation. "
"'original_full': full attention from original bert. "
"'simulated_sparse': simulated sparse attention. "
"'block_sparse': blocked implementation of sparse attention.")
flags.DEFINE_enum(
"norm_type", "postnorm",
["prenorm", "postnorm"],
"Selecting when to apply layer-norm. "
"'prenorm': Before attention layer, e.g. Pegasus. "
"'postnorm': After attention layer, e.g. Bert.")
flags.DEFINE_integer(
"block_size", 16,
"The block size for the attention mask.")
flags.DEFINE_integer(
"num_rand_blocks", 3,
"Number of random blocks per row.")
# Adaptive optimizer configs
flags.DEFINE_float(
"weight_decay_rate", 0.01,
"L2 penalty as weight decay to be used.")
flags.DEFINE_float(
"optimizer_beta1", 0.9,
"The exponential decay rate for the 1st moment estimates.")
flags.DEFINE_float(
"optimizer_beta2", 0.999,
"The exponential decay rate for the 2nd moment estimates.")
flags.DEFINE_float(
"optimizer_epsilon", 1e-6,
"Adaptivity trade-off parameter.")
# TPU settings
flags.DEFINE_bool(
"use_tpu", False,
"Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located. If not "
"specified, we will attempt to automatically detect the zone from "
"metadata.")
flags.DEFINE_string(
"tpu_job_name", None,
"Name of TPU worker, if anything other than 'tpu_worker'")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"master", None,
"[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_string(
"iterations_per_loop", "1000",
"How many steps to make in each estimator call.")
def as_dictionary():
"""Get current config from flag."""
# Resolve vocab file location from hotword
if FLAGS.vocab_model_file == "gpt2":
FLAGS.vocab_model_file = str(importlib_resources.files(bigbird).joinpath(
"vocab/gpt2.model"))
elif FLAGS.vocab_model_file == "pegasus":
FLAGS.vocab_model_file = str(importlib_resources.files(bigbird).joinpath(
"vocab/pegasus.model"))
config = {
# transformer basic configs
"attention_probs_dropout_prob": FLAGS.attention_probs_dropout_prob,
"hidden_act": FLAGS.hidden_act,
"hidden_dropout_prob": FLAGS.hidden_dropout_prob,
"hidden_size": FLAGS.hidden_size,
"initializer_range": FLAGS.initializer_range,
"intermediate_size": FLAGS.intermediate_size,
"max_position_embeddings": FLAGS.max_position_embeddings,
"num_attention_heads": FLAGS.num_attention_heads,
"num_hidden_layers": FLAGS.num_hidden_layers,
"type_vocab_size": FLAGS.type_vocab_size,
"scope": FLAGS.scope,
"use_bias": FLAGS.use_bias,
"rescale_embedding": FLAGS.rescale_embedding,
"use_gradient_checkpointing": FLAGS.use_gradient_checkpointing,
"vocab_model_file": FLAGS.vocab_model_file,
# sparse mask configs
"attention_type": FLAGS.attention_type,
"norm_type": FLAGS.norm_type,
"block_size": FLAGS.block_size,
"num_rand_blocks": FLAGS.num_rand_blocks,
# common bert configs
"data_dir": FLAGS.data_dir,
"output_dir": FLAGS.output_dir,
"init_checkpoint": FLAGS.init_checkpoint,
"max_encoder_length": FLAGS.max_encoder_length,
"substitute_newline": FLAGS.substitute_newline,
"do_train": FLAGS.do_train,
"do_eval": FLAGS.do_eval,
"do_export": FLAGS.do_export,
"train_batch_size": FLAGS.train_batch_size,
"eval_batch_size": FLAGS.eval_batch_size,
"optimizer": FLAGS.optimizer,
"learning_rate": FLAGS.learning_rate,
"num_train_steps": FLAGS.num_train_steps,
"num_warmup_steps": FLAGS.num_warmup_steps,
"save_checkpoints_steps": FLAGS.save_checkpoints_steps,
"weight_decay_rate": FLAGS.weight_decay_rate,
"optimizer_beta1": FLAGS.optimizer_beta1,
"optimizer_beta2": FLAGS.optimizer_beta2,
"optimizer_epsilon": FLAGS.optimizer_epsilon,
# TPU settings
"use_tpu": FLAGS.use_tpu,
"tpu_name": FLAGS.tpu_name,
"tpu_zone": FLAGS.tpu_zone,
"tpu_job_name": FLAGS.tpu_job_name,
"gcp_project": FLAGS.gcp_project,
"master": FLAGS.master,
"num_tpu_cores": FLAGS.num_tpu_cores,
"iterations_per_loop": FLAGS.iterations_per_loop,
}
# pretraining dedicated flags
if hasattr(FLAGS, "max_predictions_per_seq"):
config["max_predictions_per_seq"] = FLAGS.max_predictions_per_seq
if hasattr(FLAGS, "masked_lm_prob"):
config["masked_lm_prob"] = FLAGS.masked_lm_prob
if hasattr(FLAGS, "max_eval_steps"):
config["max_eval_steps"] = FLAGS.max_eval_steps
if hasattr(FLAGS, "preprocessed_data"):
config["preprocessed_data"] = FLAGS.preprocessed_data
if hasattr(FLAGS, "use_nsp"):
config["use_nsp"] = FLAGS.use_nsp
# classifier dedicated flags
if hasattr(FLAGS, "num_labels"):
config["num_labels"] = FLAGS.num_labels
# summarization dedicated flags
if hasattr(FLAGS, "max_decoder_length"):
config["max_decoder_length"] = FLAGS.max_decoder_length
if hasattr(FLAGS, "trainable_bias"):
config["trainable_bias"] = FLAGS.trainable_bias
if hasattr(FLAGS, "couple_encoder_decoder"):
config["couple_encoder_decoder"] = FLAGS.couple_encoder_decoder
if hasattr(FLAGS, "beam_size"):
config["beam_size"] = FLAGS.beam_size
if hasattr(FLAGS, "alpha"):
config["alpha"] = FLAGS.alpha
if hasattr(FLAGS, "label_smoothing"):
config["label_smoothing"] = FLAGS.label_smoothing
# calculate vocab
sp_model = spm.SentencePieceProcessor()
sp_proto = tf.io.gfile.GFile(config["vocab_model_file"], "rb").read()
sp_model.LoadFromSerializedProto(sp_proto)
vocab_size = sp_model.GetPieceSize()
config["vocab_size"] = vocab_size
return config
def save(path):
"""Save current flag config."""
config = as_dictionary()
with tf.io.gfile.GFile(path, "w") as f:
json.dump(config, f, indent=4, sort_keys=True)
# log flags
max_len = max([len(ii) for ii in config.keys()])
fmt_string = "\t%" + str(max_len) + "s : %s"
logging.info("Arguments:")
for key, value in sorted(config.items()):
logging.info(fmt_string, key, value)
return config
def load(path):
"""Set flag from saved config."""
with tf.io.gfile.GFile(path) as f:
config = json.load(f)
# log and set flags
max_len = max([len(ii) for ii in config.keys()])
fmt_string = "\t%" + str(max_len) + "s : %s"
logging.info("Arguments:")
for key, value in config.items():
if hasattr(FLAGS, key):
logging.info(fmt_string, key, value)
setattr(FLAGS, key, value)
return config
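# Illustrative sketch (not part of the original module): typical round-trip of
# the flag config. It assumes the run script has already defined the
# task-specific flags (data_dir, output_dir, ...) that as_dictionary() reads
# and that absl has parsed the command line; the path is a hypothetical
# example location.
def _example_save_and_reload(path="/tmp/bigbird_config.json"):
  config = save(path)    # resolves flags into a dict and writes it as JSON
  restored = load(path)  # reads the JSON back and sets any matching flags
  assert config["hidden_size"] == restored["hidden_size"]
  return restored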
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
import re
from absl import logging
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import resource_variable_ops
def get_linear_warmup_linear_decay_lr(init_lr, num_train_steps,
num_warmup_steps):
"""Calculate learning rate with linear warmup and linear decay."""
global_step = tf.compat.v1.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.compat.v1.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_step, tf.float32)
warmup_steps_float = tf.cast(num_warmup_steps, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
return learning_rate
def get_linear_warmup_rsqrt_decay_lr(init_lr, hidden_size,
num_warmup_steps):
"""Calculate learning rate with linear warmup and rsqrt decay."""
num_warmup_steps = tf.cast(num_warmup_steps, tf.float32)
global_step = tf.compat.v1.train.get_or_create_global_step()
global_step = tf.cast(global_step, tf.float32)
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
learning_rate *= tf.math.rsqrt(tf.cast(hidden_size, tf.float32))
# Apply linear warmup
learning_rate *= tf.minimum(1.0, global_step / num_warmup_steps)
# Apply rsqrt decay
learning_rate *= tf.math.rsqrt(tf.maximum(global_step, num_warmup_steps))
return learning_rate
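# Illustrative sketch (not part of the original module): the rsqrt schedule
# above written in plain Python, to make the warmup/decay shape explicit.
# Hyperparameter values are arbitrary examples.
def _example_rsqrt_schedule(step, init_lr=0.5, hidden_size=768,
                            num_warmup_steps=10000):
  lr = init_lr * hidden_size ** -0.5
  lr *= min(1.0, step / num_warmup_steps)      # linear warmup
  lr *= max(step, num_warmup_steps) ** -0.5    # rsqrt decay after warmup
  return lr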
def get_optimizer(params, learning_rate):
"""Gets the optimizer based on the hparams and current mode (TPU vs. CPU/GPU).
Args:
params: A dictionary containing training hyperparameters.
learning_rate: A float32 scalar.
Returns:
An optimizer instance (wrapped in a CrossShardOptimizer when running on TPU).
"""
optimizer = None
if params["optimizer"] == "Adafactor":
try:
from tensor2tensor.utils import adafactor # pylint: disable=g-import-not-at-top
optimizer = adafactor.AdafactorOptimizer(learning_rate=learning_rate)
except ImportError:
logging.error("tensor2tensor not installed. Cannot use Adafactor. "
"Defaulting to Adam.")
params["optimizer"] = "Adam"
if params["optimizer"] == "Adam":
optimizer = tf.compat.v1.train.AdamOptimizer(
learning_rate,
beta1=params["optimizer_beta1"],
beta2=params["optimizer_beta2"],
epsilon=params["optimizer_epsilon"])
if params["optimizer"] == "AdamWeightDecay":
optimizer = AdamWeightDecayOptimizer(
learning_rate,
weight_decay_rate=params["weight_decay_rate"],
beta_1=params["optimizer_beta1"],
beta_2=params["optimizer_beta2"],
epsilon=params["optimizer_epsilon"],
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if params["optimizer"] == "SGD":
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
if optimizer is None:
raise ValueError("Unknown optimizer: {}.".format(params["optimizer"]))
if params["use_tpu"]:
# Average the gradients across TPU cores.
optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)
return optimizer
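# Illustrative sketch (not part of the original module): the minimal params
# dictionary consulted by get_optimizer(), filled with arbitrary example
# values.
def _example_get_optimizer():
  params = {
      "optimizer": "AdamWeightDecay",
      "weight_decay_rate": 0.01,
      "optimizer_beta1": 0.9,
      "optimizer_beta2": 0.999,
      "optimizer_epsilon": 1e-6,
      "use_tpu": False,
  }
  learning_rate = tf.constant(1e-4, dtype=tf.float32)
  return get_optimizer(params, learning_rate)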
class AdamWeightDecayOptimizer(tf.compat.v1.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def _create_slots(self, var_list):
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
def _apply_dense(self, grad, var):
param_name = self._get_variable_name(var.name)
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * var
update_with_lr = self.learning_rate * update
next_param = var - update_with_lr
return tf.group(
[var.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
def _resource_apply_dense(self, grad, var):
"""See `tf.train.Optimizer._resource_apply_dense()`."""
return self._apply_dense(grad, var)
def _apply_sparse(self, grad, var):
"""See `tf.train.Optimizer._apply_sparse()`."""
def scatter_update_fn(x, i, v):
return tf.compat.v1.scatter_update(x, i, v, use_locking=self._use_locking)
return self._apply_sparse_shared(
grad.values, grad.indices, var, scatter_update_fn)
def _resource_apply_sparse(self, grad, var, indices):
"""See `tf.train.Optimizer._resource_apply_sparse()`."""
def scatter_update_fn(x, i, v):
with tf.control_dependencies(
[resource_variable_ops.resource_scatter_update(x.handle, i, v)]):
return x.value()
return self._apply_sparse_shared(grad, indices, var, scatter_update_fn)
def _apply_sparse_shared(self, grad, indices, var, scatter_update_fn):
"""Applies sparse gradients to a variable.
Args:
grad: A tensor for the `values` of `tf.IndexedSlices`.
indices: A tensor for the `indices` of `tf.IndexedSlices`.
var: A `tf.Variable` object.
scatter_update_fn: A function which performs scattered update to
a `tf.Variable` object. It takes tuple of (x, i, v) where:
* x: A `tf.Variable` object which is updated by `i` and `v`,
* i: A tensor for the `indices` of `tf.IndexedSlices`,
* v: A tensor for the `values` of `tf.IndexedSlices`,
and returns a tensor after updating `x`.
Returns:
An op which updates `var` with `grad` and `indices`.
"""
param_name = self._get_variable_name(var.name)
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
# m_t = beta1 * m + (1 - beta1) * g_t
m_scaled_g_values = tf.multiply(1.0 - self.beta_1, grad)
m_t = m.assign(m * self.beta_1)
with tf.control_dependencies([m_t]):
m_slice = tf.gather(m, indices) + m_scaled_g_values
m_t = scatter_update_fn(m, indices, m_slice)
# v_t = beta2 * v + (1 - beta2) * g_t^2
v_scaled_g_values = tf.multiply(1.0 - self.beta_2, tf.square(grad))
v_t = v.assign(v * self.beta_2)
with tf.control_dependencies([v_t]):
v_slice = tf.gather(v, indices) + v_scaled_g_values
v_t = scatter_update_fn(v, indices, v_slice)
update = m_t / (tf.sqrt(v_t) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * var
update_with_lr = self.learning_rate * update
next_param = var - update_with_lr
return tf.group([var.assign(next_param), m_t, v_t])
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
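# Illustrative sketch (not part of the original module): the decoupled weight
# decay step implemented above, written out for a single scalar parameter so
# the difference from "L2 in the loss" is explicit. Plain Python floats are
# used instead of tensors.
def _example_decoupled_weight_decay_step(var, grad, m, v,
                                         learning_rate=1e-3,
                                         weight_decay_rate=0.01,
                                         beta_1=0.9, beta_2=0.999,
                                         epsilon=1e-6):
  next_m = beta_1 * m + (1.0 - beta_1) * grad
  next_v = beta_2 * v + (1.0 - beta_2) * grad * grad
  update = next_m / (next_v ** 0.5 + epsilon)
  # The decay term is added to the update directly rather than folded into the
  # gradient, so it does not interact with the m/v statistics.
  update += weight_decay_rate * var
  next_var = var - learning_rate * update
  return next_var, next_m, next_v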
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BigBird Encoder Layers."""
from bigbird.core import attention
from bigbird.core import recompute_grad
from bigbird.core import utils
import tensorflow.compat.v2 as tf
class PrenormEncoderLayer(tf.keras.layers.Layer):
"""Encoder layer of a transformer in Pegasus style.
The layer_norm is taken before self-attention.
"""
def __init__(self,
attention_type,
hidden_size=768,
intermediate_size=3072,
intermediate_act_fn=utils.gelu,
attention_probs_dropout_prob=0.0,
hidden_dropout_prob=0.1,
initializer_range=0.02,
num_attention_heads=12,
num_rand_blocks=3,
seq_length=1024,
block_size=64,
use_bias=True,
seed=None,
name=None):
"""Constructor of an encoder layer of a transformer in Pegasus style.
Args:
attention_type: Type of attention, needs to be one of ['original_full',
'simulated_sparse', 'block_sparse'].
hidden_size: (optional) int. Size of hidden dimension.
intermediate_size: (optional) int. Size of intermediate dimension.
intermediate_act_fn: (optional) Activation function for the intermediate layer.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
hidden_dropout_prob: (optional) float. Dropout probability of the
attention output and feed-forward layers.
initializer_range: (optional) float. Range of the weight initializer.
num_attention_heads: (optional) int. Number of attention heads.
num_rand_blocks: (optional) int. Number of random chunks per row.
seq_length: (optional) int. length of sequence.
block_size: (optional) int. size of block in sequence.
use_bias: (optional) bool. Whether key/query/value uses a bias vector.
seed: (optional) int. Random seed for generating the random mask.
name: The name scope of this layer.
"""
super(PrenormEncoderLayer, self).__init__(name=name)
with tf.compat.v1.variable_scope(name):
attention_head_size = hidden_size // num_attention_heads
with tf.compat.v1.variable_scope("attention"):
# Pre-Normalization layer
with tf.compat.v1.variable_scope("self"):
self.first_layer_norm = utils.NormLayer(hidden_size)
# Self-Attention layer
self.attn_layer = attention.MultiHeadedAttentionLayer(
attention_type, num_attention_heads, attention_head_size,
num_rand_blocks, seq_length, seq_length, block_size, block_size,
attention_probs_dropout_prob, initializer_range, use_bias,
seed, name="self")
# Feedforward layer
with tf.compat.v1.variable_scope("output"):
self.projection_layer = utils.Dense3dProjLayer(
num_attention_heads, attention_head_size,
utils.create_initializer(initializer_range), None,
"dense", use_bias)
# Dropout
self.attention_dropout = recompute_grad.RecomputingDropout(
hidden_dropout_prob)
with tf.compat.v1.variable_scope("intermediate"):
# Normalization layer
self.second_layer_norm = utils.NormLayer(hidden_size)
# Feedforward layer
self.expand_layer = utils.Dense2dLayer(
hidden_size, intermediate_size,
utils.create_initializer(initializer_range),
intermediate_act_fn, "dense")
with tf.compat.v1.variable_scope("output"):
# Feedforward layer
self.contract_layer = utils.Dense2dLayer(
intermediate_size, hidden_size,
utils.create_initializer(initializer_range),
None, "dense")
# Dropout
self.output_dropout = recompute_grad.RecomputingDropout(
hidden_dropout_prob)
def call(self,
layer_input,
attention_mask=None,
band_mask=None,
from_mask=None,
to_mask=None,
input_blocked_mask=None,
training=None):
"""Implements an encoder layer of a transformer in Pegasus style.
Args:
layer_input: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) float32 Tensor of shape [batch_size,
seq_length, seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions
in the mask that are 0, and will be unchanged for positions that are 1.
band_mask: (optional) float32 Tensor of shape [batch_size, 1,
seq_length//block_size-4, block_size, 3*block_size].
The values should be 1 or 0. The attention scores will effectively be
set to -infinity for any positions in the mask that are 0, and will be
unchanged for positions that are 1.
from_mask: (optional) float32 Tensor of shape [batch_size, 1,
seq_length, 1]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions
in the mask that are 0, and will be unchanged for positions that are 1.
to_mask: (optional) float32 Tensor of shape [batch_size, 1, 1,
seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions
in the mask that are 0, and will be unchanged for positions that are 1.
input_blocked_mask: (optional) float32 Tensor of shape [batch_size,
seq_length//block_size, block_size]. Same as from/to_mask, just
reshaped.
training: Boolean indicating whether the call is training or inference.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size].
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
NotImplementedError: For unknown attention type.
"""
# self-attention
normalized_layer_input = self.first_layer_norm(layer_input)
attention_output = self.attn_layer(
normalized_layer_input, normalized_layer_input, [
attention_mask, band_mask, from_mask, to_mask, input_blocked_mask,
input_blocked_mask
], training=training)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
attention_output = self.projection_layer(attention_output)
attention_output = self.attention_dropout(attention_output,
training=training)
attention_output = attention_output + layer_input
# The activation is only applied to the "intermediate" hidden layer.
normalized_attention_output = self.second_layer_norm(attention_output)
intermediate_output = self.expand_layer(normalized_attention_output)
# Down-project back to `hidden_size` then add the residual.
layer_output = self.contract_layer(intermediate_output)
layer_output = self.output_dropout(layer_output, training=training)
layer_output = layer_output + attention_output
return layer_output
class PostnormEncoderLayer(tf.keras.layers.Layer):
"""Encoder layer of a transformer in BERT style.
The layer_norm is taken after self-attention.
"""
def __init__(self,
attention_type,
hidden_size=768,
intermediate_size=3072,
intermediate_act_fn=utils.gelu,
attention_probs_dropout_prob=0.0,
hidden_dropout_prob=0.1,
initializer_range=0.02,
num_attention_heads=12,
num_rand_blocks=3,
seq_length=1024,
block_size=64,
use_bias=True,
seed=None,
name=None):
"""Constructor of an encoder layer of a transformer in BERT style.
Args:
attention_type: Type of attention, needs to be one of ['original_full',
'simulated_sparse', 'block_sparse'].
hidden_size: (optional) int. Size of hidden dimension.
intermediate_size: (optional) int. Size of intermediate dimension.
intermediate_act_fn: (optional) Activation function for the intermediate layer.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
hidden_dropout_prob: (optional) float. Dropout probability of the
attention output and feed-forward layers.
initializer_range: (optional) float. Range of the weight initializer.
num_attention_heads: (optional) int. Number of attention heads.
num_rand_blocks: (optional) int. Number of random chunks per row.
seq_length: (optional) int. length of sequence.
block_size: (optional) int. size of block in sequence.
use_bias: (optional) bool. Whether key/query/value uses a bias vector.
seed: (optional) int. Random seed for generating the random mask.
name: The name scope of this layer.
"""
super(PostnormEncoderLayer, self).__init__(name=name)
with tf.compat.v1.variable_scope(name):
attention_head_size = hidden_size // num_attention_heads
with tf.compat.v1.variable_scope("attention"):
# Self-Attention layer
self.attn_layer = attention.MultiHeadedAttentionLayer(
attention_type, num_attention_heads, attention_head_size,
num_rand_blocks, seq_length, seq_length, block_size, block_size,
attention_probs_dropout_prob, initializer_range, use_bias,
seed, name="self")
with tf.compat.v1.variable_scope("output"):
# Feedforward layer
self.projection_layer = utils.Dense3dProjLayer(
num_attention_heads, attention_head_size,
utils.create_initializer(initializer_range), None,
"dense", use_bias)
# Post-Normalization layer
self.first_layer_norm = utils.NormLayer(hidden_size)
# Dropout
self.attention_dropout = recompute_grad.RecomputingDropout(
hidden_dropout_prob)
with tf.compat.v1.variable_scope("intermediate"):
# Feedforward layer
self.expand_layer = utils.Dense2dLayer(
hidden_size, intermediate_size,
utils.create_initializer(initializer_range),
intermediate_act_fn, "dense")
with tf.compat.v1.variable_scope("output"):
# Feedforward layer
self.contract_layer = utils.Dense2dLayer(
intermediate_size, hidden_size,
utils.create_initializer(initializer_range),
None, "dense")
# Normalization layer
self.second_layer_norm = utils.NormLayer(hidden_size)
# Dropout
self.output_dropout = recompute_grad.RecomputingDropout(
hidden_dropout_prob)
def call(self,
layer_input,
attention_mask=None,
band_mask=None,
from_mask=None,
to_mask=None,
input_blocked_mask=None,
training=None):
"""Implements an encoder layer of a transformer in BERT style.
Args:
layer_input: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) float32 Tensor of shape [batch_size,
seq_length, seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions
in the mask that are 0, and will be unchanged for positions that are 1.
band_mask: (optional) float32 Tensor of shape [batch_size, 1,
seq_length//block_size-4, block_size, 3*block_size].
The values should be 1 or 0. The attention scores will effectively be
set to -infinity for any positions in the mask that are 0, and will be
unchanged for positions that are 1.
from_mask: (optional) float32 Tensor of shape [batch_size, 1,
seq_length, 1]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions
in the mask that are 0, and will be unchanged for positions that are 1.
to_mask: (optional) float32 Tensor of shape [batch_size, 1, 1,
seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions
in the mask that are 0, and will be unchanged for positions that are 1.
input_blocked_mask: (optional) float32 Tensor of shape [batch_size,
seq_length//block_size, block_size]. Same as from/to_mask, just
reshaped.
training: Boolean indicating whether the call is training or inference.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size].
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
NotImplementedError: For unknown attention type.
"""
# self-attention
attention_output = self.attn_layer(
layer_input, layer_input, [
attention_mask, band_mask, from_mask, to_mask, input_blocked_mask,
input_blocked_mask
], training=training)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
attention_output = self.projection_layer(attention_output)
attention_output = self.attention_dropout(attention_output,
training=training)
attention_output = self.first_layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
intermediate_output = self.expand_layer(attention_output)
# Down-project back to `hidden_size` then add the residual.
layer_output = self.contract_layer(intermediate_output)
layer_output = self.output_dropout(layer_output, training=training)
layer_output = self.second_layer_norm(layer_output + attention_output)
return layer_output
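# Illustrative sketch (not part of the original module): running a single
# post-norm encoder layer with full attention on dummy inputs. Sizes are
# arbitrary example values; only attention_mask is needed for 'original_full'.
def _example_postnorm_encoder_layer():
  batch_size, seq_length, hidden_size = 2, 16, 64
  layer = PostnormEncoderLayer(
      attention_type="original_full",
      hidden_size=hidden_size,
      intermediate_size=4 * hidden_size,
      num_attention_heads=4,
      seq_length=seq_length,
      block_size=4,
      name="layer_0")
  x = tf.random.normal([batch_size, seq_length, hidden_size])
  input_mask = tf.ones([batch_size, seq_length], dtype=tf.float32)
  attention_mask = attention.create_attention_mask_from_input_mask(
      input_mask, input_mask)
  # Output shape: [batch_size, seq_length, hidden_size].
  return layer(x, attention_mask=attention_mask, training=False)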
def add_gradient_recomputation(original_class):
"""Creates a subclass that enables gradient checkpointing."""
class RecomputeLayer(original_class):
"""Transformer layer that recomputes the forward pass during backprop."""
def call(self,
layer_input,
attention_mask=None,
band_mask=None,
from_mask=None,
to_mask=None,
input_blocked_mask=None,
training=None):
def f(layer_input, attention_mask, band_mask,
from_mask, to_mask, input_blocked_mask):
x = super(RecomputeLayer, self).call(
layer_input, attention_mask, band_mask, from_mask, to_mask,
input_blocked_mask, training=training)
return x
f = recompute_grad.recompute_grad(f)
return f(layer_input, attention_mask, band_mask,
from_mask, to_mask, input_blocked_mask)
return RecomputeLayer
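# Illustrative sketch (not part of the original module): enabling gradient
# checkpointing by wrapping the post-norm layer with the helper above. The
# wrapped class keeps the constructor and call signature of the original
# layer but recomputes its forward pass during backprop to save memory.
def _example_gradient_checkpointed_layer():
  recompute_cls = add_gradient_recomputation(PostnormEncoderLayer)
  return recompute_cls(
      attention_type="original_full",
      hidden_size=64,
      intermediate_size=256,
      num_attention_heads=4,
      seq_length=16,
      block_size=4,
      name="layer_0")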
class EncoderStack(tf.keras.layers.Layer):
"""Transformer encoder stack."""
def __init__(self, params):
name = "encoder"
super(EncoderStack, self).__init__(name=name)
self.params = params
if params["norm_type"] == "prenorm":
encoder_class = PrenormEncoderLayer
elif params["norm_type"] == "postnorm":
encoder_class = PostnormEncoderLayer
else:
raise NotImplementedError(
"Norm type {} is not implemented".format(params["norm_type"]))
if params["use_gradient_checkpointing"]:
encoder_class = add_gradient_recomputation(encoder_class)
with tf.compat.v1.variable_scope(name):
# Encoder layers
self.encoder_layers = [
encoder_class( # pylint: disable=g-complex-comprehension
self.params["attention_type"],
self.params["hidden_size"],
self.params["intermediate_size"],
utils.get_activation(self.params["hidden_act"]),
self.params["attention_probs_dropout_prob"],
self.params["hidden_dropout_prob"],
self.params["initializer_range"],
self.params["num_attention_heads"],
self.params["num_rand_blocks"],
self.params["max_encoder_length"],
self.params["block_size"],
self.params["use_bias"],
seed=layer_idx,
name="layer_%d" % layer_idx)
for layer_idx in range(self.params["num_hidden_layers"])
]
# Normalization layer
self.layer_norm = utils.NormLayer(self.params["hidden_size"])
def call(self,
encoder_inputs,
encoder_inputs_mask,
training=None):
"""Return the output of the encoder layer stack.
Args:
encoder_inputs: tensor with shape
[batch_size, input_length, hidden_size]
encoder_inputs_mask: Mask for encoder input. [batch_size, input_length]
training: Boolean indicating whether the call is training or inference.
Returns:
Final encoder layer output: float tensor with shape
[batch_size, input_length, hidden_size]
"""
if self.params["attention_type"] == "block_sparse":
# reshape and cast for blocking
encoder_length = self.params["max_encoder_length"]
encoder_block_size = self.params["block_size"]
encoder_inputs_mask = tf.cast(encoder_inputs_mask, tf.float32)
blocked_encoder_mask = tf.reshape(
encoder_inputs_mask,
(-1, encoder_length//encoder_block_size, encoder_block_size))
encoder_from_mask = tf.reshape(encoder_inputs_mask,
(-1, 1, encoder_length, 1))
encoder_to_mask = tf.reshape(encoder_inputs_mask,
(-1, 1, 1, encoder_length))
# create band padding
band_mask = attention.create_band_mask_from_inputs(
blocked_encoder_mask, blocked_encoder_mask)
# For unused masks, pass 0 instead of None for compatibility with recompute_grad.
attention_mask = 0.0
else:
# For unused masks, pass 0 instead of None for compatibility with recompute_grad.
blocked_encoder_mask = 0.0
encoder_to_mask = 0.0
encoder_from_mask = 0.0
band_mask = 0.0
encoder_inputs_mask = tf.cast(encoder_inputs_mask, tf.float32)
attention_mask = attention.create_attention_mask_from_input_mask(
encoder_inputs_mask, encoder_inputs_mask)
if self.params["norm_type"] == "postnorm":
encoder_inputs = self.layer_norm(encoder_inputs)
layer_output = encoder_inputs
for layer in self.encoder_layers:
layer_output = layer(
layer_output, attention_mask, band_mask,
encoder_from_mask, encoder_to_mask, blocked_encoder_mask,
training=training)
if self.params["norm_type"] == "prenorm":
layer_output = self.layer_norm(layer_output)
return layer_output
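# Illustrative sketch (not part of the original module): the subset of params
# EncoderStack reads, filled with arbitrary example values, and a forward pass
# over dummy token embeddings.
def _example_encoder_stack():
  params = {
      "norm_type": "postnorm",
      "use_gradient_checkpointing": False,
      "attention_type": "block_sparse",
      "hidden_size": 128,
      "intermediate_size": 512,
      "hidden_act": "gelu",
      "attention_probs_dropout_prob": 0.0,
      "hidden_dropout_prob": 0.0,
      "initializer_range": 0.02,
      "num_attention_heads": 4,
      "num_rand_blocks": 3,
      "max_encoder_length": 512,
      "block_size": 16,
      "use_bias": True,
      "num_hidden_layers": 2,
  }
  encoder = EncoderStack(params)
  inputs = tf.random.normal(
      [2, params["max_encoder_length"], params["hidden_size"]])
  inputs_mask = tf.ones([2, params["max_encoder_length"]], dtype=tf.float32)
  # Output shape: [2, max_encoder_length, hidden_size].
  return encoder(inputs, inputs_mask, training=False)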
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Beam search branched from Pegasus.
Original source:
https://github.com/google-research/pegasus/blob/master/pegasus/layers/beam_search.py
This beam search implementation is designed for TPU usage only and prefers
flexibility over efficiency. Transformer attention caching is not enabled yet.
Mostly follows the implementation in T2T. Several differences from pure beam search:
1. has finished and alive seqs, and uses 2 * beam_size to grow alive seqs,
which means beam_size=1 is not equivalent to greedy decoding.
2. prefers finished seqs over alive seqs.
3. prefers lower indices when equal probability (though unlikely).
4. with custom length normalization and constraint.
Notations:
B: batch_size, M: beam_size, T: max_decode_len, V: vocab_size, U: undefined
"""
# pylint: disable=invalid-name
import tensorflow.compat.v2 as tf
def length_normalization(start, alpha, min_len, max_len, out_of_range_penalty):
r"""Create length normalization function.
Combines length penalty from https://arxiv.org/abs/1609.08144,
and length constraint from https://www.aclweb.org/anthology/W18-2706.pdf.
scores = \sum_j log(P_j) / ((start + lengths)/(1 + start))**alpha
+ out_of_range_penalty * (length > max_len or length < min_len)
Args:
start: int, length normalization start offset.
alpha: float, [0, 1.0], length normalization power.
min_len: int, minimum decode length.
max_len: int, maximum decode length.
out_of_range_penalty: float, penalty for lengths outside min_len and max_len.
Use a negative number to penalize out-of-range decodes; setting it to -inf
makes the constraint hard.
Returns:
fn(log_probs_BxM, length)->scores_BxM: a function to normalize sum log
probabilities of sequence with current decoding lengths.
"""
def length_norm_fn(log_probs_BxM, length_int):
"""Normalize sum log probabilities given a sequence length."""
dtype = log_probs_BxM.dtype
norm_flt = tf.pow(((start + tf.cast(length_int, dtype)) / (1. + start)),
alpha)
log_probs_BxM /= norm_flt
too_short_bool = tf.less(length_int, min_len)
too_long_bool = tf.logical_and(tf.greater(length_int, max_len), max_len > 0)
out_of_range_bool = tf.logical_or(too_long_bool, too_short_bool)
log_probs_BxM += out_of_range_penalty * tf.cast(out_of_range_bool, dtype)
return log_probs_BxM
return length_norm_fn
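# Example sketch (not part of the original module): how the returned function
# rescales summed log-probs for a decode of length 4. Values are approximate.
def _length_norm_example():
  norm_fn = length_normalization(
      start=5, alpha=0.7, min_len=1, max_len=8, out_of_range_penalty=-1e9)
  log_probs_BxM = tf.constant([[-2.0, -3.0]])
  # ((5 + 4) / 6) ** 0.7 ~= 1.33, so the result is roughly [[-1.51, -2.26]].
  return norm_fn(log_probs_BxM, tf.constant(4))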
def beam_search(symbols_to_logits_fn,
init_seq_BxT,
initial_cache_BxU,
vocab_size,
beam_size,
length_norm_fn,
eos_id=1):
"""Beam search.
Args:
symbols_to_logits_fn: fn(seq_BxT, cache_BxU, i) -> (logits_BxV, cache_BxU)
init_seq_BxT: initial sequence ids.
initial_cache_BxU: dictionary of tensors with shape BxU.
vocab_size: vocabulary size.
beam_size: beam size.
length_norm_fn: length normalization function.
eos_id: end of sequence.
Returns:
Tuple of (beams_BxMxT, scores_BxM). Beam searched sequences and scores.
"""
B, T = init_seq_BxT.shape
M, V = beam_size, vocab_size
dtype = tf.float32
int_dtype = init_seq_BxT.dtype
def _loop_body(i, alive_seq_BxMxT, alive_log_probs_BxM, alive_cache_BxMxU,
finished_seq_BxMxT, finished_scores_BxM):
"""Beam search loop body."""
# Decode one step with beam
logits_BMxV, cache_BMxU = symbols_to_logits_fn(
_flatten_beam_dim(alive_seq_BxMxT),
tf.nest.map_structure(_flatten_beam_dim, alive_cache_BxMxU), i)
logits_BxMxV = _unflatten_beam_dim(logits_BMxV, M)
new_cache_BxMxU = tf.nest.map_structure(lambda t: _unflatten_beam_dim(t, M),
cache_BMxU)
# select top 2 * beam_size and fill alive and finished.
log_probs_BxMxV = logits_BxMxV - tf.reduce_logsumexp(
logits_BxMxV, axis=2, keepdims=True)
log_probs_BxMxV += tf.expand_dims(alive_log_probs_BxM, axis=2)
log_probs_BxMV = tf.reshape(log_probs_BxMxV, [B, -1])
new_log_probs_Bx2M, topk_indices_Bx2M = tf.nn.top_k(log_probs_BxMV, k=2 * M)
topk_beam_Bx2M = topk_indices_Bx2M // V
topk_seq_Bx2MxT, new_cache_Bx2MxU = _gather_nested(
[alive_seq_BxMxT, new_cache_BxMxU], topk_beam_Bx2M)
topk_ids_Bx2M = topk_indices_Bx2M % V
new_seq_Bx2MxT = _update_i(topk_seq_Bx2MxT, topk_ids_Bx2M, i)
new_finished_flags_Bx2M = tf.cast(
tf.reduce_any(tf.equal(new_seq_Bx2MxT, eos_id), axis=-1), dtype)
# get new alive
_, topk_alive_indices_BxM = tf.nn.top_k(
new_log_probs_Bx2M + new_finished_flags_Bx2M * dtype.min, k=M)
(alive_seq_BxMxT, alive_log_probs_BxM, alive_cache_BxMxU) = _gather_nested(
[new_seq_Bx2MxT, new_log_probs_Bx2M, new_cache_Bx2MxU],
topk_alive_indices_BxM)
# get new finished
new_scores_Bx2M = length_norm_fn(new_log_probs_Bx2M, i + 1)
new_scores_Bx2M += (1 - new_finished_flags_Bx2M) * dtype.min
finished_seq_Bx3MxT = tf.concat([finished_seq_BxMxT, new_seq_Bx2MxT],
axis=1)
finished_scores_Bx3M = tf.concat([finished_scores_BxM, new_scores_Bx2M],
axis=1)
_, topk_finished_indices_BxM = tf.nn.top_k(finished_scores_Bx3M, k=M)
(finished_seq_BxMxT, finished_scores_BxM) = _gather_nested(
[finished_seq_Bx3MxT, finished_scores_Bx3M], topk_finished_indices_BxM)
return [
i + 1, alive_seq_BxMxT, alive_log_probs_BxM, alive_cache_BxMxU,
finished_seq_BxMxT, finished_scores_BxM
]
# initialize.
init_i = tf.constant(0, dtype=int_dtype)
init_alive_seq_BxMxT = _expand_to_beam_size(init_seq_BxT, M)
log_probs_1xM = tf.constant([[0.] + [dtype.min] * (M - 1)], dtype=dtype)
init_alive_log_probs_BxM = tf.tile(log_probs_1xM, [B, 1])
init_alive_cache_BxMxU = tf.nest.map_structure(
lambda t: _expand_to_beam_size(t, M), initial_cache_BxU)
init_finished_seq_BxMxT = tf.zeros(tf.shape(init_alive_seq_BxMxT), int_dtype)
init_finished_scores_BxM = tf.zeros([B, M], dtype=dtype) + dtype.min
# run loop.
(_, final_alive_seq_BxMxT, final_alive_scores_BxM, _,
final_finished_seq_BxMxT, final_finished_scores_BxM) = tf.while_loop(
lambda *args: True, # Always do T iterations
_loop_body,
loop_vars=[
init_i, init_alive_seq_BxMxT, init_alive_log_probs_BxM,
init_alive_cache_BxMxU, init_finished_seq_BxMxT,
init_finished_scores_BxM
],
parallel_iterations=1,
back_prop=False,
maximum_iterations=T,
)
# process finished.
final_finished_flag_BxMx1 = tf.reduce_any(
tf.equal(final_finished_seq_BxMxT, eos_id), axis=-1, keepdims=True)
final_seq_BxMxT = tf.where(
tf.tile(final_finished_flag_BxMx1, [1, 1, T]), final_finished_seq_BxMxT,
final_alive_seq_BxMxT)
final_scores_BxM = tf.where(
tf.squeeze(final_finished_flag_BxMx1, axis=-1), final_finished_scores_BxM,
final_alive_scores_BxM)
return final_seq_BxMxT, final_scores_BxM
def _update_i(tensor_BxNxT, updates_BxN, i):
B, N, T = tensor_BxNxT.shape
tensor_BNxT = tf.reshape(tensor_BxNxT, [-1, T])
updates_BN = tf.reshape(updates_BxN, [-1])
batch_BN = tf.range(B * N, dtype=tf.int32)
i_BN = tf.fill([B * N], i)
ind_BNx2 = tf.stack([batch_BN, i_BN], axis=-1)
tensor_BNxT = tf.tensor_scatter_nd_update(tensor_BNxT, ind_BNx2, updates_BN)
return tf.reshape(tensor_BNxT, [B, N, T])
def _expand_to_beam_size(tensor_BxU, beam_size):
tensor_Bx1xU = tf.expand_dims(tensor_BxU, axis=1)
tile_dims = [1] * tensor_Bx1xU.shape.ndims
tile_dims[1] = beam_size
tensor_BxMxU = tf.tile(tensor_Bx1xU, tile_dims)
return tensor_BxMxU
def _flatten_beam_dim(tensor_BxMxU):
shape = tensor_BxMxU.shape.as_list()
tensor_BMxU = tf.reshape(tensor_BxMxU, [shape[0] * shape[1]] + shape[2:])
return tensor_BMxU
def _unflatten_beam_dim(tensor_BMxU, M):
shape = tensor_BMxU.shape.as_list()
tensor_BxMxU = tf.reshape(tensor_BMxU, [shape[0] // M, M] + shape[1:])
return tensor_BxMxU
def _gather_nested(nested_BxMxU, indices_BxN):
def _gather_beam(tensor_BxMxU):
tensor_BxNxU = tf.gather(tensor_BxMxU, indices_BxN, batch_dims=1, axis=1)
return tensor_BxNxU
return tf.nest.map_structure(_gather_beam, nested_BxMxU)
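# Smoke-test sketch (not part of the original module): a toy symbols_to_logits_fn
# that assigns a uniform distribution over a 5-token vocabulary. It only
# illustrates the calling convention and output shapes of beam_search; the
# dummy cache entry is an illustration choice, not required by the API.
def _beam_search_example():
  def toy_symbols_to_logits_fn(seq_BMxT, cache_BMxU, unused_i):
    logits_BMxV = tf.zeros([seq_BMxT.shape[0], 5])  # uniform over the vocab
    return logits_BMxV, cache_BMxU
  init_seq_BxT = tf.zeros([2, 6], tf.int32)         # batch=2, max_decode_len=6
  initial_cache_BxU = {"dummy": tf.zeros([2, 1])}   # placeholder cache entry
  norm_fn = length_normalization(5, 0.7, 1, 6, -1e9)
  beams_BxMxT, scores_BxM = beam_search(
      toy_symbols_to_logits_fn, init_seq_BxT, initial_cache_BxU,
      vocab_size=5, beam_size=3, length_norm_fn=norm_fn, eos_id=1)
  return beams_BxMxT.shape, scores_BxM.shape        # ([2, 3, 6], [2, 3])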
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper and utility functions."""
import re
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
############################### SHAPE UTILS ####################################
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if not tf.executing_eagerly() and name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
# assert False, "Static shape not available for {}".format(tensor)
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
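# Usage sketch (not part of the original module): under tf.function the batch
# dimension below is unknown, so get_shape_list mixes a scalar int32 tensor
# (for the dynamic dim) with a plain Python int (for the static dim).
def _get_shape_list_example():
  @tf.function(input_signature=[tf.TensorSpec([None, 16], tf.float32)])
  def scaled_size(t):
    batch, width = get_shape_list(t, expected_rank=2)  # dynamic, static
    return batch * width
  return scaled_size(tf.zeros([4, 16]))  # scalar tensor holding 64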
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if not tf.executing_eagerly() and name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, int):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.compat.v1.get_variable_scope().name
raise ValueError(
"For the tensor `{}` in scope `{}`, the actual rank "
"`{}` (shape = {}) is not equal to the expected rank `{}`".format(
name, scope_name, actual_rank, str(tensor.shape),
str(expected_rank)))
############################### DENSE LAYERS ###################################
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range)
class Dense3dLayer(tf.keras.layers.Layer):
"""A dense layer with 3D kernel."""
def __init__(self,
num_attention_heads,
size_per_head,
initializer,
activation,
name=None,
head_first=False,
use_bias=True):
"""Constructor for dense layer with 3D kernel.
Args:
num_attention_heads: The size of output dimension.
size_per_head: The size per attention head.
initializer: Kernel initializer.
activation: Activation function.
name: The name scope of this layer.
head_first: Whether to output head dimension before or after sequence dim.
use_bias: Whether the layer uses a bias vector.
"""
super(Dense3dLayer, self).__init__(name=name)
self.num_attention_heads = num_attention_heads
self.size_per_head = size_per_head
self.initializer = initializer
self.activation = activation
self.head_first = head_first
self.use_bias = use_bias
with tf.compat.v1.variable_scope(name):
hidden_size = self.num_attention_heads * self.size_per_head
self.w = tf.compat.v1.get_variable(
name="kernel",
shape=[hidden_size, hidden_size],
initializer=self.initializer)
if self.use_bias:
self.b = tf.compat.v1.get_variable(
name="bias",
shape=[hidden_size],
initializer=tf.zeros_initializer())
else:
self.b = None
def call(self, input_tensor):
"""Constructor for dense layer with 3D kernel.
Args:
input_tensor: float Tensor of shape [batch, seq_length, hidden_size].
Returns:
float logits Tensor.
"""
hidden_size = self.num_attention_heads * self.size_per_head
reshape_w = tf.reshape(
self.w, [hidden_size, self.num_attention_heads, self.size_per_head])
if self.head_first:
ret = tf.einsum("abc,cde->adbe", input_tensor, reshape_w)
else:
ret = tf.einsum("abc,cde->abde", input_tensor, reshape_w)
if self.use_bias:
if self.head_first:
reshape_b = tf.reshape(
self.b, [1, self.num_attention_heads, 1, self.size_per_head])
else:
reshape_b = tf.reshape(
self.b, [self.num_attention_heads, self.size_per_head])
ret += reshape_b
if self.activation is not None:
return self.activation(ret)
else:
return ret
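# Shape sketch (not part of the original class): the einsum above is a plain
# [hidden, hidden] projection whose output is split into attention heads.
# Sizes below (batch=2, seq=3, heads=4, size_per_head=8) are illustration only.
def _dense3d_shape_example():
  x = tf.random.normal([2, 3, 32])          # [batch, seq, hidden]
  w = tf.random.normal([32, 4, 8])          # [hidden, heads, size_per_head]
  out = tf.einsum("abc,cde->abde", x, w)    # head_first=False layout
  return out.shape                          # TensorShape([2, 3, 4, 8])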
class Dense3dProjLayer(tf.keras.layers.Layer):
"""A dense layer with 3D kernel for projection."""
def __init__(self,
num_attention_heads,
size_per_head,
initializer,
activation,
name=None,
use_bias=True):
"""Constructor for dense layer with 3D kernel for projection.
Args:
num_attention_heads: The size of output dimension.
size_per_head: The size per attention head.
initializer: Kernel initializer.
activation: Activation function.
name: The name scope of this layer.
use_bias: Whether the layer uses a bias vector.
"""
super(Dense3dProjLayer, self).__init__(name=name)
self.num_attention_heads = num_attention_heads
self.size_per_head = size_per_head
self.initializer = initializer
self.activation = activation
self.use_bias = use_bias
with tf.compat.v1.variable_scope(name):
hidden_size = self.num_attention_heads * self.size_per_head
self.w = tf.compat.v1.get_variable(
name="kernel",
shape=[hidden_size, hidden_size],
initializer=self.initializer)
if self.use_bias:
self.b = tf.compat.v1.get_variable(
name="bias",
shape=[hidden_size],
initializer=tf.zeros_initializer())
else:
self.b = None
def call(self, input_tensor):
"""Constructor for dense layer with 3D kernel for projection.
Args:
input_tensor: float Tensor of shape [batch,from_seq_length,
num_attention_heads, size_per_head].
Returns:
float logits Tensor.
"""
hidden_size = self.num_attention_heads * self.size_per_head
reshape_w = tf.reshape(
self.w, [self.num_attention_heads, self.size_per_head, hidden_size])
ret = tf.einsum("BFNH,NHD->BFD", input_tensor, reshape_w)
if self.use_bias:
ret += self.b
if self.activation is not None:
return self.activation(ret)
else:
return ret
class Dense2dLayer(tf.keras.layers.Layer):
"""A dense layer with 2D kernel."""
def __init__(self,
input_size,
output_size,
initializer,
activation,
name=None,
use_bias=True):
"""Constructor for dense layer with 2D kernel.
Args:
input_size: The size of input dimension.
output_size: The size of output dimension.
initializer: Kernel initializer.
activation: Activation function.
name: The name scope of this layer.
use_bias: Whether the layer uses a bias vector.
"""
super(Dense2dLayer, self).__init__(name=name)
self.input_size = input_size
self.output_size = output_size
self.initializer = initializer
self.activation = activation
self.use_bias = use_bias
with tf.compat.v1.variable_scope(name):
self.w = tf.compat.v1.get_variable(
name="kernel",
shape=[self.input_size, self.output_size],
initializer=self.initializer)
if self.use_bias:
self.b = tf.compat.v1.get_variable(
name="bias",
shape=[self.output_size],
initializer=tf.zeros_initializer())
else:
self.b = None
def call(self, input_tensor):
"""Forward pass for dense layer with 2D kernel.
Args:
input_tensor: Float tensor with rank 3.
Returns:
float logits Tensor.
"""
ret = tf.einsum("abc,cd->abd", input_tensor, self.w)
if self.use_bias:
ret += self.b
if self.activation is not None:
return self.activation(ret)
else:
return ret
class SimpleDenseLayer(tf.keras.layers.Layer):
"""A simple dense layer with 2D kernel."""
def __init__(self,
input_size,
output_size,
initializer,
activation,
name=None,
use_bias=True):
"""Constructor for dense layer with 2D kernel.
Args:
input_size: The size of input dimension.
output_size: The size of output dimension.
initializer: Kernel initializer.
activation: Activation function.
name: The name scope of this layer.
use_bias: Whether the layer uses a bias vector.
"""
super(SimpleDenseLayer, self).__init__(name=name)
self.input_size = input_size
self.output_size = output_size
self.initializer = initializer
self.activation = activation
self.use_bias = use_bias
with tf.compat.v1.variable_scope(name):
self.w = tf.compat.v1.get_variable(
name="kernel",
shape=[self.input_size, self.output_size],
initializer=self.initializer)
if self.use_bias:
self.b = tf.compat.v1.get_variable(
name="bias",
shape=[self.output_size],
initializer=tf.zeros_initializer())
else:
self.b = None
def call(self, input_tensor):
"""Forward pass for dense layer with 2D kernel.
Args:
input_tensor: Float tensor with rank 2.
Returns:
float logits Tensor.
"""
ret = tf.einsum("ab,bc->ac", input_tensor, self.w)
if self.use_bias:
ret += self.b
if self.activation is not None:
return self.activation(ret)
else:
return ret
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
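# Sanity-check sketch (not part of the original module): the tanh approximation
# above closely matches the exact GELU for moderate inputs.
def _gelu_example():
  x = tf.constant([-1.0, 0.0, 1.0])
  return gelu(x)  # approximately [-0.159, 0.0, 0.841]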
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that"s not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, str):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
############################## NORM LAYERS #####################################
class NormLayer(tf.keras.layers.Layer):
"""Replacement for contrib_layers.layer_norm."""
def __init__(self, hdim, dtype=tf.float32, name="LayerNorm"):
super(NormLayer, self).__init__(name=name)
self._dtype = dtype
with tf.compat.v1.variable_scope(name):
self.beta = tf.compat.v1.get_variable(
"beta", [hdim], dtype=dtype, initializer=tf.zeros_initializer())
self.gamma = tf.compat.v1.get_variable(
"gamma", [hdim], dtype=dtype, initializer=tf.ones_initializer())
def call(self, inputs):
inputs_shape = inputs.shape
# Compute norm along last axis
mean, variance = tf.nn.moments(inputs, [-1], keepdims=True)
# Compute layer normalization using the batch_normalization function.
# Note that epsilon must be increased for float16 due to the limited
# representable range.
variance_epsilon = 1e-12 if self._dtype != tf.float16 else 1e-3
outputs = tf.nn.batch_normalization(
inputs,
mean,
variance,
offset=self.beta,
scale=self.gamma,
variance_epsilon=variance_epsilon)
outputs.set_shape(inputs_shape)
return outputs
############################# EMBEDDING LAYER ##################################
class EmbeddingLayer(tf.keras.layers.Layer):
"""An embedding layer."""
def __init__(self,
vocab_size,
emb_dim,
initializer,
scale_emb=False,
use_token_type=False,
num_token_types=16,
use_position_embeddings=True,
max_position_embeddings=4096,
dropout_prob=0.0,
name="embeddings"):
super(EmbeddingLayer, self).__init__(name=name)
self.vocab_size = vocab_size
self.emb_dim = emb_dim
self.scale_emb = scale_emb
self.num_token_types = num_token_types
self.max_position_embeddings = max_position_embeddings
self.dropout_prob = dropout_prob
with tf.compat.v1.variable_scope(name):
self.word_embeddings = tf.compat.v1.get_variable(
"word_embeddings", [vocab_size, emb_dim],
dtype=tf.float32, initializer=initializer)
if use_token_type:
self.token_type_table = tf.compat.v1.get_variable(
"token_type_embeddings", [num_token_types, emb_dim],
dtype=tf.float32, initializer=initializer)
else:
self.token_type_table = None
if use_position_embeddings:
self.position_embeddings = tf.compat.v1.get_variable(
"position_embeddings", [max_position_embeddings, emb_dim],
dtype=tf.float32, initializer=initializer)
else:
self.position_embeddings = None
def call(self,
input_ids,
seq_length,
start_pos=0,
token_type_ids=None,
training=None):
if input_ids is None:
return None
# subtoken embedding
output = tf.nn.embedding_lookup(params=self.word_embeddings, ids=input_ids)
if self.scale_emb:
output = output * self.emb_dim ** 0.5
if self.token_type_table is not None:
# This vocab will be small so we always do one-hot here, since it is
# always faster for a small vocabulary.
one_hot_ids = tf.one_hot(token_type_ids, depth=self.num_token_types)
token_type_embeddings = tf.tensordot(
one_hot_ids, self.token_type_table, 1)
output += token_type_embeddings
if self.position_embeddings is not None:
# assert_op = tf.compat.v1.assert_less_equal(
# start_pos + seq_length, self.max_position_embeddings)
# with tf.control_dependencies([assert_op]):
# So `position_embeddings` is effectively an embedding table for
# position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(self.position_embeddings, [start_pos, 0],
[seq_length, self.emb_dim])
output += tf.expand_dims(position_embeddings, axis=0)
if training and self.dropout_prob > 0:
output = tf.nn.dropout(output, self.dropout_prob)
return output
def linear(self, x):
"""Computes logits by running x through a linear layer.
Args:
x: A float32 tensor with shape [..., hidden_size]
Returns:
float32 tensor with shape [..., vocab_size].
"""
with tf.compat.v1.name_scope("presoftmax_linear"):
logits = tf.tensordot(x, self.word_embeddings, [[-1], [1]])
return logits
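# Illustrative sketch (not part of the original class): `linear` ties the output
# projection to the word-embedding table, so logits are dot products of hidden
# states with every embedding row. Sizes below are arbitrary.
def _weight_tying_example():
  word_embeddings = tf.random.normal([100, 16])   # [vocab_size, emb_dim]
  hidden = tf.random.normal([2, 7, 16])           # [batch, seq, emb_dim]
  logits = tf.tensordot(hidden, word_embeddings, [[-1], [1]])
  return logits.shape                             # TensorShape([2, 7, 100])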
########################## TPU/CHECKPOINT UTILS ################################
def get_estimator(config, model_fn, keep_checkpoint_max=10):
"""Create TPUEstimator object for given config and model_fn."""
tpu_cluster_resolver = None
if config["use_tpu"] and config["tpu_name"]:
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
config["tpu_name"],
zone=config["tpu_zone"],
project=config["gcp_project"])
# Batch size book-keeping
# Estimators handle batch sizes differently among GPUs and TPUs
# GPU: Estimator needs per core batch size
# TPU: Estimator needs total batch size, i.e. num_cores * per core batch size
config_train_batch_size = config["train_batch_size"] # For estimator
config_eval_batch_size = config["eval_batch_size"] # For estimator
effective_train_batch_size = config["train_batch_size"] # For human
effective_eval_batch_size = config["eval_batch_size"] # For human
session_config = None
if config["use_tpu"]:
sliced_eval_mode = tf.compat.v1.estimator.tpu.InputPipelineConfig.SLICED
distribute_strategy = None
config_train_batch_size *= config["num_tpu_cores"]
config_eval_batch_size *= config["num_tpu_cores"]
effective_train_batch_size = config_train_batch_size
effective_eval_batch_size = config_eval_batch_size
else:
session_config = tf.compat.v1.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=1.2))
cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
with tf.compat.v1.Session(cluster_resolver.master(),
config=session_config) as sess:
logging.info(sess.list_devices())
sliced_eval_mode = tf.compat.v1.estimator.tpu.InputPipelineConfig.PER_HOST_V1
distribute_strategy = tf.distribute.MirroredStrategy(devices=None)
effective_train_batch_size *= distribute_strategy.num_replicas_in_sync
# effective_eval_batch_size *= distribute_strategy.num_replicas_in_sync
is_per_host = tf.compat.v1.estimator.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.compat.v1.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=config["master"],
model_dir=config["output_dir"],
save_checkpoints_steps=config["save_checkpoints_steps"],
keep_checkpoint_max=keep_checkpoint_max,
train_distribute=distribute_strategy,
session_config=session_config,
tpu_config=tf.compat.v1.estimator.tpu.TPUConfig(
tpu_job_name=config["tpu_job_name"],
iterations_per_loop=config["iterations_per_loop"],
num_shards=config["num_tpu_cores"],
per_host_input_for_training=is_per_host,
eval_training_input_configuration=sliced_eval_mode))
if config["init_checkpoint"]:
ckpt_var_list = tf.compat.v1.train.list_variables(config["init_checkpoint"])
ckpt_var_list = {
name: shape for name, shape in ckpt_var_list
if not re.findall("(Adam|Adafactor|global_step)", name)
}
vars_to_warm_start = "({})".format("|".join(ckpt_var_list.keys()))
warm_start_settings = tf.estimator.WarmStartSettings(
ckpt_to_initialize_from=config["init_checkpoint"],
vars_to_warm_start=vars_to_warm_start)
else:
ckpt_var_list = {}
warm_start_settings = None
config["ckpt_var_list"] = ckpt_var_list
# If no TPU, this will fall back to normal Estimator on CPU or GPU.
estimator = tf.compat.v1.estimator.tpu.TPUEstimator(
use_tpu=config["use_tpu"],
model_fn=model_fn,
config=run_config,
train_batch_size=config_train_batch_size,
eval_batch_size=config_eval_batch_size,
warm_start_from=warm_start_settings)
# assign batch sizes
estimator.train_batch_size = effective_train_batch_size
estimator.eval_batch_size = effective_eval_batch_size
return estimator
def log_variables(variables, ckpt_var_list):
"""Log trainable variables."""
logging.info("**** Trainable Variables ****")
model_var_list = {var.name: var.get_shape().as_list() for var in variables}
num_params = sum(np.prod(shape) for shape in model_var_list.values())
length = max(len(name) for name in model_var_list) + 2
line = "{{:<{}}}{{:<13}}{{}}".format(length)
logging.info("The model has {} trainable variables "
"({:,} parameters):\n".format(len(model_var_list), num_params))
logging.info(line.format("Name", "Initialized", "Shape"))
logging.info(line.format("----", "-----------", "-----"))
ckpt_var_list = ckpt_var_list.copy()
for name, shape in model_var_list.items():
name = name.split(":")[0]
if name in ckpt_var_list:
warm_started = "from ckpt"
del ckpt_var_list[name]
else:
warm_started = "random"
logging.info(line.format(name, warm_started, shape))
if ckpt_var_list:
logging.warning(
"The warm start checkpoint contained %d variables that were not used "
"for the model:\n", len(ckpt_var_list))
for name, shape in ckpt_var_list.items():
logging.warning(line.format(name, "not used", shape))
def add_scalars_to_summary(summary_dir, scalar_tensors_dict):
"""Creates a host_call function that writes summaries on TPU."""
# All tensors outfed from TPU should preserve batch size dimension.
scalar_tensors_dict = {
k: tf.reshape(v, [1]) for k, v in scalar_tensors_dict.items()
}
def host_call_fn(**kwargs):
writer = tf.summary.create_file_writer(summary_dir, max_queue=1000)
always_record = tf.summary.record_if(True)
with writer.as_default(), always_record:
for name, scalar in kwargs.items():
tf.summary.scalar(name, tf.reduce_mean(scalar),
tf.compat.v1.train.get_or_create_global_step())
return tf.compat.v1.summary.all_v2_summary_ops()
return host_call_fn, scalar_tensors_dict
########################## DEFAULT CONFIG UTILS ################################
def get_default_config():
"""Default values for BigBird."""
default_config = {
# transformer basic configs
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 4096,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 2,
"use_bias": True,
"rescale_embedding": False,
"scope": "bert",
# sparse mask configs
"attention_type": "block_sparse",
"norm_type": "postnorm",
"block_size": 16,
"num_rand_blocks": 3,
# common bert configs
"max_encoder_length": 1024,
"max_decoder_length": 64,
"couple_encoder_decoder": False,
"beam_size": 5,
"alpha": 0.7,
"label_smoothing": 0.1,
"weight_decay_rate": 0.01,
"optimizer_beta1": 0.9,
"optimizer_beta2": 0.999,
"optimizer_epsilon": 1e-6,
# TPU settings
"use_tpu": True,
"tpu_name": None,
"tpu_zone": None,
"tpu_job_name": None,
"gcp_project": None,
"master": None,
"num_tpu_cores": 8,
"iterations_per_loop": "1000",
}
return default_config
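# Usage sketch (not part of the original module): start from the defaults and
# override task-specific fields before constructing a model. The overridden
# values here are illustration choices only.
def _default_config_example():
  config = get_default_config()
  config.update({
      "attention_type": "block_sparse",  # or "original_full" for short inputs
      "max_encoder_length": 3072,
      "block_size": 64,
      "use_tpu": False,
  })
  return config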
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for rematerialization.
Incubates a version of tf.recompute_grad that is XLA compatible.
"""
import collections
import numbers
import os
import threading
from typing import Deque, List, NamedTuple, Optional, Sequence, Text, Union
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops
from tensorflow.python.ops import custom_gradient
# Remove when https://github.com/tensorflow/tensorflow/pull/45298
# gets merged
def get_variable_by_name(var_name):
"""Retrieves tf.Variable from name in MirroredStrategy (multi-gpu)."""
# Get all variables, but it will have copies from different replicas
all_global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def _replica_filter(var):
"""Filter out variables from different context."""
try:
return var_name == var.op.name
except AttributeError:
return False
candidate_vars = list(filter(_replica_filter, all_global_vars))
if len(candidate_vars) >= 1:
# Filter out non-trainable variables.
candidate_vars = [v for v in candidate_vars if v.trainable]
else:
raise ValueError('Unsuccessful at finding variable {}.'.format(var_name))
if len(candidate_vars) == 1:
return candidate_vars[0]
elif len(candidate_vars) > 1:
raise ValueError(
'Unsuccessful at finding trainable variable {}. '
'Number of candidates: {}. '
'Candidates: {}'.format(var_name, len(candidate_vars), candidate_vars))
else:
# The variable is not trainable.
return None
custom_gradient.get_variable_by_name = get_variable_by_name
class RecomputeContext(
NamedTuple('RecomputeContext', [
('is_recomputing', bool),
('seed', tf.Tensor),
('children', Deque['RecomputeContext']),
])):
"""Context for recomputation.
Attributes:
is_recomputing: Whether we are in a recomputation phase.
seed: Scalar integer tensor that should be used with stateless random ops
for deterministic behavior and correct computation of the gradient.
children: Nested `RecomputeContext` instances. Used internally by
`recompute_grad` to track nested instances of `RecomputeContext`.
"""
def __enter__(self):
return _context_stack.push(self)
def __exit__(self, exc_type, exc_value, traceback):
_context_stack.pop(self)
# Simplified version of `_DefaultStack` in
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/ops.py.
class _ContextStack(threading.local):
"""A thread-local stack for providing implicit recompute contexts."""
def __init__(self):
super(_ContextStack, self).__init__()
self._stack = []
def top(self) -> Optional[RecomputeContext]:
return self._stack[-1] if self._stack else None
def push(self, context: RecomputeContext):
self._stack.append(context)
return context
def pop(self, context: RecomputeContext):
if self._stack[-1] is not context:
raise AssertionError('Nesting violated for RecomputeContext.')
self._stack.pop()
_context_stack = _ContextStack()
def get_recompute_context() -> Optional[RecomputeContext]:
"""Returns the current recomputing context if it exists."""
return _context_stack.top()
# Adapted from
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/control_flow_util.py.
def _get_containing_xla_context(graph: tf.Graph) -> Optional[object]:
"""Returns the first ancestor `XLAControlFlowContext` in the `graph`."""
ctxt = graph._get_control_flow_context() # pylint: disable=protected-access
while ctxt:
if ctxt.IsXLAContext():
return ctxt
ctxt = ctxt.outer_context
return None
def _in_xla_context(graph: Optional[tf.Graph] = None) -> bool:
"""Detects whether we are in an XLA context."""
if '--tf_xla_auto_jit=2' in os.environ.get('TF_XLA_FLAGS', ''):
return True
graph = tf.compat.v1.get_default_graph() if graph is None else graph
while True:
if _get_containing_xla_context(graph) is not None:
return True
try:
graph = graph.outer_graph
except AttributeError:
return False
def _force_data_dependency(
first_compute: Sequence[tf.Tensor],
then_compute: Sequence[tf.Tensor]) -> List[tf.Tensor]:
"""Force all of `then_compute` to depend on all of `first_compute`.
Uses a dummy data dependency, which is useful when running on TPUs because
XLA ignores control dependencies. Only supports float arguments.
Args:
first_compute: Sequence of `Tensor`s to be executed before `then_compute`.
then_compute: Sequence of `Tensor`s to be executed after `first_compute`.
Returns:
Sequence of `Tensor`s with the same length as `then_compute`.
Raises:
ValueError: if ranks are unknown or types are not floating.
"""
def _first_element(x):
if x.shape.ndims is None:
raise ValueError('Rank of Tensor %s must be known' % x)
ndims = x.shape.ndims
begin = tf.zeros(ndims, dtype=tf.int32)
size = tf.ones(ndims, dtype=tf.int32)
return tf.reshape(tf.slice(x, begin, size), [])
first_compute_sum = tf.add_n(
[_first_element(x) for x in first_compute if x is not None])
dtype = first_compute_sum.dtype
if not dtype.is_floating:
raise ValueError('_force_data_dependency only supports floating dtypes.')
zero = np.finfo(dtype.as_numpy_dtype).tiny * first_compute_sum
return [
x + tf.cast(zero, x.dtype) if x is not None else None
for x in then_compute
]
def _make_seed_if_none(seed: Optional[tf.Tensor]) -> tf.Tensor:
"""Uses the global generator to make a seed if necessary."""
if seed is not None:
return seed
generator = tf.random.experimental.get_global_generator()
# The two seeds for stateless random ops don't have individual semantics and
# are scrambled together, so providing one seed is fine. This makes it easier
# for users to provide a local seed without worrying about integer overflow.
# See `make_seeds` in
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/stateful_random_ops.py.
try:
return generator.uniform_full_int([], tf.int32, name='recompute_grad_seed')
except (RuntimeError, TypeError, ValueError, tf.errors.NotFoundError) as e:
# For a number of reasons, the above operation can fail, e.g. when using
# multiple graphs or toggling between eager and graph modes. Reset the generator.
logging.warn('Resetting the generator. %s: %s', type(e), e)
tf.random.experimental.set_global_generator(None)
generator = tf.random.experimental.get_global_generator()
return generator.uniform_full_int([], tf.int32, name='recompute_grad_seed')
def recompute_grad(f, seed=None):
"""An eager-compatible version of recompute_grad.
For f(*args, **kwargs), this supports gradients with respect to args, as well
as gradients with respect to any variables residing in the kwarg 'variables'.
Note that for keras layer and model objects, this is handled automatically.
Warning: If `f` was originally a tf.keras Model or Layer object, `g` will not
be able to access the member variables of that object, because `g` returns
through the wrapper function `inner`. When recomputing gradients through
objects that inherit from keras, we suggest keeping a reference to the
underlying object around for the purpose of accessing these variables.
Args:
f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs.
seed: Optional seed for random ops. `seed` should be an integer scalar
`Tensor`. When compiling to XLA, `seed` must have dtype `tf.int32`. If
`seed` is not provided one will be generated.
Returns:
A function `g` that wraps `f`, but which recomputes `f` on the backwards
pass of a gradient call.
"""
@tf.custom_gradient
def inner(*args, **kwargs):
"""Inner function closure for calculating gradients."""
# Detect when we're nested and in the backwards pass, so we don't generate
# an additional seed.
parent_context = get_recompute_context()
if parent_context is not None and parent_context.is_recomputing:
# Use the cached context in the recomputation phase.
with parent_context.children.popleft()._replace(
is_recomputing=True) as context:
result = f(*args, **kwargs)
else:
with RecomputeContext(
is_recomputing=False,
seed=_make_seed_if_none(seed),
children=collections.deque()) as context:
result = f(*args, **kwargs)
# In the forward pass, build up a tree of recomputation contexts.
if parent_context is not None and not parent_context.is_recomputing:
parent_context.children.append(context)
def grad(*dresult, **grad_kwargs):
"""Gradient function calculation for inner function."""
variables = grad_kwargs.pop('variables', None)
if grad_kwargs:
raise ValueError('Found unexpected kwargs for `grad`: ',
list(grad_kwargs.keys()))
inputs, seed = list(args), context.seed
if _in_xla_context():
inputs = _force_data_dependency(
tf.nest.flatten(dresult), inputs + [seed])
seed = inputs.pop()
# tf.keras.backend.set_learning_phase(1)
with tf.GradientTape() as tape:
tape.watch(inputs)
if variables is not None:
tape.watch(variables)
with tf.control_dependencies(dresult):
with context._replace(is_recomputing=True, seed=seed):
result = f(*inputs, **kwargs)
kw_vars = []
if variables is not None:
kw_vars = list(variables)
grads = tape.gradient(
result, list(inputs) + kw_vars, output_gradients=dresult)
return grads[:len(inputs)], grads[len(inputs):]
return result, grad
return inner
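# Usage sketch (not part of the original module): wrap a block so that its
# activations are recomputed during the backward pass instead of being stored.
# The Dense layer and sizes here are arbitrary illustration choices.
def _recompute_grad_example():
  dense = tf.keras.layers.Dense(8)
  dense.build((None, 8))  # create variables before wrapping
  block = recompute_grad(lambda x: tf.nn.relu(dense(x)))
  x = tf.random.normal([4, 8])
  with tf.GradientTape() as tape:
    loss = tf.reduce_sum(block(x))
  return tape.gradient(loss, dense.trainable_variables)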
######################## STATELESS DROPOUT LAYERS ##############################
def _as_shape(shape: Union[Sequence[int], tf.TensorShape]) -> tf.TensorShape:
"""Converts the given object to a TensorShape."""
return shape if isinstance(shape, tf.TensorShape) else tf.TensorShape(shape)
def _get_noise_shape(
x: tf.Tensor, noise_shape: Union[Sequence[int], tf.TensorShape]
) -> Union[tf.Tensor, tf.TensorShape, Sequence[int]]:
"""Computes the shape of the binary mask for dropout."""
# If noise_shape is none return immediately.
if noise_shape is None:
return tf.shape(x)
try:
# Best effort to figure out the intended shape.
# If not possible, let the op handle it.
# In eager mode an exception will show up.
noise_shape_ = _as_shape(noise_shape)
except (TypeError, ValueError):
return noise_shape
if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
new_dims = []
for i, dim in enumerate(x.shape.dims):
if noise_shape_.dims[i].value is None and dim.value is not None:
new_dims.append(dim.value)
else:
new_dims.append(noise_shape_.dims[i].value)
return tf.TensorShape(new_dims)
return noise_shape
def stateless_dropout(x: tf.Tensor,
rate: float,
seed: tf.Tensor,
noise_shape: Optional[Union[Sequence[int],
tf.TensorShape]] = None,
name: Optional[Text] = None) -> tf.Tensor:
"""Computes dropout: randomly sets elements to zero to prevent overfitting.
See https://www.tensorflow.org/api_docs/python/tf/nn/dropout.
This version differs in that the seed is required if the rate is nonzero.
Args:
x: A floating point tensor.
rate: A scalar `Tensor` with the same type as x. The probability that each
element is dropped. For example, setting rate=0.1 would drop 10% of input
elements.
seed: A shape [2] integer Tensor of seeds to the random number generator.
Must have dtype `tf.int32` when compiling to XLA.
noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
randomly generated keep/drop flags.
name: A name for this operation (optional).
Returns:
A `Tensor` of the same shape of `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point
tensor. `rate=1` is disallowed, because the output would be all zeros,
which is likely not what was intended.
"""
with tf.name_scope(name or 'stateless_dropout') as name:
x = tf.convert_to_tensor(x, name='x')
if not x.dtype.is_floating:
raise ValueError('x has to be a floating point tensor since it\'s going '
'to be scaled. Got a %s tensor instead.' % x.dtype)
if isinstance(rate, numbers.Real):
if not (rate >= 0 and rate < 1):
raise ValueError('rate must be a scalar tensor or a float in the '
'range [0, 1), got %g' % rate)
if rate > 0.5:
logging.log_first_n(
logging.WARN, 'Large dropout rate: %g (>0.5). In TensorFlow '
'2.x, dropout() uses dropout rate instead of keep_prob. '
'Please ensure that this is intended.', 5, rate)
# Early return if nothing needs to be dropped.
if tf.get_static_value(rate) == 0:
return x
rate = tf.convert_to_tensor(rate, dtype=x.dtype, name='rate')
rate.shape.assert_has_rank(0)
noise_shape = _get_noise_shape(x, noise_shape)
# Sample a uniform distribution on [0.0, 1.0) and select values larger than
# rate.
#
# NOTE: Random uniform actually can only generate 2^23 floats on [1.0, 2.0)
# and subtract 1.0.
random_tensor = tf.random.stateless_uniform(
noise_shape, seed=seed, dtype=x.dtype)
keep_prob = 1 - rate
scale = 1 / keep_prob
# NOTE: if (1.0 + rate) - 1 is equal to rate, then we want to consider that
# float to be selected, hence we use a >= comparison.
keep_mask = random_tensor >= rate
ret = x * scale * tf.cast(keep_mask, x.dtype)
if not tf.executing_eagerly():
ret.set_shape(x.get_shape())
return ret
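# Determinism sketch (not part of the original module): with a fixed [2]-shaped
# seed the same dropout mask is produced on every call, which is what
# recompute_grad relies on to reproduce the forward pass exactly.
def _stateless_dropout_example():
  x = tf.ones([2, 4])
  seed = tf.constant([1, 2], tf.int32)
  first = stateless_dropout(x, rate=0.5, seed=seed)
  second = stateless_dropout(x, rate=0.5, seed=seed)
  return bool(tf.reduce_all(tf.equal(first, second)))  # True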
# Reimplements internal function
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/smart_cond.py.
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if not callable(true_fn):
raise TypeError('`true_fn` must be callable.')
if not callable(false_fn):
raise TypeError('`false_fn` must be callable.')
pred_value = tf.get_static_value(pred)
if isinstance(pred, tf.Variable) or pred_value is None:
return tf.cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name)
if pred_value:
return true_fn()
else:
return false_fn()
# See https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dropout.
class RecomputingDropout(tf.keras.layers.Layer):
"""`tf.keras.layers.Dropout` that supports `recompute_grad`."""
def __init__(self,
rate,
noise_shape=None,
seed=None,
force_recomputation=False,
**kwargs):
"""Initializes `RecomputingDropout`.
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the binary
dropout mask that will be multiplied with the input. For instance, if
inputs have shape `(batch_size, timesteps, features)` and you want the
dropout mask to be the same for all timesteps, you can use
`noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
force_recomputation: If `True`, then raises an error if called outside a
recompute context.
**kwargs: Keyword arguments for `tf.keras.layers.Layer`.
"""
super(RecomputingDropout, self).__init__(**kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.force_recomputation = force_recomputation
self.supports_masking = True
# Create a layer-specific seed to combine with the global recompute seed.
self._recompute_seed = (
np.random.randint(-2**31, 2**31, dtype=np.int32)
if seed is None else seed)
def _get_noise_shape(self, inputs):
# Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
# which will override `self.noise_shape`, and allows for custom noise
# shapes with dynamically sized inputs.
if self.noise_shape is None:
return None
concrete_inputs_shape = tf.shape(inputs)
noise_shape = []
for i, value in enumerate(self.noise_shape):
noise_shape.append(concrete_inputs_shape[i] if value is None else value)
return tf.convert_to_tensor(noise_shape)
def call(self, inputs, training=None):
"""Builds computation graph.
Args:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
`inputs` masked according to layer configuration.
Raises:
ValueError: If `force_recomputation` is `True` and called outside a
recompute context.
"""
if self.rate == 0:
return inputs
if training is None:
training = tf.keras.backend.learning_phase()
def dropped_inputs():
"""Randomly drops elements of `inputs` when `training=True`."""
recompute_context = get_recompute_context()
if recompute_context is None:
if self.force_recomputation:
raise ValueError(
'RecomputeContext is required when force_recomputation=True.')
return tf.nn.dropout(
inputs,
noise_shape=self._get_noise_shape(inputs),
seed=self.seed,
rate=self.rate)
seed = tf.stack([recompute_context.seed, self._recompute_seed])
return stateless_dropout(
inputs,
rate=self.rate,
seed=seed,
noise_shape=self._get_noise_shape(inputs))
output = smart_cond(training, dropped_inputs, lambda: tf.identity(inputs))
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'rate': self.rate,
'noise_shape': self.noise_shape,
'seed': self.seed,
'force_recomputation': self.force_recomputation,
}
base_config = super(RecomputingDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
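# Usage sketch (not part of the original module): outside a recompute context the
# layer behaves like tf.keras.layers.Dropout; inside one it switches to
# stateless_dropout keyed on the context seed.
def _recomputing_dropout_example():
  layer = RecomputingDropout(rate=0.1)
  x = tf.ones([2, 4])
  eval_out = layer(x, training=False)   # identity at inference time
  train_out = layer(x, training=True)   # randomly zeroed and rescaled elements
  return eval_out, train_out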
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BigBird model and related functions."""
import copy
from absl import logging
from bigbird.core import decoder
from bigbird.core import encoder
from bigbird.core import utils
import tensorflow.compat.v2 as tf
class BertModel(tf.keras.layers.Layer):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into SentencePiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
params=utils.get_default_config()
params['vocab_size']=32000
params['hidden_size']=512
params['num_hidden_layers']=8
params['num_attention_heads']=6
params['intermediate_size']=1024
model = modeling.BertModel(params, training=True)
_, pooled_output = model(input_ids=input_ids, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self, params):
"""Constructor for BertModel.
Args:
params: `BigBirdConfig` dictionary.
"""
self.params = copy.deepcopy(params)
self.scope = params["scope"]
super(BertModel, self).__init__(name=self.scope)
# validate params
self.pad = lambda x: x
if params["max_encoder_length"] <= 512:
logging.info("Switching to full attention for short sequences")
self.params["attention_type"] = "original_full"
if self.params["attention_type"] == "simulated_sparse" or self.params[
"attention_type"] == "block_sparse":
if params["max_encoder_length"] % params["block_size"]:
logging.info("Expand max_encoder_length to next multiple of block_size")
self.params["max_encoder_length"] = (
params["max_encoder_length"] // params["block_size"] +
1) * params["block_size"]
pad_size = self.params["max_encoder_length"] - params[
"max_encoder_length"]
paddings = [[0, 0], [0, pad_size]]
self.pad = lambda x: tf.pad(x, paddings)
with tf.compat.v1.variable_scope(self.scope, reuse=tf.compat.v1.AUTO_REUSE):
self.embeder = utils.EmbeddingLayer(
vocab_size=self.params["vocab_size"],
emb_dim=self.params["hidden_size"],
initializer=utils.create_initializer(
self.params["initializer_range"]),
scale_emb=self.params["rescale_embedding"],
use_token_type=True,
num_token_types=self.params["type_vocab_size"],
use_position_embeddings=True,
max_position_embeddings=self.params["max_position_embeddings"],
dropout_prob=self.params["hidden_dropout_prob"])
self.encoder = encoder.EncoderStack(self.params)
self.pooler = utils.SimpleDenseLayer(
input_size=self.params["hidden_size"],
output_size=self.params["hidden_size"],
initializer=utils.create_initializer(
self.params["initializer_range"]),
activation=tf.tanh,
name="pooler/dense")
def call(self,
input_ids,
token_type_ids=None,
training=None):
"""Constructor for BertModel.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
training: Boolean indicating whether the call is training or inference.
Returns:
sequence_output: Tensor of shape [batch_size, seq_length, hidden_size]
pooled_output: Tensor of shape [batch_size, hidden_size]
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
# pad if needed
input_ids = self.pad(input_ids)
if token_type_ids is None:
token_type_ids = tf.zeros_like(input_ids, dtype=tf.int32)
else:
token_type_ids = self.pad(token_type_ids)
# Perform embedding lookup on the word ids.
embedding_output = self.embeder(input_ids,
self.params["max_encoder_length"],
token_type_ids=token_type_ids,
training=training)
# Generate mask.
input_mask = tf.where(input_ids > 0,
tf.ones_like(input_ids), tf.zeros_like(input_ids))
# Run the stacked transformer.
sequence_output = self.encoder(embedding_output, input_mask, training)
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
first_token_tensor = sequence_output[:, 0, :]
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
pooled_output = self.pooler(first_token_tensor)
return sequence_output, pooled_output
class TransformerModel(tf.keras.layers.Layer):
"""Encoder-Decoder transformer model.
Example usage:
```python
# Already been converted into SentencePiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
target_ids = tf.constant([[43, 76, 38], [56, 8, 0]])
params = utils.BigBirdConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.TransformerModel(params, train=True)
predictions, _ = model(input_ids=input_ids, target_ids=target_ids)
log_probs, logits, pred_ids = predictions
...
```
"""
def __init__(self, params):
"""Constructor for TransformerModel.
Args:
params: `BigBirdConfig` dictionary.
"""
self.params = copy.deepcopy(params)
self.scope = params["scope"]
super(TransformerModel, self).__init__(name=self.scope)
# validate params
self.pad = lambda x: x
if params["max_encoder_length"] <= 512:
logging.info("Switching to full attention for short sequences")
self.params["attention_type"] = "original_full"
if self.params["attention_type"] == "simulated_sparse" or self.params[
"attention_type"] == "block_sparse":
if params["max_encoder_length"] % params["block_size"]:
logging.info("Expand max_encoder_length to next multiple of block_size")
self.params["max_encoder_length"] = (
params["max_encoder_length"] // params["block_size"] +
1) * params["block_size"]
pad_size = self.params["max_encoder_length"] - params[
"max_encoder_length"]
paddings = [[0, 0], [0, pad_size]]
self.pad = lambda x: tf.pad(x, paddings)
with tf.compat.v1.variable_scope(self.scope, reuse=tf.compat.v1.AUTO_REUSE):
self.embeder = utils.EmbeddingLayer(
vocab_size=self.params["vocab_size"],
emb_dim=self.params["hidden_size"],
initializer=utils.create_initializer(
self.params["initializer_range"]),
scale_emb=self.params["rescale_embedding"],
use_token_type=False,
num_token_types=None,
use_position_embeddings=True,
max_position_embeddings=self.params["max_position_embeddings"],
dropout_prob=self.params["hidden_dropout_prob"])
self.encoder = encoder.EncoderStack(self.params)
self.decoder = decoder.DecoderStack(self.params)
def _encode(self, input_ids, training=None):
"""Generate continuous representation for ids.
Args:
input_ids: Int tensor with shape [batch_size, input_length].
training: Boolean indicating whether the call is training or inference.
Returns:
A float tensor of shape [batch_size, input_length, hidden_size] and an
int mask tensor of shape [batch_size, input_length].
"""
# pad if needed
input_ids = self.pad(input_ids)
# Perform embedding lookup on the word ids.
input_embs = self.embeder(
input_ids, self.params["max_encoder_length"], training=training)
# Generate mask.
input_mask = tf.where(input_ids > 0,
tf.ones_like(input_ids), tf.zeros_like(input_ids))
# Run the stacked transformer.
encoder_output = self.encoder(input_embs, input_mask, training=training)
return encoder_output, input_mask
def _get_start_token_ids(self, tensor_for_shape):
start_token_id = 2
batch_size = utils.get_shape_list(tensor_for_shape)[0]
return tf.ones([batch_size], dtype=tf.int32) * start_token_id
def get_inputs_from_targets(self, targets, start_token_ids):
"""Converts target ids to input ids, i.e. adds <s> and removes last."""
length = tf.math.count_nonzero(targets, axis=1, dtype=tf.int32)
# Add start token ids.
inputs = tf.concat([tf.expand_dims(start_token_ids, axis=1), targets], 1)
# Remove </s> from the input.
mask = tf.sequence_mask(length, self.params["max_decoder_length"]+1,
dtype=tf.int32)
inputs = (mask * inputs)[:, :-1]
return inputs
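# Worked example (illustrative, not part of the original code): with
# max_decoder_length=4, targets [7, 8, 1, 0] (</s>=1, padding=0) and
# start_token_id <s>=2, the computation above yields
#   inputs = [2, 7, 8, 1, 0]   (prepend <s>)
#   mask   = [1, 1, 1, 0, 0]   (sequence_mask(length=3, 5))
#   output = [2, 7, 8, 0]      (mask, then drop the last position)
# so the decoder never sees </s> as an input token.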
def _decode(self, target_ids, target_mask, start_token_ids,
encoder_output, encoder_mask, training=None):
"""Compute likelihood of target tokens under the model.
Args:
target_ids: int tensor with shape [batch_size, target_length].
target_mask: self-attention bias for decoder attention layer. [batch_size,
input_length]
start_token_ids: int32 tensor of shape [batch_size] for first decoder
input.
encoder_output: Continuous representation of input sequence. Float tensor
with shape [batch_size, input_length, hidden_size].
encoder_mask: Float tensor with shape [batch_size, input_length].
training: Boolean indicating whether the call is training or inference.
Returns:
A tuple of (log_probs, logits, pred_ids) for the target tokens.
"""
# Prepare inputs to decoder layers by shifting targets, embedding ids,
# adding positional encoding and applying dropout.
input_ids = self.get_inputs_from_targets(target_ids, start_token_ids)
input_embs = self.embeder(input_ids, self.params["max_decoder_length"],
training=training)
outputs = self.decoder(input_embs, target_mask,
encoder_output, encoder_mask, training=training)
logits = self.embeder.linear(outputs)
output_ids = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
log_probs = -tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_ids, logits=logits)
log_probs = tf.where(target_ids > 0, log_probs,
tf.zeros_like(log_probs, tf.float32))
return (tf.identity(log_probs, name="log_probs"),
tf.identity(logits, name="logits"),
tf.cast(output_ids, tf.int32, name="pred_ids"),)
def _init_cache(self, batch_size):
"""Initialize cache for decoding."""
max_decode_len = self.params["max_decoder_length"]
num_heads = self.params["num_attention_heads"]
head_size = int(self.params["hidden_size"] / num_heads)
cache = {}
for layer in range(self.params["num_hidden_layers"]):
cache["layer_%d" % layer] = {
"k": tf.zeros([batch_size, num_heads, max_decode_len, head_size]),
"v": tf.zeros([batch_size, num_heads, max_decode_len, head_size]),
}
return cache
def _get_symbols_to_logits_fn(self, decoder_self_attention_mask):
"""Returns a decoding function that calculates logits of the next tokens."""
max_decode_len = self.params["max_decoder_length"]
def _symbols_to_logits_fn(target_ids, cache, i):
"""Generate logits for next candidate IDs.
Args:
target_ids: Current decoded sequences. int tensor with shape
[batch_size, i + 1]
cache: dictionary of values storing the encoder output, encoder-decoder
attention bias, and previous decoder attention values.
i: Loop index
Returns:
        Logits with shape [batch_size * beam_size, vocab_size].
"""
decoder_input = tf.slice(target_ids,
[0, tf.maximum(tf.cast(0, i.dtype), i - 1)],
[target_ids.shape[0], 1])
self_attention_mask = tf.slice(decoder_self_attention_mask, [0, 0, i, 0],
[1, 1, 1, max_decode_len])
# Preprocess decoder input by getting embeddings and adding timing signal.
decoder_input = self.embeder(
decoder_input, 1, start_pos=i, training=False)
decoder_output = self.decoder(
decoder_input, self_attention_mask,
cache.get("encoder_output"), cache.get("encoder_mask"),
cache=cache, decode_i=i, training=False)
logits = self.embeder.linear(decoder_output)
logits = tf.squeeze(logits, axis=[1])
return logits
return _symbols_to_logits_fn
def _predict(self, target_ids, target_mask, start_token_ids,
encoder_output, encoder_mask):
"""Beam decode output tokens and probabilities.
Args:
      target_ids: Int tensor with shape [batch_size, target_length], or None to
        score the beam-decoded ids instead.
      target_mask: causal self-attention mask for the decoder attention layer.
      start_token_ids: int32 tensor of shape [batch_size] for first decoder
        input.
      encoder_output: Continuous representation of input sequence. Float
        tensor with shape [batch_size, input_length, hidden_size].
      encoder_mask: Float tensor with shape [batch_size, input_length] used as
        bias for the encoder-decoder attention layer.
Returns:
A tuple of:
`log_probs`: Log-probs of output tokens.
`logits`: Logits of output tokens.
`pred_ids`: Predicted output sequence.
"""
batch_size = utils.get_shape_list(start_token_ids)[0]
end_token_id = 1
# One step logit function.
symbols_to_logits_fn = self._get_symbols_to_logits_fn(target_mask)
# Create cache storing decoder attention values for each layer.
cache = self._init_cache(batch_size)
if encoder_output is not None:
# Add encoder output and attention bias to the cache.
cache["encoder_output"] = encoder_output
cache["encoder_mask"] = encoder_mask
decoded_ids = decoder.left2right_decode(
symbols_to_logits_fn,
start_token_ids,
cache,
batch_size,
self.params["max_decoder_length"],
vocab_size=self.params["vocab_size"],
beam_size=self.params["beam_size"],
beam_start=5,
beam_alpha=self.params["alpha"],
beam_min=0,
beam_max=-1,
eos_id=end_token_id)
# Get the top sequence for each batch element
output_ids = tf.cast(decoded_ids, tf.int32, name="pred_ids")
# Calculate log probs for given sequence if available.
calc_ids = output_ids if target_ids is None else target_ids
output_log_probs, output_logits, _ = self._decode(
calc_ids, target_mask, start_token_ids,
encoder_output, encoder_mask, training=False)
return (output_log_probs, output_logits, output_ids)
def _decode_and_predict(self, target_ids, encoder_output, encoder_mask,
training=None):
"""Decodes a sequence given the input and the encoder.
Args:
      target_ids: Int tensor with shape [batch_size, target_length], or None
        when only prediction is required.
      encoder_output: Continuous representation of input sequence. Float
        tensor with shape [batch_size, input_length, hidden_size].
      encoder_mask: Float tensor with shape [batch_size, input_length] used as
        bias for the encoder-decoder attention layer.
training: Boolean indicating whether the call is training or inference.
Returns:
A tuple of:
`log_probs`: Log-probs of output tokens.
`logits`: Logits of output tokens.
`pred_ids`: Predicted output sequence.
"""
# Create initial set of IDs that will be passed into symbols_to_logits_fn.
start_token_ids = self._get_start_token_ids(encoder_output)
# Create causal self-attention mask for decoder.
target_mask = decoder.create_self_attention_mask(
self.params["max_decoder_length"])
predictions = {}
if training:
predictions = self._decode(target_ids, target_mask, start_token_ids,
encoder_output, encoder_mask, training=True)
else:
predictions = self._predict(target_ids, target_mask, start_token_ids,
encoder_output, encoder_mask)
return predictions
def call(self,
input_ids,
target_ids=None,
training=None):
# Run the inputs through the encoder layer to map the symbol
# representations to continuous representations.
encoder_output, encoder_mask = self._encode(input_ids, training=training)
# Decode.
predictions = self._decode_and_predict(target_ids, encoder_output,
encoder_mask, training=training)
return predictions, encoder_output
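# Usage sketch (illustrative only): the enclosing class is instantiated as
# modeling.TransformerModel in the fine-tuning script below. Given a params
# dict with the keys referenced above (vocab_size, hidden_size,
# num_hidden_layers, max_encoder_length, max_decoder_length, beam_size,
# alpha, ...),
#   model = TransformerModel(params)
#   (log_probs, logits, pred_ids), encoder_output = model(
#       input_ids, target_ids=target_ids, training=True)
# where input_ids is an int tensor of shape [batch_size, input_length] and
# target_ids is an int tensor of shape [batch_size, target_length].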
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run summarization fine-tuning for BigBird.."""
import os
import time
from absl import app
from absl import logging
from bigbird.core import flags
from bigbird.core import modeling
from bigbird.core import optimization
from bigbird.core import utils
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_text as tft
from rouge_score import rouge_scorer
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", "tfds://scientific_papers/pubmed",
"The input data dir. Should contain the TFRecord files. "
"Can be TF Dataset with prefix tfds://")
flags.DEFINE_string(
"output_dir", "/tmp/bigb",
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BigBird model).")
flags.DEFINE_integer(
"max_encoder_length", 128,
"The maximum total input sequence length after SentencePiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"max_decoder_length", 128,
"The maximum total input sequence length after SentencePiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_string(
"substitute_newline", None,
"Replace newline charachter from text with supplied string.")
flags.DEFINE_bool(
"do_train", True,
"Whether to run training.")
flags.DEFINE_bool(
"do_eval", False,
"Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_export", False,
"Whether to export the model as TF SavedModel.")
flags.DEFINE_integer(
"train_batch_size", 8,
"Local batch size for training. "
"Total batch size will be multiplied by number gpu/tpu cores available.")
flags.DEFINE_integer(
"eval_batch_size", 8,
"Local batch size for eval. "
"Total batch size will be multiplied by number gpu/tpu cores available.")
flags.DEFINE_string(
"optimizer", "Adafactor",
"Optimizer to use. Can be Adafactor, Adam, and AdamWeightDecay.")
flags.DEFINE_float(
"learning_rate", 0.32,
"The initial learning rate for Adam.")
flags.DEFINE_integer(
"num_train_steps", 1000,
"Total number of training steps to perform.")
flags.DEFINE_integer(
"num_warmup_steps", 100,
"Number of steps to perform linear warmup.")
flags.DEFINE_integer(
"save_checkpoints_steps", 2000,
"How often to save the model checkpoint.")
flags.DEFINE_integer(
"max_eval_steps", 100,
"Maximum number of eval steps.")
flags.DEFINE_bool(
"couple_encoder_decoder", False,
"Whether to tie encoder and decoder weights.")
flags.DEFINE_integer(
"beam_size", 5,
"Beam size for decoding.")
flags.DEFINE_float(
"alpha", 0.8,
"Strength of length normalization for beam search.")
flags.DEFINE_float(
"label_smoothing", 0.1,
"Label smoothing for prediction cross entropy loss.")
def input_fn_builder(data_dir, vocab_model_file, max_encoder_length,
max_decoder_length, substitute_newline, is_training,
tmp_dir=None):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def _decode_record(record):
"""Decodes a record to a TensorFlow example."""
name_to_features = {
"document": tf.io.FixedLenFeature([], tf.string),
"summary": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(record, name_to_features)
return example["document"], example["summary"]
def _tokenize_example(document, summary):
tokenizer = tft.SentencepieceTokenizer(
model=tf.io.gfile.GFile(vocab_model_file, "rb").read())
if substitute_newline:
document = tf.strings.regex_replace(document, "\n", substitute_newline)
# Remove space before special tokens.
document = tf.strings.regex_replace(document, r" ([<\[]\S+[>\]])", b"\\1")
document_ids = tokenizer.tokenize(document)
if isinstance(document_ids, tf.RaggedTensor):
document_ids = document_ids.to_tensor(0)
document_ids = document_ids[:max_encoder_length]
    # Optionally substitute newlines in the summary as well.
if substitute_newline:
summary = tf.strings.regex_replace(summary, "\n", substitute_newline)
# Remove space before special tokens.
summary = tf.strings.regex_replace(summary, r" ([<\[]\S+[>\]])", b"\\1")
summary_ids = tokenizer.tokenize(summary)
# Add [EOS] (1) special tokens.
suffix = tf.constant([1])
summary_ids = tf.concat([summary_ids, suffix], axis=0)
if isinstance(summary_ids, tf.RaggedTensor):
summary_ids = summary_ids.to_tensor(0)
summary_ids = summary_ids[:max_decoder_length]
return document_ids, summary_ids
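  # Illustrative example (ids made up): a document tokenized to
  # [200, 31, 45, ...] is truncated to at most max_encoder_length ids, while a
  # summary tokenized to [88, 17] becomes [88, 17, 1] -- the EOS id (1) is
  # appended before truncation to max_decoder_length.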
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# Load dataset and handle tfds separately
split = "train" if is_training else "validation"
if "tfds://" == data_dir[:7]:
d = tfds.load(data_dir[7:], split=split, data_dir=tmp_dir,
shuffle_files=is_training, as_supervised=True)
else:
input_files = tf.io.gfile.glob(
os.path.join(data_dir, "{}.tfrecord*".format(split)))
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.shuffle(buffer_size=len(input_files))
# Non deterministic mode means that the interleaving is not exact.
# This adds even more randomness to the training pipeline.
d = d.interleave(tf.data.TFRecordDataset,
deterministic=False,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
else:
d = tf.data.TFRecordDataset(input_files)
d = d.map(_decode_record,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
deterministic=is_training)
d = d.map(_tokenize_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
deterministic=is_training)
if is_training:
d = d.shuffle(buffer_size=10000, reshuffle_each_iteration=True)
d = d.repeat()
d = d.padded_batch(batch_size, ([max_encoder_length], [max_decoder_length]),
drop_remainder=True) # For static shape
return d
return input_fn
def serving_input_fn_builder(batch_size, max_encoder_length,
vocab_model_file, substitute_newline):
"""Creates an `input_fn` closure for exported SavedModel."""
def dynamic_padding(inp, min_size):
pad_size = tf.maximum(min_size - tf.shape(inp)[1], 0)
paddings = [[0, 0], [0, pad_size]]
return tf.pad(inp, paddings)
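  # Illustrative example: a tokenized batch of shape [batch, 70] with
  # min_size=128 gets 58 zeros appended along the second axis; inputs that are
  # already at least min_size long are left unchanged (pad_size is 0) and are
  # later sliced down to max_encoder_length.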
def input_fn():
# text input
text = tf.compat.v1.placeholder(tf.string, [batch_size], name="input_text")
# text tokenize
tokenizer = tft.SentencepieceTokenizer(
model=tf.io.gfile.GFile(vocab_model_file, "rb").read())
if substitute_newline:
text = tf.strings.regex_replace(text, "\n", substitute_newline)
# Remove space before special tokens.
text = tf.strings.regex_replace(text, r" ([<\[]\S+[>\]])", b"\\1")
ids = tokenizer.tokenize(text)
if isinstance(ids, tf.RaggedTensor):
ids = ids.to_tensor(0)
# text padding: Pad only if necessary and reshape properly
padded_ids = dynamic_padding(ids, max_encoder_length)
ids = tf.slice(padded_ids, [0, 0], [batch_size, max_encoder_length])
receiver_tensors = {"input": text}
features = {"input_ids": tf.cast(ids, tf.int32, name="input_ids")}
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=receiver_tensors)
return input_fn
def model_fn_builder(transformer_config):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
if isinstance(features, dict):
if not labels and "target_ids" in features:
labels = features["target_ids"]
features = features["input_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = modeling.TransformerModel(transformer_config)
(llh, logits, pred_ids), _ = model(features, target_ids=labels,
training=is_training)
total_loss = padded_cross_entropy_loss(
logits, labels,
transformer_config["label_smoothing"],
transformer_config["vocab_size"])
tvars = tf.compat.v1.trainable_variables()
utils.log_variables(tvars, transformer_config["ckpt_var_list"])
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
learning_rate = optimization.get_linear_warmup_rsqrt_decay_lr(
init_lr=transformer_config["learning_rate"],
hidden_size=transformer_config["hidden_size"],
num_warmup_steps=transformer_config["num_warmup_steps"])
optimizer = optimization.get_optimizer(transformer_config, learning_rate)
global_step = tf.compat.v1.train.get_global_step()
if not transformer_config["use_bias"]:
logging.info("Fixing position embedding, i.e. not trainable.")
posemb = "pegasus/embeddings/position_embeddings"
tvars = list(filter(lambda v: v.name.split(":")[0] != posemb, tvars))
gradients = optimizer.compute_gradients(total_loss, tvars)
train_op = optimizer.apply_gradients(gradients, global_step=global_step)
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
host_call=utils.add_scalars_to_summary(
transformer_config["output_dir"],
{"learning_rate": learning_rate}))
elif mode == tf.estimator.ModeKeys.EVAL:
tokenizer = tft.SentencepieceTokenizer(
model=tf.io.gfile.GFile(transformer_config["vocab_model_file"],
"rb").read())
def rouge_py_func(label_sent, pred_sent):
"""Approximate ROUGE scores, always run externally for final scores."""
scorer = rouge_scorer.RougeScorer(
["rouge1", "rouge2", "rougeLsum"],
use_stemmer=True)
r1, r2, rl = 0.0, 0.0, 0.0
for ls, ps in zip(label_sent, pred_sent):
score = scorer.score(ls.decode("utf-8"), ps.decode("utf-8"))
r1 += score["rouge1"].fmeasure
r2 += score["rouge2"].fmeasure
rl += score["rougeLsum"].fmeasure
return r1/len(label_sent), r2/len(label_sent), rl/len(label_sent)
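      # Illustrative example (made-up strings): rouge_scorer.RougeScorer scores
      # a (reference, prediction) pair, e.g.
      #   scorer.score("the cat sat", "the cat sat down")["rouge1"].fmeasure
      # returns an F-measure in [0, 1]; the loop above averages these over the
      # batch as an approximate, on-the-fly metric.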
def metric_fn(loss, log_probs, label_ids, pred_ids):
loss = tf.compat.v1.metrics.mean(values=loss)
log_probs = tf.compat.v1.metrics.mean(
values=log_probs,
weights=tf.cast(tf.not_equal(label_ids, 0), tf.float32))
metric_dict = {
"prediction_loss": loss,
"log_likelihood": log_probs,
}
if not transformer_config["use_tpu"]:
# Approximate ROUGE scores if not running on tpus.
# Always run externally for final scores.
label_sent = tokenizer.detokenize(label_ids)
label_sent = tf.strings.regex_replace(label_sent, r"([<\[]\S+[>\]])",
b" \\1")
pred_sent = tokenizer.detokenize(pred_ids)
pred_sent = tf.strings.regex_replace(pred_sent, r"([<\[]\S+[>\]])",
b" \\1")
if transformer_config["substitute_newline"]:
label_sent = tf.strings.regex_replace(
label_sent, transformer_config["substitute_newline"], "\n")
pred_sent = tf.strings.regex_replace(
pred_sent, transformer_config["substitute_newline"], "\n")
rouge_value = tf.compat.v1.py_func(
func=rouge_py_func,
inp=[label_sent, pred_sent],
Tout=[tf.float64, tf.float64, tf.float64],
stateful=False)
rouge_value = tf.cast(rouge_value, tf.float32)
rouge1 = tf.compat.v1.metrics.mean(values=rouge_value[0])
rouge2 = tf.compat.v1.metrics.mean(values=rouge_value[1])
rougeL = tf.compat.v1.metrics.mean(values=rouge_value[2]) # pylint: disable=invalid-name
metric_dict.update({
"eval/Rouge-1": rouge1,
"eval/Rouge-2": rouge2,
"eval/Rouge-L": rougeL,
})
return metric_dict
eval_metrics = (metric_fn,
[total_loss, llh, labels, pred_ids])
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics)
else:
prediction_dict = {"pred_ids": pred_ids}
if not transformer_config["use_tpu"]:
tokenizer = tft.SentencepieceTokenizer(
model=tf.io.gfile.GFile(transformer_config["vocab_model_file"],
"rb").read())
pred_sent = tokenizer.detokenize(pred_ids)
# Add a space before special tokens.
pred_sent = tf.strings.regex_replace(
pred_sent, r"([<\[]\S+[>\]])", b" \\1")
if transformer_config["substitute_newline"]:
pred_sent = tf.strings.regex_replace(
pred_sent, transformer_config["substitute_newline"], "\n")
prediction_dict.update({"pred_sent": pred_sent})
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
predictions=prediction_dict)
return output_spec
return model_fn
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
    A scalar float32 tensor with the label-smoothed cross entropy loss,
    averaged over non-padding tokens (0.0 if labels is None).
"""
with tf.name_scope("loss"):
if labels is not None:
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy"):
confidence = 1.0 - smoothing
vocab_float = tf.cast(vocab_size - 1, tf.float32)
low_confidence = (1.0 - confidence) / vocab_float
soft_targets = tf.one_hot(
labels,
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.math.log(confidence) + vocab_float *
low_confidence * tf.math.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
loss = tf.reduce_sum(xentropy) / tf.reduce_sum(weights)
else:
loss = tf.constant(0.0)
return loss
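# A minimal sketch (not called anywhere) of the label-smoothing arithmetic in
# padded_cross_entropy_loss above, written with plain numpy for a single
# token. The logits and the true class index are made-up example values and
# assume the default vocab_size of 4.
def _label_smoothing_example(smoothing=0.1, vocab_size=4):
  import numpy as np  # Local import: this helper is illustrative only.
  confidence = 1.0 - smoothing
  low_confidence = smoothing / (vocab_size - 1)
  # Smoothed target distribution with the true class at index 2.
  soft_targets = np.full(vocab_size, low_confidence)
  soft_targets[2] = confidence
  # Example logits and their log-softmax.
  logits = np.array([0.1, 0.2, 2.0, -1.0])
  log_probs = logits - np.log(np.sum(np.exp(logits)))
  xentropy = -np.sum(soft_targets * log_probs)
  # Subtract the lowest achievable cross entropy, exactly as above.
  normalizing_constant = -(
      confidence * np.log(confidence) +
      (vocab_size - 1) * low_confidence * np.log(low_confidence + 1e-20))
  return xentropy - normalizing_constant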
def main(_):
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_export:
raise ValueError(
"At least one of `do_train`, `do_eval` must be True.")
transformer_config = flags.as_dictionary()
if FLAGS.max_encoder_length > transformer_config["max_position_embeddings"]:
raise ValueError(
"Cannot use sequence length %d because the model "
"was only trained up to sequence length %d" %
(FLAGS.max_encoder_length,
transformer_config["max_position_embeddings"]))
tf.io.gfile.makedirs(FLAGS.output_dir)
if FLAGS.do_train:
flags.save(os.path.join(FLAGS.output_dir, "summarization.config"))
model_fn = model_fn_builder(transformer_config)
estimator = utils.get_estimator(transformer_config, model_fn)
tmp_data_dir = os.path.join(FLAGS.output_dir, "tfds")
if FLAGS.do_train:
logging.info("***** Running training *****")
logging.info(" Batch size = %d", estimator.train_batch_size)
logging.info(" Num steps = %d", FLAGS.num_train_steps)
train_input_fn = input_fn_builder(
data_dir=FLAGS.data_dir,
vocab_model_file=FLAGS.vocab_model_file,
max_encoder_length=FLAGS.max_encoder_length,
max_decoder_length=FLAGS.max_decoder_length,
substitute_newline=FLAGS.substitute_newline,
tmp_dir=tmp_data_dir,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval:
logging.info("***** Running evaluation *****")
logging.info(" Batch size = %d", estimator.eval_batch_size)
eval_input_fn = input_fn_builder(
data_dir=FLAGS.data_dir,
vocab_model_file=FLAGS.vocab_model_file,
max_encoder_length=FLAGS.max_encoder_length,
max_decoder_length=FLAGS.max_decoder_length,
substitute_newline=FLAGS.substitute_newline,
tmp_dir=tmp_data_dir,
is_training=False)
# Run continuous evaluation for latest checkpoint as training progresses.
last_evaluated = None
while True:
latest = tf.train.latest_checkpoint(FLAGS.output_dir)
if latest == last_evaluated:
if not latest:
logging.info("No checkpoints found yet.")
else:
logging.info("Latest checkpoint %s already evaluated.", latest)
time.sleep(300)
continue
else:
logging.info("Evaluating check point %s", latest)
last_evaluated = latest
current_step = int(os.path.basename(latest).split("-")[1])
output_eval_file = os.path.join(
FLAGS.output_dir, "eval_results_{}.txt".format(current_step))
result = estimator.evaluate(input_fn=eval_input_fn,
steps=FLAGS.max_eval_steps,
checkpoint_path=latest)
with tf.io.gfile.GFile(output_eval_file, "w") as writer:
logging.info("***** Eval results *****")
for key in sorted(result.keys()):
logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_export:
logging.info("***** Running export *****")
serving_input_fn = serving_input_fn_builder(
batch_size=FLAGS.eval_batch_size,
vocab_model_file=FLAGS.vocab_model_file,
max_encoder_length=FLAGS.max_encoder_length,
substitute_newline=FLAGS.substitute_newline)
estimator.export_saved_model(
os.path.join(FLAGS.output_dir, "export"), serving_input_fn)
if __name__ == "__main__":
tf.compat.v1.disable_v2_behavior()
tf.compat.v1.enable_resource_variables()
app.run(main)
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run classification fine-tuning for BigBird."""
import os
from absl import app
from absl import logging
from bigbird.core import flags
from bigbird.core import modeling
from bigbird.core import optimization
from bigbird.core import utils
from natsort import natsorted
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_text as tft
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", "tfds://imdb_reviews/plain_text",
"The input data dir. Should contain the TFRecord files. "
"Can be TF Dataset with prefix tfds://")
flags.DEFINE_string(
"output_dir", "/tmp/bigb",
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BigBird model).")
flags.DEFINE_integer(
"max_encoder_length", 512,
"The maximum total input sequence length after SentencePiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_string(
"substitute_newline", None,
"Replace newline charachter from text with supplied string.")
flags.DEFINE_bool(
"do_train", True,
"Whether to run training.")
flags.DEFINE_bool(
"do_eval", False,
"Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_export", False,
"Whether to export the model as TF SavedModel.")
flags.DEFINE_integer(
"train_batch_size", 8,
"Local batch size for training. "
"Total batch size will be multiplied by number gpu/tpu cores available.")
flags.DEFINE_integer(
"eval_batch_size", 8,
"Local batch size for eval. "
"Total batch size will be multiplied by number gpu/tpu cores available.")
flags.DEFINE_string(
"optimizer", "AdamWeightDecay",
"Optimizer to use. Can be Adafactor, Adam, and AdamWeightDecay.")
flags.DEFINE_float(
"learning_rate", 1e-5,
"The initial learning rate for Adam.")
flags.DEFINE_integer(
"num_train_steps", 16000,
"Total number of training steps to perform.")
flags.DEFINE_integer(
"num_warmup_steps", 1000,
"Number of steps to perform linear warmup.")
flags.DEFINE_integer(
"save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer(
"num_labels", 2,
"Number of ways to classify.")
def input_fn_builder(data_dir, vocab_model_file, max_encoder_length,
substitute_newline, is_training, tmp_dir=None):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def _decode_record(record):
"""Decodes a record to a TensorFlow example."""
name_to_features = {
"text": tf.io.FixedLenFeature([], tf.string),
"label": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(record, name_to_features)
return example
def _tokenize_example(example):
text, label = example["text"], example["label"]
tokenizer = tft.SentencepieceTokenizer(
model=tf.io.gfile.GFile(vocab_model_file, "rb").read())
if substitute_newline:
text = tf.strings.regex_replace(text, "\n", substitute_newline)
ids = tokenizer.tokenize(text)
ids = ids[:max_encoder_length - 2]
# Add [CLS] (65) and [SEP] (66) special tokens.
prefix = tf.constant([65])
suffix = tf.constant([66])
ids = tf.concat([prefix, ids, suffix], axis=0)
if isinstance(ids, tf.RaggedTensor):
ids = ids.to_tensor(0)
# tf.Example only supports tf.int64, but the TPU is better with tf.int32.
label = tf.cast(label, tf.int32)
return ids, label
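  # Illustrative example (ids made up): a review tokenized to [200, 300, 400]
  # becomes [65, 200, 300, 400, 66] -- [CLS] and [SEP] are added after the text
  # is truncated to max_encoder_length - 2 subtokens.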
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
tpu_context = params.get("context", None)
seed = 0
# Load dataset and handle tfds separately
split = "train" if is_training else "test"
if "tfds://" == data_dir[:7]:
d = tfds.load(data_dir[7:], split=split,
shuffle_files=is_training,
data_dir=tmp_dir)
else:
input_files = tf.io.gfile.glob(
os.path.join(data_dir, "{}.tfrecord*".format(split)))
# Classification datasets are small so parallel interleaved reading
# won't buy us much.
d = tf.data.TFRecordDataset(input_files)
d = d.map(_decode_record,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
deterministic=is_training)
d = d.map(_tokenize_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE,
deterministic=is_training)
# Tokenize and batch dataset by sentencepiece
if is_training:
# Classification datasets are usually small
# and interleaving files may not be effective.
# So to ensure different data in a multi-host setup
# we explicitly shard the dataset by host id.
if tpu_context: # ensuring different data in multi-host setup
d = d.shard(tpu_context.num_hosts, tpu_context.current_host)
seed = tpu_context.current_host
d = d.shuffle(buffer_size=10000, seed=seed,
reshuffle_each_iteration=True)
d = d.repeat()
d = d.padded_batch(batch_size, ([max_encoder_length], []),
drop_remainder=True) # For static shape
return d
return input_fn
def serving_input_fn_builder(batch_size, max_encoder_length,
vocab_model_file, substitute_newline):
"""Creates an `input_fn` closure for exported SavedModel."""
def dynamic_padding(inp, min_size):
pad_size = tf.maximum(min_size - tf.shape(inp)[1], 0)
paddings = [[0, 0], [0, pad_size]]
return tf.pad(inp, paddings)
def input_fn():
# text input
text = tf.compat.v1.placeholder(tf.string, [batch_size], name="input_text")
# text tokenize
tokenizer = tft.SentencepieceTokenizer(
model=tf.io.gfile.GFile(vocab_model_file, "rb").read())
if substitute_newline:
text = tf.strings.regex_replace(text, "\n", substitute_newline)
ids = tokenizer.tokenize(text)
ids = ids[:, :max_encoder_length - 2]
# Add [CLS] and [SEP] special tokens.
prefix = tf.repeat(tf.constant([[65]]), batch_size, axis=0)
suffix = tf.repeat(tf.constant([[66]]), batch_size, axis=0)
ids = tf.concat([prefix, ids, suffix], axis=1)
if isinstance(ids, tf.RaggedTensor):
ids = ids.to_tensor(0)
# text padding: Pad only if necessary and reshape properly
padded_ids = dynamic_padding(ids, max_encoder_length)
ids = tf.slice(padded_ids, [0, 0], [batch_size, max_encoder_length])
receiver_tensors = {"input": text}
features = {"input_ids": tf.cast(ids, tf.int32, name="input_ids")}
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=receiver_tensors)
return input_fn
def model_fn_builder(bert_config):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
if isinstance(features, dict):
if not labels and "labels" in features:
labels = features["labels"]
features = features["input_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = modeling.BertModel(bert_config)
headl = ClassifierLossLayer(
bert_config["hidden_size"], bert_config["num_labels"],
bert_config["hidden_dropout_prob"],
utils.create_initializer(bert_config["initializer_range"]),
name=bert_config["scope"]+"/classifier")
_, pooled_output = model(features, training=is_training)
total_loss, log_probs = headl(pooled_output, labels, is_training)
tvars = tf.compat.v1.trainable_variables()
utils.log_variables(tvars, bert_config["ckpt_var_list"])
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
learning_rate = optimization.get_linear_warmup_linear_decay_lr(
init_lr=bert_config["learning_rate"],
num_train_steps=bert_config["num_train_steps"],
num_warmup_steps=bert_config["num_warmup_steps"])
optimizer = optimization.get_optimizer(bert_config, learning_rate)
global_step = tf.compat.v1.train.get_or_create_global_step()
gradients = optimizer.compute_gradients(total_loss, tvars)
train_op = optimizer.apply_gradients(gradients, global_step=global_step)
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
host_call=utils.add_scalars_to_summary(
bert_config["output_dir"], {"learning_rate": learning_rate}))
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(loss_value, label_ids, log_probs):
loss = tf.compat.v1.metrics.mean(values=loss_value)
predictions = tf.argmax(log_probs, axis=-1, output_type=tf.int32)
accuracy = tf.compat.v1.metrics.accuracy(
labels=label_ids, predictions=predictions)
p1, p1_op = tf.compat.v1.metrics.precision_at_k(
labels=tf.cast(label_ids, tf.int64), predictions=log_probs, k=1)
r1, r1_op = tf.compat.v1.metrics.recall_at_k(
labels=tf.cast(label_ids, tf.int64), predictions=log_probs, k=1)
f11 = tf.math.divide_no_nan(2*p1*r1, p1+r1)
metric_dict = {
"P@1": (p1, p1_op),
"R@1": (r1, r1_op),
"f1@1": (f11, tf.no_op()),
"classification_accuracy": accuracy,
"classification_loss": loss,
}
return metric_dict
eval_metrics = (metric_fn,
[tf.expand_dims(total_loss, 0), labels, log_probs])
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics)
else:
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"log-probabilities": log_probs})
return output_spec
return model_fn
class ClassifierLossLayer(tf.keras.layers.Layer):
"""Final classifier layer with loss."""
def __init__(self,
hidden_size,
num_labels,
dropout_prob=0.0,
initializer=None,
use_bias=True,
name="classifier"):
super(ClassifierLossLayer, self).__init__(name=name)
self.hidden_size = hidden_size
self.num_labels = num_labels
self.initializer = initializer
self.dropout = tf.keras.layers.Dropout(dropout_prob)
self.use_bias = use_bias
with tf.compat.v1.variable_scope(name):
self.w = tf.compat.v1.get_variable(
name="kernel",
shape=[self.hidden_size, self.num_labels],
initializer=self.initializer)
if self.use_bias:
self.b = tf.compat.v1.get_variable(
name="bias",
shape=[self.num_labels],
initializer=tf.zeros_initializer)
else:
self.b = None
def call(self, input_tensor, labels=None, training=None):
input_tensor = self.dropout(input_tensor, training)
logits = tf.matmul(input_tensor, self.w)
if self.use_bias:
logits = tf.nn.bias_add(logits, self.b)
log_probs = tf.nn.log_softmax(logits, axis=-1)
if labels is not None:
one_hot_labels = tf.one_hot(labels, depth=self.num_labels,
dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
else:
loss = tf.constant(0.0)
return loss, log_probs
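# Usage sketch (illustrative only): given pooled_output of shape
# [batch_size, hidden_size] and int labels of shape [batch_size],
#   head = ClassifierLossLayer(hidden_size, num_labels, dropout_prob)
#   loss, log_probs = head(pooled_output, labels, training=is_training)
# returns a scalar mean cross-entropy loss and log-probabilities of shape
# [batch_size, num_labels]; with labels=None the loss is 0.0.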
def main(_):
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_export:
raise ValueError(
"At least one of `do_train`, `do_eval` must be True.")
bert_config = flags.as_dictionary()
if FLAGS.max_encoder_length > bert_config["max_position_embeddings"]:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_encoder_length, bert_config["max_position_embeddings"]))
tf.io.gfile.makedirs(FLAGS.output_dir)
if FLAGS.do_train:
flags.save(os.path.join(FLAGS.output_dir, "classifier.config"))
model_fn = model_fn_builder(bert_config)
estimator = utils.get_estimator(bert_config, model_fn)
tmp_data_dir = os.path.join(FLAGS.output_dir, "tfds")
if FLAGS.do_train:
logging.info("***** Running training *****")
logging.info(" Batch size = %d", estimator.train_batch_size)
logging.info(" Num steps = %d", FLAGS.num_train_steps)
train_input_fn = input_fn_builder(
data_dir=FLAGS.data_dir,
vocab_model_file=FLAGS.vocab_model_file,
max_encoder_length=FLAGS.max_encoder_length,
substitute_newline=FLAGS.substitute_newline,
tmp_dir=tmp_data_dir,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval:
logging.info("***** Running evaluation *****")
logging.info(" Batch size = %d", estimator.eval_batch_size)
eval_input_fn = input_fn_builder(
data_dir=FLAGS.data_dir,
vocab_model_file=FLAGS.vocab_model_file,
max_encoder_length=FLAGS.max_encoder_length,
substitute_newline=FLAGS.substitute_newline,
tmp_dir=tmp_data_dir,
is_training=False)
if FLAGS.use_tpu:
with tf.compat.v1.Session() as sess:
eval_steps = eval_input_fn({
"batch_size": estimator.eval_batch_size
}).cardinality().eval(session=sess)
else:
eval_steps = None
# Run evaluation for each new checkpoint.
all_ckpts = [
v.split(".meta")[0] for v in tf.io.gfile.glob(
os.path.join(FLAGS.output_dir, "model.ckpt*.meta"))
]
all_ckpts = natsorted(all_ckpts)
for ckpt in all_ckpts:
current_step = int(os.path.basename(ckpt).split("-")[1])
output_eval_file = os.path.join(
FLAGS.output_dir, "eval_results_{}.txt".format(current_step))
result = estimator.evaluate(input_fn=eval_input_fn,
checkpoint_path=ckpt,
steps=eval_steps)
with tf.io.gfile.GFile(output_eval_file, "w") as writer:
logging.info("***** Eval results *****")
for key in sorted(result.keys()):
logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_export:
logging.info("***** Running export *****")
serving_input_fn = serving_input_fn_builder(
batch_size=FLAGS.eval_batch_size,
vocab_model_file=FLAGS.vocab_model_file,
max_encoder_length=FLAGS.max_encoder_length,
substitute_newline=FLAGS.substitute_newline)
estimator.export_saved_model(
os.path.join(FLAGS.output_dir, "export"), serving_input_fn)
if __name__ == "__main__":
tf.compat.v1.disable_v2_behavior()
tf.compat.v1.enable_resource_variables()
app.run(main)
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2021 The BigBird Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run masked LM/next sentence pre-training for BigBird."""
import os
import time
from absl import app
from absl import logging
from bigbird.core import flags
from bigbird.core import modeling
from bigbird.core import optimization
from bigbird.core import utils
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_text as tft
import sentencepiece as spm
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", "tfds://wiki40b/en",
"The input data dir. Should contain the TFRecord files. "
"Can be TF Dataset with prefix tfds://")
flags.DEFINE_string(
"output_dir", "/tmp/bigb",
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BigBird model).")
flags.DEFINE_integer(
"max_encoder_length", 512,
"The maximum total input sequence length after SentencePiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer(
"max_predictions_per_seq", 75,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.")
flags.DEFINE_float(
"masked_lm_prob", 0.15,
"Masked LM probability.")
flags.DEFINE_string(
"substitute_newline", " ",
"Replace newline charachter from text with supplied string.")
flags.DEFINE_bool(
"do_train", True,
"Whether to run training.")
flags.DEFINE_bool(
"do_eval", False,
"Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_export", False,
"Whether to export the model as TF SavedModel.")
flags.DEFINE_integer(
"train_batch_size", 4,
"Local batch size for training. "
"Total batch size will be multiplied by number gpu/tpu cores available.")
flags.DEFINE_integer(
"eval_batch_size", 4,
"Local batch size for eval. "
"Total batch size will be multiplied by number gpu/tpu cores available.")
flags.DEFINE_string(
"optimizer", "AdamWeightDecay",
"Optimizer to use. Can be Adafactor, Adam, and AdamWeightDecay.")
flags.DEFINE_float(
"learning_rate", 1e-4,
"The initial learning rate for Adam.")
flags.DEFINE_integer(
"num_train_steps", 100000,
"Total number of training steps to perform.")
flags.DEFINE_integer(
"num_warmup_steps", 10000,
"Number of steps to perform linear warmup.")
flags.DEFINE_integer(
"save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer(
"max_eval_steps", 100,
"Maximum number of eval steps.")
flags.DEFINE_bool(
"preprocessed_data", False,
"Whether TFRecord data is already tokenized and masked.")
flags.DEFINE_bool(
"use_nsp", False,
"Whether to use next sentence prediction loss.")
def input_fn_builder(data_dir, vocab_model_file, masked_lm_prob,
max_encoder_length, max_predictions_per_seq,
preprocessed_data, substitute_newline, is_training,
tmp_dir=None):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
sp_model = spm.SentencePieceProcessor()
sp_proto = tf.io.gfile.GFile(vocab_model_file, "rb").read()
sp_model.LoadFromSerializedProto(sp_proto)
vocab_size = sp_model.GetPieceSize()
word_start_subtoken = np.array(
[sp_model.IdToPiece(i)[0] == "▁" for i in range(vocab_size)])
feature_shapes = {
"input_ids": [max_encoder_length],
"segment_ids": [max_encoder_length],
"masked_lm_positions": [max_predictions_per_seq],
"masked_lm_ids": [max_predictions_per_seq],
"masked_lm_weights": [max_predictions_per_seq],
"next_sentence_labels": [1]
}
def _decode_record(record):
"""Decodes a record to a TensorFlow example."""
name_to_features = {
"input_ids":
tf.io.FixedLenFeature([max_encoder_length], tf.int64),
"segment_ids":
tf.io.FixedLenFeature([max_encoder_length], tf.int64),
"masked_lm_positions":
tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.io.FixedLenFeature([1], tf.int64),
}
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def do_masking(example):
text = example["text"]
tokenizer = tft.SentencepieceTokenizer(
model=tf.io.gfile.GFile(vocab_model_file, "rb").read())
if substitute_newline:
text = tf.strings.regex_replace(text, "\n", substitute_newline)
subtokens = tokenizer.tokenize(text)
(subtokens, masked_lm_positions, masked_lm_ids,
masked_lm_weights) = tf.compat.v1.py_func(
numpy_masking, [subtokens], [tf.int32, tf.int32, tf.int32, tf.float32],
stateful=False)
features = {
"input_ids": subtokens,
"segment_ids": tf.zeros_like(subtokens),
"masked_lm_positions": masked_lm_positions,
"masked_lm_ids": masked_lm_ids,
"masked_lm_weights": masked_lm_weights,
"next_sentence_labels": tf.zeros([1], dtype=tf.int64),
}
return features
def numpy_masking(subtokens):
# Find a random span in text
end_pos = max_encoder_length - 2 + np.random.randint(
max(1, len(subtokens) - max_encoder_length - 2))
start_pos = max(0, end_pos - max_encoder_length + 2)
subtokens = subtokens[start_pos:end_pos]
# The start might be inside a word so fix it
# such that span always starts at a word
word_begin_mark = word_start_subtoken[subtokens]
word_begins_pos = np.flatnonzero(word_begin_mark).astype(np.int32)
if word_begins_pos.size == 0:
# if no word boundary present, we do not do whole word masking
# and we fall back to random masking.
word_begins_pos = np.arange(len(subtokens), dtype=np.int32)
word_begin_mark = np.logical_not(word_begin_mark)
print(subtokens, start_pos, end_pos, word_begin_mark)
correct_start_pos = word_begins_pos[0]
subtokens = subtokens[correct_start_pos:]
word_begin_mark = word_begin_mark[correct_start_pos:]
word_begins_pos = word_begins_pos - correct_start_pos
num_tokens = len(subtokens)
    # We want to do whole word masking, so split by word boundary
words = np.split(np.arange(num_tokens, dtype=np.int32), word_begins_pos)[1:]
assert len(words) == len(word_begins_pos)
# Decide elements to mask
num_to_predict = min(
max_predictions_per_seq,
max(1, int(round(len(word_begins_pos) * masked_lm_prob))))
    masked_lm_positions = np.concatenate(np.random.choice(
        np.array([[]] + words, dtype=object)[1:],
        num_to_predict, replace=False), 0)
    # but this might select more subtokens than max_predictions_per_seq
if len(masked_lm_positions) > max_predictions_per_seq:
masked_lm_positions = masked_lm_positions[:max_predictions_per_seq+1]
    # however the last selected word may be split by the cutoff, so truncate
    # at the last whole word boundary
truncate_masking_at = np.flatnonzero(
word_begin_mark[masked_lm_positions])[-1]
masked_lm_positions = masked_lm_positions[:truncate_masking_at]
# sort masking positions
masked_lm_positions = np.sort(masked_lm_positions)
masked_lm_ids = subtokens[masked_lm_positions]
    # replace input token with [MASK] 80% of the time, a random token 10% of
    # the time, or leave it as is otherwise.
randomness = np.random.rand(len(masked_lm_positions))
mask_index = masked_lm_positions[randomness < 0.8]
random_index = masked_lm_positions[randomness > 0.9]
subtokens[mask_index] = 67 # id of masked token
subtokens[random_index] = np.random.randint( # ignore special tokens
101, vocab_size, len(random_index), dtype=np.int32)
# add [CLS] (65) and [SEP] (66) tokens
subtokens = np.concatenate([
np.array([65], dtype=np.int32), subtokens,
np.array([66], dtype=np.int32)
])
# pad everything to correct shape
pad_inp = max_encoder_length - num_tokens - 2
subtokens = np.pad(subtokens, [0, pad_inp], "constant")
pad_out = max_predictions_per_seq - len(masked_lm_positions)
masked_lm_weights = np.pad(
np.ones_like(masked_lm_positions, dtype=np.float32),
[0, pad_out], "constant")
masked_lm_positions = np.pad(
masked_lm_positions + 1, [0, pad_out], "constant")
masked_lm_ids = np.pad(masked_lm_ids, [0, pad_out], "constant")
return subtokens, masked_lm_positions, masked_lm_ids, masked_lm_weights
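  # Illustrative trace (made-up ids): for subtokens [900, 12, 901, 34, 902]
  # where 900/901/902 start words, a whole word such as [901, 34] may be
  # chosen for prediction; each selected position is then replaced by the
  # [MASK] id 67 with probability 0.8, by a random non-special id with
  # probability 0.1, or kept unchanged otherwise, before the [CLS]/[SEP]
  # tokens are added and everything is padded to fixed shape.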
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# Load dataset and handle tfds separately
split = "train" if is_training else "test"
if "tfds://" == data_dir[:7]:
d = tfds.load(data_dir[7:], split=split,
shuffle_files=is_training,
data_dir=tmp_dir)
else:
input_files = tf.io.gfile.glob(
os.path.join(data_dir, "{}.tfrecord*".format(split)))
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.shuffle(buffer_size=len(input_files))
# Non deterministic mode means that the interleaving is not exact.
# This adds even more randomness to the training pipeline.
d = d.interleave(tf.data.TFRecordDataset,
deterministic=False,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
else:
d = tf.data.TFRecordDataset(input_files)
if preprocessed_data:
d = d.map(_decode_record,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
else:
d = d.map(do_masking,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if is_training:
d = d.shuffle(buffer_size=10000, reshuffle_each_iteration=True)
d = d.repeat()
d = d.padded_batch(batch_size, feature_shapes,
drop_remainder=True) # For static shape
return d
return input_fn
def serving_input_fn_builder(batch_size, max_encoder_length,
vocab_model_file, substitute_newline):
"""Creates an `input_fn` closure for exported SavedModel."""
def dynamic_padding(inp, min_size):
pad_size = tf.maximum(min_size - tf.shape(inp)[1], 0)
paddings = [[0, 0], [0, pad_size]]
return tf.pad(inp, paddings)
def input_fn():
# text input
text = tf.compat.v1.placeholder(tf.string, [batch_size], name="input_text")
# text tokenize
tokenizer = tft.SentencepieceTokenizer(
model=tf.io.gfile.GFile(vocab_model_file, "rb").read())
if substitute_newline:
text = tf.strings.regex_replace(text, "\n", substitute_newline)
ids = tokenizer.tokenize(text)
if isinstance(ids, tf.RaggedTensor):
ids = ids.to_tensor(0)
# text padding: Pad only if necessary and reshape properly
padded_ids = dynamic_padding(ids, max_encoder_length)
ids = tf.slice(padded_ids, [0, 0], [batch_size, max_encoder_length])
receiver_tensors = {"input": text}
features = {"input_ids": tf.cast(ids, tf.int32, name="input_ids")}
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=receiver_tensors)
return input_fn
def model_fn_builder(bert_config):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = modeling.BertModel(bert_config)
masked_lm = MaskedLMLayer(
bert_config["hidden_size"], bert_config["vocab_size"], model.embeder,
initializer=utils.create_initializer(bert_config["initializer_range"]),
activation_fn=utils.get_activation(bert_config["hidden_act"]))
next_sentence = NSPLayer(
bert_config["hidden_size"],
initializer=utils.create_initializer(bert_config["initializer_range"]))
sequence_output, pooled_output = model(
features["input_ids"], training=is_training,
token_type_ids=features.get("segment_ids"))
masked_lm_loss, masked_lm_log_probs = masked_lm(
sequence_output,
label_ids=features.get("masked_lm_ids"),
label_weights=features.get("masked_lm_weights"),
masked_lm_positions=features.get("masked_lm_positions"))
next_sentence_loss, next_sentence_log_probs = next_sentence(
pooled_output, features.get("next_sentence_labels"))
total_loss = masked_lm_loss
if bert_config["use_nsp"]:
total_loss += next_sentence_loss
tvars = tf.compat.v1.trainable_variables()
utils.log_variables(tvars, bert_config["ckpt_var_list"])
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
learning_rate = optimization.get_linear_warmup_linear_decay_lr(
init_lr=bert_config["learning_rate"],
num_train_steps=bert_config["num_train_steps"],
num_warmup_steps=bert_config["num_warmup_steps"])
optimizer = optimization.get_optimizer(bert_config, learning_rate)
global_step = tf.compat.v1.train.get_global_step()
gradients = optimizer.compute_gradients(total_loss, tvars)
train_op = optimizer.apply_gradients(gradients, global_step=global_step)
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
host_call=utils.add_scalars_to_summary(
bert_config["output_dir"], {"learning_rate": learning_rate}))
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_loss_value, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_loss_value,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_accuracy = tf.compat.v1.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.compat.v1.metrics.mean(
values=masked_lm_loss_value)
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_accuracy = tf.compat.v1.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.compat.v1.metrics.mean(
values=next_sentence_loss_value)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metrics = (metric_fn, [
masked_lm_loss, masked_lm_log_probs, features["masked_lm_ids"],
features["masked_lm_weights"], next_sentence_loss,
next_sentence_log_probs, features["next_sentence_labels"]
])
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics)
else:
output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
"log-probabilities": masked_lm_log_probs,
"seq-embeddings": sequence_output
})
return output_spec
return model_fn
class MaskedLMLayer(tf.keras.layers.Layer):
"""Get loss and log probs for the masked LM."""
def __init__(self,
hidden_size,
vocab_size,
embeder,
initializer=None,
activation_fn=None,
name="cls/predictions"):
super(MaskedLMLayer, self).__init__(name=name)
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.embeder = embeder
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
self.extra_layer = utils.Dense2dLayer(
hidden_size, hidden_size, initializer,
activation_fn, "transform")
self.norm_layer = utils.NormLayer(hidden_size, name="transform")
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.output_bias = tf.compat.v1.get_variable(
name+"/output_bias",
shape=[vocab_size],
initializer=tf.zeros_initializer())
@property
def trainable_weights(self):
self._trainable_weights = (self.extra_layer.trainable_weights +
self.norm_layer.trainable_weights +
[self.output_bias])
return self._trainable_weights
def call(self, input_tensor,
label_ids=None,
label_weights=None,
masked_lm_positions=None):
if masked_lm_positions is not None:
input_tensor = tf.gather(input_tensor, masked_lm_positions, batch_dims=1)
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
input_tensor = self.extra_layer(input_tensor)
input_tensor = self.norm_layer(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
logits = self.embeder.linear(input_tensor)
logits = tf.nn.bias_add(logits, self.output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
if label_ids is not None:
one_hot_labels = tf.one_hot(
label_ids, depth=self.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=-1)
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
else:
loss = tf.constant(0.0)
return loss, log_probs
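# Usage sketch (illustrative only): with sequence_output of shape
# [batch_size, seq_length, hidden_size] and masked_lm_positions of shape
# [batch_size, max_predictions_per_seq], a MaskedLMLayer instance (here
# `masked_lm`, as in model_fn above) is called as
#   loss, log_probs = masked_lm(sequence_output,
#                               label_ids=masked_lm_ids,
#                               label_weights=masked_lm_weights,
#                               masked_lm_positions=masked_lm_positions)
# which gathers the masked positions and returns a scalar weighted loss plus
# log-probabilities of shape [batch_size, max_predictions_per_seq, vocab_size].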
class NSPLayer(tf.keras.layers.Layer):
"""Get loss and log probs for the next sentence prediction."""
def __init__(self,
hidden_size,
initializer=None,
name="cls/seq_relationship"):
super(NSPLayer, self).__init__(name=name)
self.hidden_size = hidden_size
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.compat.v1.variable_scope(name):
self.output_weights = tf.compat.v1.get_variable(
"output_weights",
shape=[2, hidden_size],
initializer=initializer)
self._trainable_weights.append(self.output_weights)
self.output_bias = tf.compat.v1.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
self._trainable_weights.append(self.output_bias)
def call(self, input_tensor, next_sentence_labels=None):
logits = tf.matmul(input_tensor, self.output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, self.output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
if next_sentence_labels is not None:
labels = tf.reshape(next_sentence_labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
else:
loss = tf.constant(0.0)
return loss, log_probs
def main(_):
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_export:
raise ValueError(
"At least one of `do_train`, `do_eval` must be True.")
bert_config = flags.as_dictionary()
if FLAGS.max_encoder_length > bert_config["max_position_embeddings"]:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_encoder_length, bert_config["max_position_embeddings"]))
tf.io.gfile.makedirs(FLAGS.output_dir)
if FLAGS.do_train:
flags.save(os.path.join(FLAGS.output_dir, "pretrain.config"))
model_fn = model_fn_builder(bert_config)
estimator = utils.get_estimator(bert_config, model_fn)
tmp_data_dir = os.path.join(FLAGS.output_dir, "tfds")
if FLAGS.do_train:
logging.info("***** Running training *****")
logging.info(" Batch size = %d", estimator.train_batch_size)
logging.info(" Num steps = %d", FLAGS.num_train_steps)
train_input_fn = input_fn_builder(
data_dir=FLAGS.data_dir,
vocab_model_file=FLAGS.vocab_model_file,
masked_lm_prob=FLAGS.masked_lm_prob,
max_encoder_length=FLAGS.max_encoder_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
preprocessed_data=FLAGS.preprocessed_data,
substitute_newline=FLAGS.substitute_newline,
tmp_dir=tmp_data_dir,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval:
logging.info("***** Running evaluation *****")
logging.info(" Batch size = %d", estimator.eval_batch_size)
eval_input_fn = input_fn_builder(
data_dir=FLAGS.data_dir,
vocab_model_file=FLAGS.vocab_model_file,
masked_lm_prob=FLAGS.masked_lm_prob,
max_encoder_length=FLAGS.max_encoder_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
preprocessed_data=FLAGS.preprocessed_data,
substitute_newline=FLAGS.substitute_newline,
tmp_dir=tmp_data_dir,
is_training=False)
# Run continuous evaluation for latest checkpoint as training progresses.
last_evaluated = None
while True:
latest = tf.train.latest_checkpoint(FLAGS.output_dir)
if latest == last_evaluated:
if not latest:
logging.info("No checkpoints found yet.")
else:
logging.info("Latest checkpoint %s already evaluated.", latest)
time.sleep(300)
continue
else:
logging.info("Evaluating check point %s", latest)
last_evaluated = latest
current_step = int(os.path.basename(latest).split("-")[1])
output_eval_file = os.path.join(
FLAGS.output_dir, "eval_results_{}.txt".format(current_step))
result = estimator.evaluate(input_fn=eval_input_fn,
steps=FLAGS.max_eval_steps,
checkpoint_path=latest)
with tf.io.gfile.GFile(output_eval_file, "w") as writer:
logging.info("***** Eval results *****")
for key in sorted(result.keys()):
logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_export:
logging.info("***** Running export *****")
serving_input_fn = serving_input_fn_builder(
batch_size=FLAGS.eval_batch_size,
vocab_model_file=FLAGS.vocab_model_file,
max_encoder_length=FLAGS.max_encoder_length,
substitute_newline=FLAGS.substitute_newline)
estimator.export_saved_model(
os.path.join(FLAGS.output_dir, "export"), serving_input_fn)
if __name__ == "__main__":
tf.compat.v1.disable_v2_behavior()
tf.compat.v1.enable_resource_variables()
app.run(main)
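# Example invocation (illustrative; the script name and paths are placeholders,
# and the flag values are assumptions -- only the flag names are taken from the
# FLAGS referenced above):
#   python3 run_pretraining.py \
#     --data_dir=/path/to/pretrain_tfrecords \
#     --output_dir=/tmp/bigbird_pretrain \
#     --do_train \
#     --max_encoder_length=4096 \
#     --num_train_steps=100000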
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple test for validating that the Atari env initializes."""
import datetime
import os
import shutil
from absl import flags
from batch_rl.baselines import train
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
class AtariInitTest(tf.test.TestCase):
def setUp(self):
super(AtariInitTest, self).setUp()
FLAGS.base_dir = os.path.join(
'/tmp/batch_rl_tests',
datetime.datetime.utcnow().strftime('run_%Y_%m_%d_%H_%M_%S'))
FLAGS.gin_files = ['batch_rl/baselines/configs/dqn.gin']
# `num_iterations` set to zero to prevent runner execution.
FLAGS.gin_bindings = [
'Runner.num_iterations=0',
'WrappedReplayBuffer.replay_capacity = 100' # To prevent OOM.
]
FLAGS.alsologtostderr = True
def test_atari_init(self):
"""Tests that a DQN agent is initialized."""
train.main([])
shutil.rmtree(FLAGS.base_dir)
if __name__ == '__main__':
tf.test.main()
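# Example (illustrative; the module path is an assumption): this smoke test can
# be run directly with the repository root on PYTHONPATH, e.g.
#   python -m batch_rl.tests.atari_init_test
# It only builds the DQN agent (Runner.num_iterations=0), so no training runs.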
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End to end tests for FixedReplayRunner."""
import datetime
import os
import shutil
from absl import flags
from batch_rl.fixed_replay import train
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
class FixedReplayRunnerIntegrationTest(tf.test.TestCase):
"""Tests for Atari environment with various agents.
"""
def setUp(self):
super(FixedReplayRunnerIntegrationTest, self).setUp()
FLAGS.base_dir = os.path.join(
'/tmp/batch_rl_tests',
datetime.datetime.utcnow().strftime('run_%Y_%m_%d_%H_%M_%S'))
self._checkpoint_dir = os.path.join(FLAGS.base_dir, 'checkpoints')
self._logging_dir = os.path.join(FLAGS.base_dir, 'logs')
def quickFixedReplayREMFlags(self):
"""Assign flags for a quick run of FixedReplay agent."""
FLAGS.gin_bindings = [
"create_runner.schedule='continuous_train_and_eval'",
'FixedReplayRunner.training_steps=100',
'FixedReplayRunner.evaluation_steps=10',
'FixedReplayRunner.num_iterations=1',
'FixedReplayRunner.max_steps_per_episode=100',
]
FLAGS.alsologtostderr = True
FLAGS.gin_files = ['batch_rl/fixed_replay/configs/rem.gin']
FLAGS.agent_name = 'multi_head_dqn'
def verifyFilesCreated(self, base_dir):
"""Verify that files have been created."""
# Check checkpoint files
self.assertTrue(
os.path.exists(os.path.join(self._checkpoint_dir, 'ckpt.0')))
self.assertTrue(
os.path.exists(os.path.join(self._checkpoint_dir, 'checkpoint')))
self.assertTrue(
os.path.exists(
os.path.join(self._checkpoint_dir,
'sentinel_checkpoint_complete.0')))
# Check log files
self.assertTrue(os.path.exists(os.path.join(self._logging_dir, 'log_0')))
def testIntegrationFixedReplayREM(self):
"""Test the FixedReplayMultiHeadDQN agent."""
assert FLAGS.replay_dir is not None, 'Please provide a replay directory'
tf.logging.info('####### Training the REM agent #####')
tf.logging.info('####### REM base_dir: {}'.format(FLAGS.base_dir))
tf.logging.info('####### replay_dir: {}'.format(FLAGS.replay_dir))
self.quickFixedReplayREMFlags()
train.main([])
self.verifyFilesCreated(FLAGS.base_dir)
shutil.rmtree(FLAGS.base_dir)
if __name__ == '__main__':
tf.test.main()
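# Example (illustrative; the module path and replay-data layout are
# assumptions): this end-to-end test needs a directory of logged replay data,
# passed via --replay_dir, e.g.
#   python -m batch_rl.tests.fixed_replay_runner_test \
#     --replay_dir=/path/to/dqn_replay/Pong/1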
|