python_code (stringlengths 0–4.04M) | repo_name (stringlengths 7–58) | file_path (stringlengths 5–147) |
---|---|---|
"""Custom video dataloader for ImageNet-P dataset, which comes in .mp4."""
import cv2
# from skvideo.io import VideoCapture
# import skvideo.io
import torch
import torch.utils.data as data
from torchvision.datasets.folder import DatasetFolder
from PIL import Image
import os
import os.path
import sys
class VideoFolder(DatasetFolder):
def __init__(self, root, transform=None, target_transform=None, loader=None):
super(VideoFolder, self).__init__(
root, loader, ['.mp4'], transform=transform, target_transform=target_transform)
self.vids = self.samples
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
# cap = VideoCapture(path)
cap = cv2.VideoCapture(path)
frames = []
while True:
# Capture frame-by-frame
ret, frame = cap.read()
if not ret: break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(self.transform(Image.fromarray(frame)).unsqueeze(0))
cap.release()
return torch.cat(frames, 0), target
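# Usage sketch (hedged): the ImageNet-P directory path and the transform below are
# illustrative placeholders, not values taken from this repo.
from torchvision import transforms
video_transform = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor()])
dataset = VideoFolder('/path/to/ImageNet-P/gaussian_noise', transform=video_transform)  # hypothetical path
frames, target = dataset[0]  # frames: (num_frames, 3, 224, 224); target: class index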
| state-spaces-main | src/dataloaders/utils/video_loader.py |
"""Implementation of CIFAR augmentations. Not currently used.
Borrowed from https://github.com/hysts/pytorch_image_classification/tree/9ff4248905850c68aa9c09c17914307eb81769e7/pytorch_image_classification/transforms
"""
import torch
import numpy as np
import PIL
import PIL.Image
from PIL.Image import Image
class NpNormalize:
def __init__(self, mean: np.ndarray, std: np.ndarray):
self.mean = np.array(mean)
self.std = np.array(std)
def __call__(self, image: PIL.Image.Image) -> np.ndarray:
image = np.asarray(image).astype(np.float32) / 255.
image = (image - self.mean) / self.std
return image
class Cutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
h = img.size(1)
w = img.size(2)
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img = img * mask
return img
#
# class Cutout:
# def __init__(self, p=1.0, mask_size=16, cutout_inside=False, mask_color=0):
# # https://github.com/hysts/pytorch_image_classification/blob/9ff4248905850c68aa9c09c17914307eb81769e7/configs/augmentations/cifar/cutout.yaml
# self.p = p
# self.mask_size = mask_size
# self.cutout_inside = cutout_inside
# self.mask_color = mask_color
#
# self.mask_size_half = self.mask_size // 2
# self.offset = 1 if self.mask_size % 2 == 0 else 0
#
# def __call__(self, image: np.ndarray) -> np.ndarray:
# image = np.asarray(image).copy()
#
# if np.random.random() > self.p:
# return image
#
# h, w = image.shape[:2]
#
# if self.cutout_inside:
# cxmin = self.mask_size_half
# cxmax = w + self.offset - self.mask_size_half
# cymin = self.mask_size_half
# cymax = h + self.offset - self.mask_size_half
# else:
# cxmin, cxmax = 0, w + self.offset
# cymin, cymax = 0, h + self.offset
#
# cx = np.random.randint(cxmin, cxmax)
# cy = np.random.randint(cymin, cymax)
# xmin = cx - self.mask_size_half
# ymin = cy - self.mask_size_half
# xmax = xmin + self.mask_size
# ymax = ymin + self.mask_size
# xmin = max(0, xmin)
# ymin = max(0, ymin)
# xmax = min(w, xmax)
# ymax = min(h, ymax)
# image[ymin:ymax, xmin:xmax] = self.mask_color
# return image
class RandomErasing:
def __init__(self, p=0.5, max_attempt=20, sl=0.02, sh=0.4, rl=0.3, rh=1. / 0.3):
# https://github.com/hysts/pytorch_image_classification/blob/9ff4248905850c68aa9c09c17914307eb81769e7/configs/augmentations/cifar/random_erasing.yaml
self.p = p
self.max_attempt = max_attempt
self.sl, self.sh = sl, sh
self.rl = rl
self.rh = rh
def __call__(self, image: np.ndarray) -> np.ndarray:
image = np.asarray(image).copy()
if np.random.random() > self.p:
return image
h, w = image.shape[:2]
image_area = h * w
for _ in range(self.max_attempt):
mask_area = np.random.uniform(self.sl, self.sh) * image_area
aspect_ratio = np.random.uniform(self.rl, self.rh)
mask_h = int(np.sqrt(mask_area * aspect_ratio))
mask_w = int(np.sqrt(mask_area / aspect_ratio))
if mask_w < w and mask_h < h:
x0 = np.random.randint(0, w - mask_w)
y0 = np.random.randint(0, h - mask_h)
x1 = x0 + mask_w
y1 = y0 + mask_h
image[y0:y1, x0:x1] = np.random.uniform(0, 1)
break
return image
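# Usage sketch (hedged): composing Cutout with standard CIFAR preprocessing. Cutout
# expects a (C, H, W) tensor, so it is applied after ToTensor(); the hole count and
# patch length below are chosen for illustration.
from torchvision import transforms
train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    Cutout(n_holes=1, length=16),
])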
| state-spaces-main | src/dataloaders/utils/cifar_augmentations.py |
"""Implementation of Mixup from timm."""
import torch
from timm.data import Mixup
from timm.data.mixup import mixup_target
class TimmMixup(Mixup):
"""Wrap timm.data.Mixup that avoids the assert that batch size must be even."""
def __call__(self, x, target, *args):
if self.mode == 'elem':
lam = self._mix_elem(x)
elif self.mode == 'pair':
# We move the assert from the beginning of the function to here
assert len(x) % 2 == 0, 'Batch size should be even when using this'
lam = self._mix_pair(x)
else:
lam = self._mix_batch(x)
# Another change is to set the right device here
target = mixup_target(target, self.num_classes, lam, self.label_smoothing,
device=target.device)
return x, target, *args
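# Usage sketch (hedged): the shapes and mixup hyperparameters below are illustrative;
# note that an odd batch size is accepted outside of 'pair' mode.
import torch
mixup = TimmMixup(mixup_alpha=0.8, cutmix_alpha=1.0, label_smoothing=0.1, num_classes=10)
x = torch.randn(7, 3, 32, 32)
y = torch.randint(0, 10, (7,))
x, y_soft = mixup(x, y)  # y_soft: (7, 10) soft targets on y.device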
| state-spaces-main | src/dataloaders/utils/timm_mixup.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
from collections import Counter
from collections import OrderedDict
import torch
import src.utils as utils
class Vocab(object):
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=True,
delimiter=None, vocab_file=None):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = line.strip()
# convert to lower case
if self.lower_case:
line = line.lower()
# empty delimiter '' will evaluate False
if self.delimiter == '':
symbols = line
else:
symbols = line.split(self.delimiter)
if add_double_eos: # lm1b
return ['<S>'] + symbols + ['<S>']
elif add_eos:
return symbols + ['<eos>']
else:
return symbols
def count_file(self, path, verbose=False, add_eos=False):
if verbose:
print('counting file {} ...'.format(path))
assert os.path.exists(path)
sents = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose:
print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
self.unk_idx = self.sym2idx['<UNK>']
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq:
break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose:
print('encoding file {} ...'.format(path))
assert os.path.exists(path)
encoded = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose:
print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
# print('encounter unk {}'.format(sym))
assert '<eos>' not in sym
assert hasattr(self, 'unk_idx')
return self.sym2idx.get(sym, self.unk_idx)
def get_symbols(self, indices):
return [self.get_sym(idx) for idx in indices]
def get_indices(self, symbols):
return [self.get_idx(sym) for sym in symbols]
def convert_to_tensor(self, symbols):
return torch.LongTensor(self.get_indices(symbols))
def convert_to_sent(self, indices, exclude=None):
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
def __len__(self):
return len(self.idx2sym)
# Class OpenAIVocab has been adapted from
# https://github.com/cybertronai/transformer-xl/blob/master/utils/vocabulary.py
class OpenAIVocab(Vocab):
def __init__(self, max_size=None, vocab_file=None):
from transformers import GPT2Tokenizer
self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
self.EOT = self.tokenizer.encoder['<|endoftext|>']
self.max_size = max_size
self.vocab_file = vocab_file
pad = 8
vocab_size = len(self.tokenizer)
padded_vocab_size = (vocab_size + pad - 1) // pad * pad
for i in range(0, padded_vocab_size - vocab_size):
token = f'madeupword{i:09d}'
self.tokenizer.add_tokens([token])
def __len__(self):
return len(self.tokenizer)
def count_file(self, path, verbose=False, add_eos=False):
# TODO: train from scratch, respect self.max_size
pass
def build_vocab(self):
pass
def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False) -> torch.LongTensor:
cached = path + '.bpe'
if os.path.exists(cached):
return torch.load(cached)
print(f'encoding file {path} ...')
assert os.path.exists(path), f"{path} doesn't exist"
with open(path, encoding='utf-8') as f:
# Suppress warnings about length.
with open(os.devnull, "w") as devnull, contextlib.redirect_stderr(devnull):
out = torch.LongTensor(self.tokenizer.encode(f.read()) + [self.EOT])
with utils.distributed.sync_workers() as rank:
if rank == 0:
torch.save(out, cached)
return out
def tokenize(self, line, add_eos=False, add_double_eos=False):
return self.tokenizer.encode(line)
def convert_to_tensor(self, symbols):
return torch.LongTensor(symbols)
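# Usage sketch (hedged): building a word-level vocabulary and encoding a corpus with the
# Vocab class above. The file name 'train.txt' is a hypothetical placeholder.
vocab = Vocab(special=['<eos>'], lower_case=True)
vocab.count_file('train.txt', add_eos=True)
vocab.build_vocab()
ids = vocab.encode_file('train.txt', ordered=True, add_eos=True)  # 1-D LongTensor of token ids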
| state-spaces-main | src/dataloaders/utils/vocabulary.py |
"""Utilities for special optimizer hyperparameters.
`group_parameters_for_optimizer` is a modification of timm's optimizer logic and is currently unused.
`add_optimizer_hooks` is an improved version that uses this codebase's `_optim` attribute dictionary.
"""
import inspect
import torch.nn as nn
import hydra
def add_optimizer_hooks(
model,
bias_weight_decay=False,
normalization_weight_decay=False,
):
"""Handle special optimizer logic by setting _optim attribute.
Set weight_decay=0.0 for parameters in model.no_weight_decay, for parameters with
attribute _no_weight_decay==True, for bias parameters if bias_weight_decay==False, for
normalization parameters if normalization_weight_decay==False
"""
# Separate out all parameters to those that will and won't experience regularizing weight decay
blacklist_weight_modules = (nn.Embedding, )
if not normalization_weight_decay:
blacklist_weight_modules += (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
# Not compatible with Pytorch 1.8.1
# nn.LazyBatchNorm1d, nn.LazyBatchNorm2d, nn.LazyBatchNorm3d,
nn.GroupNorm, nn.SyncBatchNorm,
nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
nn.LayerNorm, nn.LocalResponseNorm)
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
if (not bias_weight_decay and pn.endswith('bias')) \
or getattr(p, '_no_weight_decay', False) \
or isinstance(m, blacklist_weight_modules):
setattr(p, "_optim", {"weight_decay": 0.0})
def group_parameters_for_optimizer(
model,
optimizer_cfg,
bias_weight_decay=False,
normalization_weight_decay=False,
):
"""Handle special optimizer logic (adapted from timm).
Set weight_decay=0.0 for parameters in model.no_weight_decay, for parameters with
attribute _no_weight_decay==True, for bias parameters if bias_weight_decay==False, for
normalization parameters if normalization_weight_decay==False
"""
# Get the weight decay from the config, or from the default value of the optimizer constructor
# if it's not specified in the config.
if 'weight_decay' in optimizer_cfg:
weight_decay = optimizer_cfg.weight_decay
else:
# https://stackoverflow.com/questions/12627118/get-a-function-arguments-default-value
signature = inspect.signature(hydra.utils.get_class(optimizer_cfg._target_))
if 'weight_decay' in signature.parameters:
weight_decay = signature.parameters['weight_decay'].default
if weight_decay is inspect.Parameter.empty:
weight_decay = 0.0
else:
weight_decay = 0.0
# If none of the parameters have weight decay anyway, and there are no parameters with special
# optimization params
if weight_decay == 0.0 and not any(hasattr(p, '_optim') for p in model.parameters()):
return model.parameters()
skip = model.no_weight_decay() if hasattr(model, 'no_weight_decay') else set()
skip_keywords = (model.no_weight_decay_keywords() if hasattr(model, 'no_weight_decay_keywords')
else set())
# Adapted from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py#L134
"""
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
We are then returning the parameter groups used to construct the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
special = set()
whitelist_weight_modules = (nn.Linear, )
blacklist_weight_modules = (nn.Embedding, )
if not normalization_weight_decay:
blacklist_weight_modules += (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
# Not compatible with Pytorch 1.8.1
# nn.LazyBatchNorm1d, nn.LazyBatchNorm2d, nn.LazyBatchNorm3d,
nn.GroupNorm, nn.SyncBatchNorm,
nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
nn.LayerNorm, nn.LocalResponseNorm)
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if not p.requires_grad:
continue # frozen weights
if hasattr(p, '_optim'):
special.add(fpn)
elif fpn in skip or any(skip_keyword in fpn for skip_keyword in skip_keywords):
no_decay.add(fpn)
elif getattr(p, '_no_weight_decay', False):
no_decay.add(fpn)
elif not bias_weight_decay and pn.endswith('bias'):
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
param_dict = {pn: p for pn, p in model.named_parameters() if p.requires_grad}
# special case the position embedding parameter in the root GPT module as not decayed
if 'pos_emb' in param_dict:
no_decay.add('pos_emb')
# In case of parameter sharing, some parameters show up in decay but are not in param_dict.keys()
decay &= param_dict.keys()
decay |= (param_dict.keys() - no_decay - special)
# validate that we considered every parameter
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, f"Parameters {str(inter_params)} made it into both decay/no_decay sets!"
assert len(param_dict.keys() - special - union_params) == 0, f"parameters {str(param_dict.keys() - union_params)} were not separated into either decay/no_decay set!"
if weight_decay == 0.0 or not no_decay:
param_groups = [{"params": [param_dict[pn] for pn in sorted(list(no_decay | decay))],
"weight_decay": weight_decay}]
else:
param_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
# Add parameters with special hyperparameters
# Unique dicts
hps = [dict(s) for s in set(frozenset(param_dict[pn]._optim.items()) for pn in special)]
for hp in hps:
params = [param_dict[pn] for pn in sorted(list(special)) if param_dict[pn]._optim == hp]
param_groups.append({"params": params, **hp})
return param_groups
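# Usage sketch (hedged): tagging parameters with add_optimizer_hooks and then building the
# corresponding optimizer groups by hand. The toy model and hyperparameters are illustrative.
import torch
model = nn.Sequential(nn.Linear(16, 16), nn.LayerNorm(16), nn.Linear(16, 4))
add_optimizer_hooks(model)  # tags bias and normalization params with _optim={"weight_decay": 0.0}
decay_params = [p for p in model.parameters() if not hasattr(p, "_optim")]
no_decay_params = [p for p in model.parameters() if hasattr(p, "_optim")]
optimizer = torch.optim.AdamW(
    [{"params": decay_params, "weight_decay": 0.01},
     {"params": no_decay_params, "weight_decay": 0.0}],
    lr=1e-3,
)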
| state-spaces-main | src/utils/optim_groups.py |
"""Utilities for dealing with collection objects (lists, dicts) and configs."""
from typing import Sequence, Mapping, Optional, Callable
import functools
import hydra
from omegaconf import ListConfig, DictConfig
# TODO this is usually used in a pattern where it's turned into a list, so can just do that here
def is_list(x):
return isinstance(x, Sequence) and not isinstance(x, str)
def is_dict(x):
return isinstance(x, Mapping)
def to_dict(x, recursive=True):
"""Convert Sequence or Mapping object to dict.
lists get converted to {0: x[0], 1: x[1], ...}
"""
if is_list(x):
x = {i: v for i, v in enumerate(x)}
if is_dict(x):
if recursive:
return {k: to_dict(v, recursive=recursive) for k, v in x.items()}
else:
return dict(x)
else:
return x
def to_list(x, recursive=False):
"""Convert an object to list.
If Sequence (e.g. list, tuple, ListConfig): return it as a list
Special case: if non-recursive and not a Sequence, wrap in a list
"""
if is_list(x):
if recursive:
return [to_list(_x) for _x in x]
else:
return list(x)
else:
if recursive:
return x
else:
return [x]
def extract_attrs_from_obj(obj, *attrs):
if obj is None:
assert len(attrs) == 0
return []
return [getattr(obj, attr, None) for attr in attrs]
def instantiate(registry, config, *args, partial=False, wrap=None, **kwargs):
"""Instantiate a class or Callable. Wraps hydra.utils.instantiate.
registry: Dictionary mapping names to functions or target paths (e.g. {'model': 'models.SequenceModel'})
config: Dictionary with a '_name_' key indicating which element of the registry to grab, and kwargs to be passed into the target constructor
wrap: wrap the target class (e.g. ema optimizer or tasks.wrap)
*args, **kwargs: additional arguments to override the config to pass into the target constructor
"""
# Case 1: no config
if config is None:
return None
# Case 2a: string means _name_ was overloaded
if isinstance(config, str):
_name_ = None
_target_ = registry[config]
config = {}
# Case 2b: grab the desired callable from name
else:
_name_ = config.pop("_name_")
_target_ = registry[_name_]
# Retrieve the right constructor automatically based on type
if isinstance(_target_, str):
fn = hydra.utils.get_method(path=_target_)
elif isinstance(_target_, Callable):
fn = _target_
else:
raise NotImplementedError("instantiate target must be string or callable")
# Instantiate object
if wrap is not None:
fn = wrap(fn)
obj = functools.partial(fn, *args, **config, **kwargs)
# Restore _name_
if _name_ is not None:
config["_name_"] = _name_
if partial:
return obj
else:
return obj()
def get_class(registry, _name_):
return hydra.utils.get_class(path=registry[_name_])
def omegaconf_filter_keys(d, fn=None):
"""Only keep keys where fn(key) is True. Support nested DictConfig."""
# TODO can make this inplace?
if fn is None:
fn = lambda _: True
if is_list(d):
return ListConfig([omegaconf_filter_keys(v, fn) for v in d])
elif is_dict(d):
return DictConfig(
{k: omegaconf_filter_keys(v, fn) for k, v in d.items() if fn(k)}
)
else:
return d
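# Usage sketch (hedged): how instantiate() resolves a config against a registry.
# The toy registry and config values below are made up for illustration.
import torch.nn as nn
toy_registry = {"linear": "torch.nn.Linear", "gelu": nn.GELU}
layer = instantiate(toy_registry, {"_name_": "linear", "in_features": 8, "out_features": 4})
act = instantiate(toy_registry, "gelu")  # plain string config: _name_ was overloaded
layer_ctor = instantiate(toy_registry, {"_name_": "linear", "in_features": 8, "out_features": 4}, partial=True)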
| state-spaces-main | src/utils/config.py |
optimizer = {
"adam": "torch.optim.Adam",
"adamw": "torch.optim.AdamW",
"rmsprop": "torch.optim.RMSprop",
"sgd": "torch.optim.SGD",
"lamb": "src.utils.optim.lamb.JITLamb",
}
scheduler = {
"constant": "transformers.get_constant_schedule",
"plateau": "torch.optim.lr_scheduler.ReduceLROnPlateau",
"step": "torch.optim.lr_scheduler.StepLR",
"multistep": "torch.optim.lr_scheduler.MultiStepLR",
"cosine": "torch.optim.lr_scheduler.CosineAnnealingLR",
"constant_warmup": "transformers.get_constant_schedule_with_warmup",
"linear_warmup": "transformers.get_linear_schedule_with_warmup",
"cosine_warmup": "transformers.get_cosine_schedule_with_warmup",
"timm_cosine": "src.utils.optim.schedulers.TimmCosineLRScheduler",
}
callbacks = {
"timer": "src.callbacks.timer.Timer",
"params": "src.callbacks.params.ParamsLog",
"learning_rate_monitor": "pytorch_lightning.callbacks.LearningRateMonitor",
"model_checkpoint": "pytorch_lightning.callbacks.ModelCheckpoint",
"early_stopping": "pytorch_lightning.callbacks.EarlyStopping",
"swa": "pytorch_lightning.callbacks.StochasticWeightAveraging",
"rich_model_summary": "pytorch_lightning.callbacks.RichModelSummary",
"rich_progress_bar": "pytorch_lightning.callbacks.RichProgressBar",
"progressive_resizing": "src.callbacks.progressive_resizing.ProgressiveResizing",
# "profiler": "pytorch_lightning.profilers.PyTorchProfiler",
}
model = {
# Backbones from this repo
"model": "src.models.sequence.backbones.model.SequenceModel",
"unet": "src.models.sequence.backbones.unet.SequenceUNet",
"sashimi": "src.models.sequence.backbones.sashimi.Sashimi",
"sashimi_standalone": "models.sashimi.sashimi.Sashimi",
# Baseline RNNs
"lstm": "src.models.baselines.lstm.TorchLSTM",
"gru": "src.models.baselines.gru.TorchGRU",
"unicornn": "src.models.baselines.unicornn.UnICORNN",
"odelstm": "src.models.baselines.odelstm.ODELSTM",
"lipschitzrnn": "src.models.baselines.lipschitzrnn.RnnModels",
"stackedrnn": "src.models.baselines.samplernn.StackedRNN",
"stackedrnn_baseline": "src.models.baselines.samplernn.StackedRNNBaseline",
"samplernn": "src.models.baselines.samplernn.SampleRNN",
"dcgru": "src.models.baselines.dcgru.DCRNNModel_classification",
"dcgru_ss": "src.models.baselines.dcgru.DCRNNModel_nextTimePred",
# Baseline CNNs
"ckconv": "src.models.baselines.ckconv.ClassificationCKCNN",
"wavegan": "src.models.baselines.wavegan.WaveGANDiscriminator", # DEPRECATED
"denseinception": "src.models.baselines.dense_inception.DenseInception",
"wavenet": "src.models.baselines.wavenet.WaveNetModel",
"torch/resnet2d": "src.models.baselines.resnet.TorchVisionResnet", # 2D ResNet
# Nonaka 1D CNN baselines
"nonaka/resnet18": "src.models.baselines.nonaka.resnet.resnet1d18",
"nonaka/inception": "src.models.baselines.nonaka.inception.inception1d",
"nonaka/xresnet50": "src.models.baselines.nonaka.xresnet.xresnet1d50",
# ViT Variants (note: small variant is taken from Tri, differs from original)
"vit": "models.baselines.vit.ViT",
"vit_s_16": "src.models.baselines.vit_all.vit_small_patch16_224",
"vit_b_16": "src.models.baselines.vit_all.vit_base_patch16_224",
# Timm models
"timm/convnext_base": "src.models.baselines.convnext_timm.convnext_base",
"timm/convnext_small": "src.models.baselines.convnext_timm.convnext_small",
"timm/convnext_tiny": "src.models.baselines.convnext_timm.convnext_tiny",
"timm/convnext_micro": "src.models.baselines.convnext_timm.convnext_micro",
"timm/resnet50": "src.models.baselines.resnet_timm.resnet50", # Can also register many other variants in resnet_timm
"timm/convnext_tiny_3d": "src.models.baselines.convnext_timm.convnext3d_tiny",
# Segmentation models
"convnext_unet_tiny": "src.models.segmentation.convnext_unet.convnext_tiny_unet",
}
layer = {
"id": "src.models.sequence.base.SequenceIdentity",
"lstm": "src.models.baselines.lstm.TorchLSTM",
"standalone": "models.s4.s4.S4Block",
"s4d": "models.s4.s4d.S4D",
"ffn": "src.models.sequence.modules.ffn.FFN",
"sru": "src.models.sequence.rnns.sru.SRURNN",
"rnn": "src.models.sequence.rnns.rnn.RNN", # General RNN wrapper
"conv1d": "src.models.sequence.convs.conv1d.Conv1d",
"conv2d": "src.models.sequence.convs.conv2d.Conv2d",
"mha": "src.models.sequence.attention.mha.MultiheadAttention",
"vit": "src.models.sequence.attention.mha.VitAttention",
"performer": "src.models.sequence.attention.linear.Performer",
"lssl": "src.models.sequence.modules.lssl.LSSL",
"s4": "src.models.sequence.modules.s4block.S4Block",
"fftconv": "src.models.sequence.kernels.fftconv.FFTConv",
"s4nd": "src.models.sequence.modules.s4nd.S4ND",
"mega": "src.models.sequence.modules.mega.MegaBlock",
"h3": "src.models.sequence.experimental.h3.H3",
"h4": "src.models.sequence.experimental.h4.H4",
# 'packedrnn': 'models.sequence.rnns.packedrnn.PackedRNN',
}
layer_decay = {
'convnext_timm_tiny': 'src.models.baselines.convnext_timm.get_num_layer_for_convnext_tiny',
}
model_state_hook = {
'convnext_timm_tiny_2d_to_3d': 'src.models.baselines.convnext_timm.convnext_timm_tiny_2d_to_3d',
'convnext_timm_tiny_s4nd_2d_to_3d': 'src.models.baselines.convnext_timm.convnext_timm_tiny_s4nd_2d_to_3d',
}
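# Usage sketch (hedged): these registries are consumed through src.utils.config.instantiate,
# e.g. to build an optimizer from a config. The config values below are illustrative.
import torch.nn as nn
from src.utils.config import instantiate
net = nn.Linear(8, 4)
opt_cfg = {"_name_": "adamw", "lr": 3e-4, "weight_decay": 0.01}
opt = instantiate(optimizer, opt_cfg, net.parameters())  # resolves to torch.optim.AdamW(...)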
| state-spaces-main | src/utils/registry.py |
from .config import is_list, is_dict, to_list, to_dict, get_class, instantiate
| state-spaces-main | src/utils/__init__.py |
import math
import numpy as np
import torch
### Bit reversal permutation
def bitreversal_po2(n):
m = int(math.log(n)/math.log(2))
perm = np.arange(n).reshape(n,1)
for i in range(m):
n1 = perm.shape[0]//2
perm = np.hstack((perm[:n1],perm[n1:]))
return perm.squeeze(0)
def bitreversal_permutation(n):
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
perm = bitreversal_po2(N)
return np.extract(perm < n, perm)
def transpose_permutation(h, w):
indices = np.arange(h*w)
indices = indices.reshape((h, w))
indices = indices.T
indices = indices.reshape(h*w)
return indices
def snake_permutation(h, w):
indices = np.arange(h*w)
indices = indices.reshape((h, w))
indices[1::2, :] = indices[1::2, ::-1]
indices = indices.reshape(h*w)
return indices
def hilbert_permutation(n):
m = int(math.log2(n))
assert n == 2**m
inds = decode(list(range(n*n)), 2, m)
ind_x, ind_y = inds.T
indices = np.arange(n*n).reshape((n, n))
indices = indices[ind_x, ind_y]
return(indices)
""" Hilbert curve utilities taken from https://github.com/PrincetonLIPS/numpy-hilbert-curve """
def decode(hilberts, num_dims, num_bits):
''' Decode an array of Hilbert integers into locations in a hypercube.
This is a vectorized-ish version of the Hilbert curve implementation by John
Skilling as described in:
Skilling, J. (2004, April). Programming the Hilbert curve. In AIP Conference
Proceedings (Vol. 707, No. 1, pp. 381-387). American Institute of Physics.
Params:
-------
hilberts - An ndarray of Hilbert integers. Must be an integer dtype and
cannot have fewer bits than num_dims * num_bits.
num_dims - The dimensionality of the hypercube. Integer.
num_bits - The number of bits for each dimension. Integer.
Returns:
--------
The output is an ndarray of unsigned integers with the same shape as hilberts
but with an additional dimension of size num_dims.
'''
if num_dims*num_bits > 64:
raise ValueError(
'''
num_dims=%d and num_bits=%d for %d bits total, which can't be encoded
into a uint64. Are you sure you need that many points on your Hilbert
curve?
''' % (num_dims, num_bits, num_dims * num_bits)
)
# Handle the case where we got handed a naked integer.
hilberts = np.atleast_1d(hilberts)
# Keep around the shape for later.
orig_shape = hilberts.shape
# Treat each of the hilberts as a sequence of eight uint8.
# This treats all of the inputs as uint64 and makes things uniform.
hh_uint8 = np.reshape(hilberts.ravel().astype('>u8').view(np.uint8), (-1, 8))
# Turn these lists of uints into lists of bits and then truncate to the size
# we actually need for using Skilling's procedure.
hh_bits = np.unpackbits(hh_uint8, axis=1)[:,-num_dims*num_bits:]
# Take the sequence of bits and Gray-code it.
gray = binary2gray(hh_bits)
# There has got to be a better way to do this.
# I could index them differently, but the eventual packbits likes it this way.
gray = np.swapaxes(
np.reshape(gray, (-1, num_bits, num_dims)),
axis1=1, axis2=2,
)
# Iterate backwards through the bits.
for bit in range(num_bits-1, -1, -1):
# Iterate backwards through the dimensions.
for dim in range(num_dims-1, -1, -1):
# Identify which ones have this bit active.
mask = gray[:,dim,bit]
# Where this bit is on, invert the 0 dimension for lower bits.
gray[:,0,bit+1:] = np.logical_xor(gray[:,0,bit+1:], mask[:,np.newaxis])
# Where the bit is off, exchange the lower bits with the 0 dimension.
to_flip = np.logical_and(
np.logical_not(mask[:,np.newaxis]),
np.logical_xor(gray[:,0,bit+1:], gray[:,dim,bit+1:])
)
gray[:,dim,bit+1:] = np.logical_xor(gray[:,dim,bit+1:], to_flip)
gray[:,0,bit+1:] = np.logical_xor(gray[:,0,bit+1:], to_flip)
# Pad back out to 64 bits.
extra_dims = 64 - num_bits
padded = np.pad(gray, ((0,0), (0,0), (extra_dims,0)),
mode='constant', constant_values=0)
# Now chop these up into blocks of 8.
locs_chopped = np.reshape(padded[:,:,::-1], (-1, num_dims, 8, 8))
# Take those blocks and turn them unto uint8s.
locs_uint8 = np.squeeze(np.packbits(locs_chopped, bitorder='little', axis=3))
# Finally, treat these as uint64s.
flat_locs = locs_uint8.view(np.uint64)
# Return them in the expected shape.
return np.reshape(flat_locs, (*orig_shape, num_dims))
def right_shift(binary, k=1, axis=-1):
''' Right shift an array of binary values.
Parameters:
-----------
binary: An ndarray of binary values.
k: The number of bits to shift. Default 1.
axis: The axis along which to shift. Default -1.
Returns:
--------
Returns an ndarray with zero prepended and the ends truncated, along
whatever axis was specified.
'''
# If we're shifting the whole thing, just return zeros.
if binary.shape[axis] <= k:
return np.zeros_like(binary)
# Determine the padding pattern.
padding = [(0,0)] * len(binary.shape)
padding[axis] = (k,0)
# Determine the slicing pattern to eliminate just the last one.
slicing = [slice(None)] * len(binary.shape)
slicing[axis] = slice(None, -k)
shifted = np.pad(binary[tuple(slicing)], padding,
mode='constant', constant_values=0)
return shifted
def binary2gray(binary, axis=-1):
''' Convert an array of binary values into Gray codes.
This uses the classic X ^ (X >> 1) trick to compute the Gray code.
Parameters:
-----------
binary: An ndarray of binary values.
axis: The axis along which to compute the gray code. Default=-1.
Returns:
--------
Returns an ndarray of Gray codes.
'''
shifted = right_shift(binary, axis=axis)
# Do the X ^ (X >> 1) trick.
gray = np.logical_xor(binary, shifted)
return gray
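# Usage sketch (hedged): reordering a flattened 2-D input with the permutation helpers
# above. The shapes are illustrative (a 32x32 grid flattened to length 1024).
x = torch.randn(8, 1024, 3)  # (batch, length, channels)
perm = torch.as_tensor(bitreversal_permutation(1024), dtype=torch.long)
x_bitrev = x[:, perm, :]  # bit-reversal ordering of the sequence
perm2 = torch.as_tensor(snake_permutation(32, 32), dtype=torch.long)
x_snake = x[:, perm2, :]  # boustrophedon ordering of the 32x32 grid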
| state-spaces-main | src/utils/permutations.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from contextlib import contextmanager
import torch
def init_distributed(cuda):
"""
Initializes distributed backend.
:param cuda: (bool) if True initializes nccl backend, if False initializes
gloo backend
"""
world_size = int(os.environ.get('WORLD_SIZE', 1))
distributed = (world_size > 1)
if distributed:
backend = 'nccl' if cuda else 'gloo'
torch.distributed.init_process_group(backend=backend,
init_method='env://')
assert torch.distributed.is_initialized()
return distributed
def barrier():
"""
Call torch.distributed.barrier() if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
"""
Gets total number of distributed workers or returns one if distributed is
not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
def all_reduce_item(value, op='sum'):
"""
All-reduces a single scalar value if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if op == 'sum' or op == 'mean':
dop = torch.distributed.ReduceOp.SUM
elif op == 'min':
dop = torch.distributed.ReduceOp.MIN
elif op == 'max':
dop = torch.distributed.ReduceOp.MAX
elif op == 'product':
dop = torch.distributed.ReduceOp.PRODUCT
else:
raise RuntimeError('Unsupported reduce op')
backend = torch.distributed.get_backend()
if backend == torch.distributed.Backend.NCCL:
device = torch.device('cuda')
elif backend == torch.distributed.Backend.GLOO:
device = torch.device('cpu')
else:
raise RuntimeError('Unsupported distributed backend')
tensor = torch.tensor(value, device=device)
torch.distributed.all_reduce(tensor, dop)
if op == 'mean':
tensor /= get_world_size()
ret = tensor.item()
else:
ret = value
return ret
def all_reduce_tensor(value, op='sum'):
"""
All-reduces a tensor if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if op == 'sum' or op == 'mean':
dop = torch.distributed.ReduceOp.SUM
elif op == 'min':
dop = torch.distributed.ReduceOp.MIN
elif op == 'max':
dop = torch.distributed.ReduceOp.MAX
elif op == 'product':
dop = torch.distributed.ReduceOp.PRODUCT
else:
raise RuntimeError('Unsupported reduce op')
backend = torch.distributed.get_backend()
if backend == torch.distributed.Backend.NCCL:
device = torch.device('cuda')
elif backend == torch.distributed.Backend.GLOO:
device = torch.device('cpu')
else:
raise RuntimeError('Unsupported distributed backend')
tensor = value
torch.distributed.all_reduce(tensor, dop)
if op == 'mean':
tensor /= get_world_size()
ret = tensor
else:
ret = value
return ret
@contextmanager
def sync_workers():
"""
Yields distributed rank and synchronizes all workers on exit.
"""
rank = get_rank()
yield rank
barrier()
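# Usage sketch (hedged): these helpers degrade gracefully when torch.distributed is not
# initialized, so the snippet below also runs in a single-process setting.
local_loss = 0.123  # illustrative per-worker scalar
global_loss = all_reduce_item(local_loss, op='mean')
with sync_workers() as rank:
    if rank == 0:
        print(f'mean loss across workers: {global_loss:.4f}')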
| state-spaces-main | src/utils/distributed.py |
"""Utils for the training loop. Adapted from https://github.com/HazyResearch/transformers/blob/master/src/utils/utils.py."""
import logging
import os
import warnings
from typing import List, Sequence
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_only
from src.utils.config import omegaconf_filter_keys
# Copied from https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging
# [21-09-17 AG] copied from Tri's infra, doesn't appear to be used
class LoggingContext:
def __init__(self, logger, level=None, handler=None, close=True):
self.logger = logger
self.level = level
self.handler = handler
self.close = close
def __enter__(self):
if self.level is not None:
self.old_level = self.logger.level
self.logger.setLevel(self.level)
if self.handler:
self.logger.addHandler(self.handler)
def __exit__(self, et, ev, tb):
if self.level is not None:
self.logger.setLevel(self.old_level)
if self.handler:
self.logger.removeHandler(self.handler)
if self.handler and self.close:
self.handler.close()
# implicit return of None => don't swallow exceptions
def get_logger(name=__name__, level=logging.INFO) -> logging.Logger:
"""Initializes multi-GPU-friendly python logger."""
logger = logging.getLogger(name)
logger.setLevel(level)
# this ensures all logging levels get marked with the rank zero decorator
# otherwise logs would get multiplied for each GPU process in multi-GPU setup
for level in ("debug", "info", "warning", "error", "exception", "fatal", "critical"):
setattr(logger, level, rank_zero_only(getattr(logger, level)))
return logger
def process_config(config: DictConfig) -> DictConfig: # TODO because of filter_keys, this is no longer in place
"""A couple of optional utilities, controlled by main config file:
- disabling warnings
- easier access to debug mode
- forcing debug friendly configuration
Modifies DictConfig in place.
Args:
config (DictConfig): Configuration composed by Hydra.
"""
log = get_logger()
OmegaConf.register_new_resolver('eval', eval)
# Filter out keys that were used just for interpolation
# config = dictconfig_filter_keys(config, lambda k: not k.startswith('__'))
config = omegaconf_filter_keys(config, lambda k: not k.startswith('__'))
# enable adding new keys to config
OmegaConf.set_struct(config, False)
# disable python warnings if <config.ignore_warnings=True>
if config.get("ignore_warnings"):
log.info("Disabling python warnings! <config.ignore_warnings=True>")
warnings.filterwarnings("ignore")
if config.get("debug"):
log.info("Running in debug mode! <config.debug=True>")
config.trainer.fast_dev_run = True
# force debugger friendly configuration
log.info("Forcing debugger friendly configuration! <config.trainer.fast_dev_run=True>")
# Debuggers don't like GPUs or multiprocessing
if config.trainer.get("gpus"):
config.trainer.gpus = 0
if config.loader.get("pin_memory"):
config.loader.pin_memory = False
if config.loader.get("num_workers"):
config.loader.num_workers = 0
# disable adding new keys to config
# OmegaConf.set_struct(config, True) # [21-09-17 AG] I need this for .pop(_name_) pattern among other things
return config
@rank_zero_only
def print_config(
config: DictConfig,
# fields: Sequence[str] = (
# "trainer",
# "model",
# "datamodule",
# "train",
# "callbacks",
# "logger",
# "seed",
# ),
resolve: bool = True,
save_cfg=True,
) -> None:
"""Prints content of DictConfig using Rich library and its tree structure.
Args:
config (DictConfig): Configuration composed by Hydra.
fields (Sequence[str], optional): Determines which main fields from config will
be printed and in what order.
resolve (bool, optional): Whether to resolve reference fields of DictConfig.
"""
style = "dim"
tree = rich.tree.Tree("CONFIG", style=style, guide_style=style)
fields = config.keys()
for field in fields:
branch = tree.add(field, style=style, guide_style=style)
config_section = config.get(field)
branch_content = str(config_section)
if isinstance(config_section, DictConfig):
branch_content = OmegaConf.to_yaml(config_section, resolve=resolve)
branch.add(rich.syntax.Syntax(branch_content, "yaml"))
rich.print(tree)
if save_cfg:
with open("config_tree.txt", "w") as fp:
rich.print(tree, file=fp)
def log_optimizer(logger, optimizer, keys):
""" Log values of particular keys from the optimizer's param groups """
keys = sorted(keys)
for i, g in enumerate(optimizer.param_groups):
group_hps = {k: g.get(k, None) for k in keys}
n_params = sum(p.numel() for p in g['params'])
logger.info(' | '.join([
f"Optimizer group {i}",
f"{len(g['params'])} tensors",
f"{n_params} parameters",
] + [f"{k} {v}" for k, v in group_hps.items()]))
# print(f"Optimizer group {i} | {len(g['params'])} tensors | lr {g['lr']} | wd {g.get('weight_decay', None)}")
"""Old code for resuming logic moved to tests/"""
| state-spaces-main | src/utils/train.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2019 cybertronai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Lamb optimizer."""
import torch
from torch.optim import Optimizer
class Lamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super(Lamb, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Paper v3 does not use debiasing.
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# Apply bias to lr to avoid broadcast.
step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1
weight_norm = p.data.norm(p=2).clamp_(0, 10)
adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
if group['weight_decay'] != 0:
adam_step.add_(group['weight_decay'], p.data)
adam_norm = adam_step.norm(p=2)
if weight_norm == 0.0 or adam_norm == 0.0:
trust_ratio = 1
else:
trust_ratio = weight_norm / (adam_norm + group['eps'])
state['weight_norm'] = weight_norm
state['adam_norm'] = adam_norm
state['trust_ratio'] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(-step_size * trust_ratio, adam_step)
return loss
@torch.jit.script
def lamb_kernel(param, grad, exp_avg, exp_avg_sq, beta1: float,
beta2: float, step_size: float, eps: float, weight_decay: float):
exp_avg = exp_avg * beta1 + (1 - beta1) * grad
exp_avg_sq = exp_avg_sq * beta2 + (1 - beta2) * (grad * grad)
adam_step = exp_avg / (exp_avg_sq.sqrt() + eps)
adam_step = adam_step + weight_decay * param
weight_norm = param.norm(p=2).clamp(0, 10)
adam_norm = adam_step.norm(p=2)
trust_ratio = weight_norm / (adam_norm + eps)
trust_ratio = (weight_norm == 0.0) * 1.0 + (weight_norm != 0.0) * trust_ratio
trust_ratio = (adam_norm == 0.0) * 1.0 + (adam_norm != 0.0) * trust_ratio
trust_ratio = trust_ratio.float()
param = param - step_size * trust_ratio * adam_step
return param, exp_avg, exp_avg_sq
class JITLamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super().__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
step_size = group['lr']
param, exp_avg, exp_avg_sq = lamb_kernel(p.data, grad, exp_avg,
exp_avg_sq, beta1,
beta2, step_size,
group['eps'],
group['weight_decay'],
)
state['exp_avg'] = exp_avg
state['exp_avg_sq'] = exp_avg_sq
p.data = param
return loss
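# Usage sketch (hedged): training a toy model with the JITLamb optimizer above.
# The model, batch, and hyperparameters are illustrative.
import torch.nn.functional as F
model = torch.nn.Linear(10, 2)
opt = JITLamb(model.parameters(), lr=1e-3, weight_decay=0.01)
x, y = torch.randn(32, 10), torch.randint(0, 2, (32,))
loss = F.cross_entropy(model(x), y)
loss.backward()
opt.step()
opt.zero_grad()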
| state-spaces-main | src/utils/optim/lamb.py |
"""Wrapper of optimizers in torch.optim for computation of exponential moving average of parameters.
Source: https://github.com/kamenbliznashki/pixel_models/blob/master/optim.py
"""
import torch
def build_ema_optimizer(optimizer_cls):
class Optimizer(optimizer_cls):
def __init__(self, *args, polyak=0.0, **kwargs):
if not 0.0 <= polyak <= 1.0:
raise ValueError("Invalid polyak decay rate: {}".format(polyak))
super().__init__(*args, **kwargs)
self.defaults['polyak'] = polyak
self.stepped = False
def step(self, closure=None):
super().step(closure)
self.stepped = True
# update exponential moving average after gradient update to parameters
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
# state initialization
if 'ema' not in state:
state['ema'] = p.data.clone() # torch.zeros_like(p.data)
# ema update
state['ema'] -= (1 - self.defaults['polyak']) * (state['ema'] - p.data)
def swap_ema(self):
""" substitute exponential moving average values into parameter values """
for group in self.param_groups:
for p in group['params']:
data = p.data
state = self.state[p]
p.data = state['ema']
state['ema'] = data
def __repr__(self):
s = super().__repr__()
return self.__class__.__mro__[1].__name__ + ' (\npolyak: {}\n'.format(self.defaults['polyak']) + s.partition('\n')[2]
Optimizer.__name__ = optimizer_cls.__name__
return Optimizer
Adam = build_ema_optimizer(torch.optim.Adam)
RMSprop = build_ema_optimizer(torch.optim.RMSprop)
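# Usage sketch (hedged): the EMA-wrapped Adam keeps a polyak average of the weights;
# swap_ema exchanges live weights with the average (call it again to swap back).
# The model and decay rate are illustrative.
import torch.nn as nn
model = nn.Linear(4, 4)
opt = Adam(model.parameters(), lr=1e-3, polyak=0.999)
loss = model(torch.randn(2, 4)).sum()
loss.backward()
opt.step()      # gradient update, then EMA update
opt.swap_ema()  # evaluate with the EMA weights ...
opt.swap_ema()  # ... and swap back before training resumes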
| state-spaces-main | src/utils/optim/ema.py |
"""Custom learning rate schedulers."""
import math
import warnings
import torch
from timm.scheduler import CosineLRScheduler
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html
class CosineWarmup(torch.optim.lr_scheduler.CosineAnnealingLR):
def __init__(self, optimizer, T_max, eta_min=0, warmup_step=0, **kwargs):
self.warmup_step = warmup_step
super().__init__(optimizer, T_max - warmup_step, eta_min, **kwargs)
# Copied from CosineAnnealingLR, but adding warmup and changing self.last_epoch to
# self.last_epoch - self.warmup_step.
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == self.warmup_step: # also covers the case where both are 0
return self.base_lrs
elif self.last_epoch < self.warmup_step:
return [base_lr * (self.last_epoch + 1) / self.warmup_step for base_lr in self.base_lrs]
elif (self.last_epoch - self.warmup_step - 1 - self.T_max) % (2 * self.T_max) == 0:
return [group['lr'] + (base_lr - self.eta_min) *
(1 - math.cos(math.pi / self.T_max)) / 2
for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)]
return [(1 + math.cos(math.pi * (self.last_epoch - self.warmup_step) / self.T_max)) /
(1 + math.cos(math.pi * (self.last_epoch - self.warmup_step - 1) / self.T_max)) *
(group['lr'] - self.eta_min) + self.eta_min
for group in self.optimizer.param_groups]
_get_closed_form_lr = None
def InvSqrt(optimizer, warmup_step):
"""Originally used for Transformer (in "Attention is All You Need")."""
def lr_lambda(step):
# return a multiplier instead of a learning rate
if step == warmup_step: # also covers the case where both are 0
return 1.
else:
return 1. / (step ** 0.5) if step > warmup_step else (step + 1) / (warmup_step ** 1.5)
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
def Constant(optimizer, warmup_step):
def lr_lambda(step):
if step == warmup_step: # also covers the case where both are 0
return 1.
else:
return 1. if step > warmup_step else (step + 1) / warmup_step
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
class TimmCosineLRScheduler(CosineLRScheduler, torch.optim.lr_scheduler._LRScheduler):
"""Wrap timm.scheduler.CosineLRScheduler so we can call scheduler.step() without passing in epoch.
It supports resuming as well.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._last_epoch = -1
self.step(epoch=0)
def step(self, epoch=None):
if epoch is None:
self._last_epoch += 1
else:
self._last_epoch = epoch
# We call either step or step_update, depending on whether we're using the scheduler every
# epoch or every step.
# Otherwise, lightning will always call step (i.e., meant for each epoch), and if we set
# scheduler interval to "step", then the learning rate update will be wrong.
if self.t_in_epochs:
super().step(epoch=self._last_epoch)
else:
super().step_update(num_updates=self._last_epoch)
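# Usage sketch (hedged): CosineWarmup ramps the learning rate linearly for warmup_step
# steps and then follows cosine annealing. The optimizer and step counts are illustrative.
model = torch.nn.Linear(4, 4)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
sched = CosineWarmup(opt, T_max=1000, warmup_step=100)
for _ in range(1000):
    opt.step()   # gradient computation omitted in this sketch
    sched.step()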
| state-spaces-main | src/utils/optim/schedulers.py |
"""Implementations of different types of residual functions."""
import torch
from torch import nn
class Residual(nn.Module):
"""Residual connection with constant affine weights.
Can simulate standard residual, no residual, and "constant gates".
"""
def __init__(self, i_layer, d_input, d_model, alpha=1.0, beta=1.0):
# print("ConstantResidual extra kwargs", kwargs)
super().__init__()
assert (d_input == d_model) or alpha == 0.0
self.i_layer = i_layer
self.d_input = d_input
self.d_model = d_model
self.alpha = alpha
self.beta = beta
@property
def d_output(self):
return self.d_model
def forward(self, x, y, transposed): # TODO documentation of transposed
y = self.beta*y if self.beta != 1.0 else y
return self.alpha * x + y if self.alpha else y
class Affine(Residual):
"""Residual with learnable scalar multipliers on the main branch.
Arguments:
- scalar: Single scalar multiplier, or one per dimension
- gamma: Initialize the multiplier to beta * i_layer**(-gamma)
"""
def __init__(self, *args, scalar=True, gamma=0.0, **kwargs):
# print("ConstantResidual extra kwargs", kwargs)
super().__init__(*args, **kwargs)
self.scalar = scalar
self.gamma = gamma
c = self.beta * self.i_layer ** (-self.gamma)
d = 1 if self.scalar else self.d_input
self.affine = nn.Parameter(c * torch.ones(d))
def forward(self, x, y, transposed): # TODO documentation of transposed
c = self.affine
if transposed: c = c.unsqueeze(-1)
return self.alpha * x + c * y
class Feedforward(Residual):
def __init__(self, *args):
# print("Feedforward extra kwargs", kwargs)
super().__init__(*args, alpha=0.0, beta=1.0)
class Highway(Residual):
def __init__(self, *args, scaling_correction=False, elemwise=False):
super().__init__(*args)
self.scaling_correction = 1.732 if scaling_correction else 1.0 # TODO
self.elemwise = elemwise
self.Wx = nn.Linear(self.d_input, self.d_input)
if self.elemwise:
self.Wy = nn.Parameter(torch.randn(self.d_input))
else:
self.Wy = nn.Linear(self.d_input, self.d_input)
def forward(self, x, y, transposed=False): # TODO handle this case
if self.elemwise:
y = self.Wy * y
else:
y = self.Wy(y)
r = torch.sigmoid(self.Wx(x) + y)
z = self.scaling_correction * (1.-r) * x + r * y
return z
class DecayResidual(Residual):
""" Residual connection that can decay the linear combination depending on depth. """
def __init__(self, *args, power=0.5, l2=True):
# print("DecayResidual extra kwargs", kwargs)
super().__init__(*args)
self.power = power
self.l2 = l2
def forward(self, x, y, transposed):
beta = self.i_layer ** (-self.power)
if self.l2:
alpha = (1. - beta**2)**0.5
else:
alpha = 1. - beta
return alpha * x + beta * y
registry = {
'F': Feedforward,
'N': Feedforward,
'R': Residual,
'H': Highway,
'D': DecayResidual,
'A': Affine,
'none': Feedforward,
'ff': Feedforward,
'feedforward': Feedforward,
'residual': Residual,
'highway': Highway,
'decay': DecayResidual,
'affine': Affine,
}
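# Usage sketch (hedged): instantiating a residual connection from the registry above and
# applying it to a block's input/output pair. Feature sizes are illustrative.
residual = registry['R'](i_layer=1, d_input=64, d_model=64)
x = torch.randn(8, 100, 64)  # input to the block
y = torch.randn(8, 100, 64)  # output of the block
out = residual(x, y, transposed=False)  # x + y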
| state-spaces-main | src/models/nn/residual.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adaptive softmax layer for language modeling (e.g. WikiText-103)."""
from typing import List, Optional
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import src.models.nn.utils as U
class OptionalParameterList(nn.ParameterList):
def extra_repr(self):
child_lines = []
for k, p in self._parameters.items():
if p is not None:
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
tie_projs=None, out_layers_weights=None, out_projs=None,
keep_order=False,
bias_scale=0.0,
dropout=0.0,
):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = list(cutoffs) + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
# bake the first False into the definition, just as [0] is built into the cutoffs
if tie_projs is None: tie_projs = []
elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs)
else: tie_projs = list(tie_projs)
tie_projs = [False] + tie_projs
self.tie_projs = tie_projs
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
if not out_layers_weights:
self.out_layers_weights = nn.ParameterList()
else:
self.out_layers_weights = out_layers_weights
self.out_layers_biases = nn.ParameterList()
self.shared_out_projs = out_projs
self.out_projs = OptionalParameterList()
self.dropout = dropout
self.drop = nn.Dropout(dropout)
if div_val == 1:
if d_proj != d_embed:
for i in range(len(self.cutoffs)):
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_embed))
)
else:
self.out_projs.append(None)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(n_token))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(n_token, d_embed))
)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_emb_i))
)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(r_idx - l_idx))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i))
)
for bias in self.out_layers_biases:
bound = bias_scale * d_proj ** -.5
nn.init.uniform_(bias, -bound, bound)
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
if self.dropout > 0.0:
logit = hidden @ proj
logit = self.drop(logit)
logit = logit @ weight.t()
else:
logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
if bias is not None:
logit = logit + bias
return logit
def get_out_proj(self, i):
if self.tie_projs[i]:
if len(self.shared_out_projs) == 0:
return None
elif len(self.shared_out_projs) == 1:
return self.shared_out_projs[0]
else:
return self.shared_out_projs[i]
else:
return self.out_projs[i]
def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs):
# [21-09-15 AG]: TODO may need to handle key_padding_mask
'''
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
'''
hidden = hidden.reshape(-1, hidden.size(-1))
target = target.reshape(-1)
if hidden.size(0) != target.size(0):
print(hidden.shape, target.shape)
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
nll = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero(as_tuple=False).squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
# First term accounts for cluster probabilities
logprob_i = head_logprob_i[:, -i] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
if self.keep_order or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0) # TODO This should be a bug in the original implementation; it should go into the continue case above as well
return nll.mean() # TODO maybe cases for length or padding_mask
def compute_logits(self, hidden):
"""Compute full vector of logits
Adapted from https://github.com/kimiyoung/transformer-xl/issues/88
"""
hidden = hidden.reshape(-1, hidden.size(-1))
if self.n_clusters == 0:
logits = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
return logits
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
out_full_logps = [head_logprob[:, :self.cutoffs[0]]]
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(1, len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
head_logprob_i = head_logprob # .index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden # .index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob_i[:, -i].view(-1, 1) + tail_logprob_i
offset += logprob_i.size(0)
out_full_logps.append(logprob_i)
out_full_logps = torch.cat(out_full_logps, dim = 1)
# print(torch.sum(out_full_ps), out_full_ps.shape)
return out_full_logps
class AdaptiveEmbedding(nn.Module):
""" Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_ operation
Initialization has been fixed for the case when d_proj = d_embed
"""
def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = list(cutoffs) + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.drop = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
_init_embed(self.emb_layers[-1].weight, d_embed, init_scale)
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5)
if d_proj != d_embed: # TODO
# self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5)
_init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale)
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
embed = self.drop(embed)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.reshape(-1)
# Changes from original impl
# emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
embeddings = []
indices = torch.zeros_like(inp_flat) # empty should work as long as cutoffs[-1] > max token
_total_tokens = 0
# emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,)
_tokens = indices_i.numel()
if _tokens == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = self.drop(emb_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
# Changes
embeddings.append(emb_i)
indices.index_put_(
(indices_i,),
torch.arange(_tokens, device=inp.device) + _total_tokens
)
_total_tokens += _tokens
# emb_flat.index_copy_(0, indices_i, emb_i)
embeddings = torch.cat(embeddings, dim=0)
emb_flat = embeddings[indices]
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
# embed.div_(self.emb_scale)
return embed
def _init_weight(weight, d : int, init_scale : Optional[float], default=None):
assert init_scale or default
if init_scale is None:
std = default
else:
std = init_scale * (d ** -0.5)
nn.init.normal_(weight, mean=0, std=std)
_init_embed = functools.partial(_init_weight, default=0.02)
_init_proj = functools.partial(_init_weight, default=0.01)
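# Usage sketch (illustrative, not part of the original file): pair an AdaptiveEmbedding
# with a ProjectedAdaptiveLogSoftmax on random tokens. The vocabulary size, cutoffs, and
# dimensions are assumptions for the example; tie_projs is given explicitly (one flag per
# cutoff), which the constructor expects when there are several cutoffs.
if __name__ == "__main__":
    n_token, d_embed, d_proj = 1000, 64, 64
    cutoffs = [200, 600]
    emb = AdaptiveEmbedding(n_token, d_embed, d_proj, cutoffs, div_val=2)
    crit = ProjectedAdaptiveLogSoftmax(
        n_token, d_embed, d_proj, cutoffs, div_val=2, tie_projs=[False, False]
    )
    tokens = torch.randint(0, n_token, (8, 16))   # (batch, length)
    hidden = emb(tokens)                          # (8, 16, d_proj)
    loss = crit(hidden, tokens)                   # scalar mean NLL
    print(hidden.shape, loss.item())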
| state-spaces-main | src/models/nn/adaptive_softmax.py |
"""Linear nn components."""
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from src.models.nn.activation import Activation
contract = torch.einsum
def get_initializer(name, activation=None):
if activation in [ None, 'id', 'identity', 'linear', 'modrelu' ]:
nonlinearity = 'linear'
elif activation in ['relu', 'tanh', 'sigmoid']:
nonlinearity = activation
elif activation in ['gelu', 'swish', 'silu']:
nonlinearity = 'relu' # Close to ReLU so approximate with ReLU's gain
else:
raise NotImplementedError(f"get_initializer: activation {activation} not supported")
if name == 'uniform':
initializer = partial(torch.nn.init.kaiming_uniform_, nonlinearity=nonlinearity)
elif name == 'normal':
initializer = partial(torch.nn.init.kaiming_normal_, nonlinearity=nonlinearity)
elif name == 'xavier':
initializer = torch.nn.init.xavier_normal_
elif name == 'zero':
initializer = partial(torch.nn.init.constant_, val=0)
elif name == 'one':
initializer = partial(torch.nn.init.constant_, val=1)
else:
raise NotImplementedError(f"get_initializer: initializer type {name} not supported")
return initializer
def LinearActivation(
d_input, d_output, bias=True,
zero_bias_init=False,
transposed=False,
initializer=None,
activation=None,
activate=False, # Apply activation as part of this module
weight_norm=False,
**kwargs,
):
"""Returns a linear nn.Module with control over axes order, initialization, and activation."""
# Construct core module
# linear_cls = partial(nn.Conv1d, kernel_size=1) if transposed else nn.Linear
linear_cls = TransposedLinear if transposed else nn.Linear
if activation is not None and activation.startswith('glu'): d_output *= 2
linear = linear_cls(d_input, d_output, bias=bias, **kwargs)
# Initialize weight
if initializer is not None:
get_initializer(initializer, activation)(linear.weight)
# Initialize bias
if bias and zero_bias_init:
nn.init.zeros_(linear.bias)
# Weight norm
if weight_norm:
linear = nn.utils.weight_norm(linear)
if activate and activation is not None:
activation = Activation(activation, d_output, dim=1 if transposed else -1)
linear = nn.Sequential(linear, activation)
return linear
class TransposedLinear(nn.Module):
"""Linear module on the second-to-last dimension.
    Assumes shape (B, D, L), where L can be one or more axes.
"""
def __init__(self, d_input, d_output, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.empty(d_output, d_input))
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) # nn.Linear default init
# nn.init.kaiming_uniform_(self.weight, nonlinearity='linear') # should be equivalent
if bias:
self.bias = nn.Parameter(torch.empty(d_output))
bound = 1 / math.sqrt(d_input)
nn.init.uniform_(self.bias, -bound, bound)
setattr(self.bias, "_optim", {"weight_decay": 0.0})
else:
self.bias = 0.0
    def forward(self, x):
        num_axis = len(x.shape[2:]) # num_axis in L, for broadcasting bias
        # When bias=False, self.bias is the scalar 0.0 and cannot be .view()-ed
        bias = self.bias.view(-1, *[1]*num_axis) if isinstance(self.bias, torch.Tensor) else self.bias
        y = contract('b u ..., v u -> b v ...', x, self.weight) + bias
        return y
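# Usage sketch (illustrative, not part of the original module): LinearActivation in both
# layouts. Shapes and hyperparameters are assumptions chosen for the example.
if __name__ == "__main__":
    x_bdl = torch.randn(4, 32, 100)    # (B, D, L) "transposed" layout
    layer_t = LinearActivation(32, 64, transposed=True, activation='gelu', activate=True)
    print(layer_t(x_bdl).shape)        # torch.Size([4, 64, 100])
    x_bld = torch.randn(4, 100, 32)    # (B, L, D) layout
    layer = LinearActivation(32, 64, transposed=False, activation='glu', activate=True)
    print(layer(x_bld).shape)          # GLU halves the doubled output back to 64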
| state-spaces-main | src/models/nn/linear.py |
from .linear import LinearActivation, TransposedLinear
from .activation import Activation
from .normalization import Normalization
from .dropout import DropoutNd, StochasticDepth
| state-spaces-main | src/models/nn/__init__.py |
"""Original from Transformer-XL as a hook for their initialization. Currently not used."""
import torch
from torch import nn
def init_weight(weight, init_cfg):
if init_cfg.init == 'uniform':
nn.init.uniform_(weight, -init_cfg.init_range, init_cfg.init_range)
elif init_cfg.init == 'normal':
nn.init.normal_(weight, 0.0, init_cfg.init_std)
elif init_cfg.init == 'xavier':
nn.init.xavier_uniform_(weight)
elif init_cfg.init == 'kaiming':
nn.init.kaiming_normal_(weight, mode='fan_in', nonlinearity='linear')
else:
raise NotImplementedError(f"initialization type {init_cfg.init} not supported")
def init_bias(bias, init_cfg):
if hasattr(init_cfg, 'zero_bias') and init_cfg.zero_bias==False:
# Keep the original bias init
pass
else:
nn.init.constant_(bias, 0.0)
def weights_init(m, init_cfg):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
if hasattr(m, 'weight') and m.weight is not None:
init_weight(m.weight, init_cfg)
if hasattr(m, 'bias') and m.bias is not None:
init_bias(m.bias, init_cfg)
elif classname.find('LayerNorm') != -1:
if hasattr(m, 'weight'):
if hasattr(init_cfg, 'ln') and init_cfg.ln==False:
pass
else:
nn.init.normal_(m.weight, 1.0, init_cfg.init_std)
if hasattr(m, 'bias') and m.bias is not None:
init_bias(m.bias, init_cfg)
elif classname.find('TransformerLM') != -1:
if hasattr(m, 'r_emb'):
init_weight(m.r_emb, init_cfg)
if hasattr(m, 'r_w_bias'):
init_weight(m.r_w_bias, init_cfg)
if hasattr(m, 'r_r_bias'):
init_weight(m.r_r_bias, init_cfg)
if hasattr(m, 'r_bias'):
init_bias(m.r_bias, init_cfg)
if hasattr(m, 'initial_state'):
init_bias(m.initial_state, init_cfg)
def weights_init_embedding(m, init_cfg):
classname = m.__class__.__name__
if classname.find('AdaptiveEmbedding') != -1:
if hasattr(m, 'emb_projs'):
for i in range(len(m.emb_projs)):
if m.emb_projs[i] is not None:
nn.init.normal_(m.emb_projs[i], 0.0, init_cfg.proj_init_std)
elif classname.find('Embedding') != -1:
if hasattr(m, 'weight'):
init_weight(m.weight, init_cfg)
elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
init_weight(m.cluster_weight, init_cfg)
if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
init_bias(m.cluster_bias, init_cfg)
if hasattr(m, 'out_projs'):
for i in range(len(m.out_projs)):
if m.out_projs[i] is not None:
nn.init.normal_(m.out_projs[i], 0.0, init_cfg.proj_init_std)
if hasattr(m, 'out_layers_weights'):
for i in range(len(m.out_layers_weights)):
if m.out_layers_weights[i] is not None:
init_weight(m.out_layers_weights[i], init_cfg)
| state-spaces-main | src/models/nn/initialization.py |
"""Utilities for activation functions."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def Activation(activation=None, size=None, dim=-1):
if activation in [ None, 'id', 'identity', 'linear', 'none' ]:
return nn.Identity()
elif activation == 'tanh':
return nn.Tanh()
elif activation == 'relu':
return nn.ReLU()
elif activation == 'gelu':
return nn.GELU()
elif activation == 'elu':
return nn.ELU()
elif activation in ['swish', 'silu']:
return nn.SiLU()
elif activation == 'glu':
return nn.GLU(dim=dim)
elif activation.startswith('glu-'):
return GLU(dim=dim, activation=activation[4:])
elif activation == 'sigmoid':
return nn.Sigmoid()
elif activation == 'softplus':
return nn.Softplus()
elif activation == 'modrelu':
return ModReLU(size)
elif activation in ['sqrelu', 'relu2']:
return SquaredReLU()
elif activation == 'laplace':
return Laplace()
# Earlier experimentation with a LN in the middle of the block instead of activation
# IIRC ConvNext does something like this?
# elif activation == 'ln':
# return TransposedLN(dim)
else:
raise NotImplementedError("hidden activation '{}' is not implemented".format(activation))
class GLU(nn.Module):
def __init__(self, dim=-1, activation='sigmoid'):
super().__init__()
assert not activation.startswith('glu')
self.dim = dim
self.activation_fn = Activation(activation)
def forward(self, x):
x, g = torch.split(x, x.size(self.dim)//2, dim=self.dim)
return x * self.activation_fn(g)
class ModReLU(nn.Module):
# Adapted from https://github.com/Lezcano/expRNN
def __init__(self, features):
# For now we just support square layers
super().__init__()
self.features = features
self.b = nn.Parameter(torch.Tensor(self.features))
self.reset_parameters()
def reset_parameters(self):
self.b.data.uniform_(-0.01, 0.01)
def forward(self, inputs):
norm = torch.abs(inputs)
biased_norm = norm + self.b
magnitude = F.relu(biased_norm)
phase = torch.sign(inputs)
return phase * magnitude
class SquaredReLU(nn.Module):
def forward(self, x):
# return F.relu(x)**2
return torch.square(F.relu(x)) # Could this be faster?
def laplace(x, mu=0.707107, sigma=0.282095):
x = (x - mu).div(sigma * math.sqrt(2.0))
return 0.5 * (1.0 + torch.erf(x))
class Laplace(nn.Module):
def __init__(self, mu=0.707107, sigma=0.282095):
super().__init__()
self.mu = mu
self.sigma = sigma
def forward(self, x):
return laplace(x, mu=self.mu, sigma=self.sigma)
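# Usage sketch (illustrative, not part of the original module): the registry-style
# Activation() constructor. Shapes below are assumptions for the example.
if __name__ == "__main__":
    x = torch.randn(2, 8, 16)
    print(Activation('gelu')(x).shape)              # torch.Size([2, 8, 16])
    print(Activation('glu', dim=-1)(x).shape)       # GLU splits the last dim: torch.Size([2, 8, 8])
    print(Activation('glu-relu', dim=-1)(x).shape)  # gated variant with a ReLU gate: torch.Size([2, 8, 8])
    print(Activation('modrelu', size=16)(x).shape)  # torch.Size([2, 8, 16])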
| state-spaces-main | src/models/nn/activation.py |
"""Utility wrappers around modules to let them handle extra arguments."""
import inspect
from functools import wraps
import torch
from torch import nn
def wrap_kwargs(f):
"""Wrap a Callable to pass through extra arguments.
Given a callable f that can consume some named arguments,
wrap it with a kwargs that passes back any unused args
EXAMPLES
--------
Basic usage:
def foo(x, y=None):
return x
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'z': 2})
--------
The wrapped function can return its own argument dictionary,
which gets merged with the new kwargs.
def foo(x, y=None):
return x, {}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'z': 2})
def foo(x, y=None):
return x, {"y": y, "z": None}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'y': 1, 'z': 2})
--------
The wrapped function can have its own kwargs parameter:
def foo(x, y=None, **kw_args):
return x, {}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {})
--------
Partial functions and modules work automatically:
class Module:
def forward(self, x, y=0):
return x, {"y": y+1}
m = Module()
wrap_kwargs(m.forward)(0, y=1, z=2) == (0, {'y': 2, 'z': 2})
"""
sig = inspect.signature(f)
# Check if f already has kwargs
has_kwargs = any([
param.kind == inspect.Parameter.VAR_KEYWORD
for param in sig.parameters.values()
])
if has_kwargs:
@wraps(f)
def f_kwargs(*args, **kwargs):
y = f(*args, **kwargs)
if isinstance(y, tuple) and isinstance(y[-1], dict):
return y
else:
return y, {}
else:
param_kwargs = inspect.Parameter("kwargs", kind=inspect.Parameter.VAR_KEYWORD)
sig_kwargs = inspect.Signature(parameters=list(sig.parameters.values())+[param_kwargs])
@wraps(f)
def f_kwargs(*args, **kwargs):
bound = sig_kwargs.bind(*args, **kwargs)
if "kwargs" in bound.arguments:
kwargs = bound.arguments.pop("kwargs")
else:
kwargs = {}
y = f(**bound.arguments)
if isinstance(y, tuple) and isinstance(y[-1], dict):
return *y[:-1], {**y[-1], **kwargs}
else:
return y, kwargs
return f_kwargs
def discard_kwargs(f):
if f is None: return None
f_kwargs = wrap_kwargs(f)
@wraps(f)
def f_(*args, **kwargs):
return f_kwargs(*args, **kwargs)[0]
return f_
def PassthroughSequential(*modules):
"""Special Sequential module that chains kwargs.
Semantics are the same as nn.Sequential, with extra convenience features:
- Discard None modules
- Flatten inner Sequential modules
- In case with 0 or 1 Module, rename the class for ease of inspection
"""
def flatten(module):
if isinstance(module, nn.Sequential):
return sum([flatten(m) for m in module], [])
else:
return [module]
modules = flatten(nn.Sequential(*modules))
    modules = [module for module in modules if module is not None]
class Sequential(nn.Sequential):
def forward(self, x, **kwargs):
for layer in self:
x, kwargs = wrap_kwargs(layer.forward)(x, **kwargs)
return x, kwargs
def step(self, x, **kwargs):
for layer in self:
fn = getattr(layer, "step", layer.forward)
x, kwargs = wrap_kwargs(fn)(x, **kwargs)
return x, kwargs
if len(modules) == 0:
Sequential.__name__ = "Identity"
elif len(modules) == 1:
Sequential.__name__ = type(modules[0]).__name__
return Sequential(*modules)
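# Usage sketch (illustrative, not part of the original module): PassthroughSequential
# threads unused keyword arguments (e.g. a recurrent state) through layers that do not
# accept them, and drops None modules. The layers and kwargs below are assumptions.
if __name__ == "__main__":
    block = PassthroughSequential(nn.Linear(16, 32), None, nn.ReLU())  # None is discarded
    x = torch.randn(4, 16)
    y, kwargs = block(x, state=None, lengths=torch.tensor([4, 3, 2, 1]))
    print(y.shape, sorted(kwargs))  # unused kwargs come back alongside the output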
| state-spaces-main | src/models/nn/utils.py |
"""Defines flexible gating mechanisms.
Based on ideas from LSSL paper and UR-LSTM paper (https://arxiv.org/abs/1910.09890).
"""
import torch
import torch.nn as nn
class Gate(nn.Module):
"""Implements gating mechanisms.
    The LSSL paper elaborates on the most important connection: a standard sigmoid gate
    is equivalent to an exponential parameterization + backward Euler discretization.
Mechanisms:
N - No gate
G - Standard sigmoid gate
UR - Uniform refine gates
R - Refine gate
FS - Forward discretization, Sigmoid activation [equivalent to G]
BE - Backward discretization, Exp activation [equivalent to G]
BR - Backward discretization, Relu activation
TE - Trapezoid discretization, Exp activation
TR - Trapezoid discretization, Relu activation
TS - Trapezoid discretization, Sigmoid activation (0 to 2)
"""
def __init__(self, size, preact_ctor, preact_args, mechanism='N'):
super().__init__()
self.size = size
self.mechanism = mechanism
if self.mechanism == 'N':
pass
elif self.mechanism in ['G', 'FS', 'BE', 'BR', 'TE', 'TR', 'TS', 'ZE', 'ZR', 'ZS']:
self.W_g = preact_ctor(*preact_args)
elif self.mechanism in ['U', 'UT']:
self.W_g = preact_ctor(*preact_args)
b_g_unif = torch.empty(size)
torch.nn.init.uniform_(b_g_unif, 1./self.size, 1.-1./self.size)
self.b_g = nn.Parameter(torch.log(1./b_g_unif-1.).detach(), requires_grad=False)
elif self.mechanism == 'UR':
self.W_g = preact_ctor(*preact_args)
self.W_r = preact_ctor(*preact_args)
b_g_unif = torch.empty(size)
torch.nn.init.uniform_(b_g_unif, 1./self.size, 1.-1./self.size)
self.b_g = nn.Parameter(torch.log(1./b_g_unif-1.).detach(), requires_grad=False)
elif self.mechanism == 'R':
self.W_g = preact_ctor(*preact_args)
self.W_r = preact_ctor(*preact_args)
elif self.mechanism in ['GT']:
self.W_g = preact_ctor(*preact_args)
else:
assert False, f'Gating type {self.mechanism} is not supported.'
def forward(self, *inputs):
if self.mechanism == 'N':
return 1.0
if self.mechanism == 'G':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
        elif self.mechanism == 'U':  # elif (not if) so the 'G' case above does not fall into the else branch and recompute W_g
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
elif self.mechanism == 'UR':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
r = torch.sigmoid(self.W_r(*inputs))
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'R':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
r = torch.sigmoid(self.W_r(*inputs))
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'UT':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
r = g
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'GT':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
r = g
g = (1-2*r)*g**2 + 2*r*g
else:
g_preact = self.W_g(*inputs)
# if self.mechanism[1] == 'S':
# g = torch.sigmoid(g_preact)
# elif self.mechanism[1] == 'E':
# g = torch.exp(g_preact)
# elif self.mechanism[1] == 'R':
# g = torch.relu(g_preact)
if self.mechanism == 'FS':
g = torch.sigmoid(g_preact)
g = self.forward_diff(g)
elif self.mechanism == 'BE':
g = torch.exp(g_preact)
g = self.backward_diff(g)
elif self.mechanism == 'BR':
g = torch.relu(g_preact)
g = self.backward_diff(g)
elif self.mechanism == 'TS':
g = 2 * torch.sigmoid(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'TE':
g = torch.exp(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'TR':
g = torch.relu(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'ZE':
g = torch.exp(g_preact)
g = self.zoh(g)
elif self.mechanism == 'ZR':
g = torch.relu(g_preact)
g = self.zoh(g)
elif self.mechanism == 'ZS':
g = torch.sigmoid(g_preact)
g = self.zoh(g)
return g
def forward_diff(self, x):
return x
def backward_diff(self, x):
return x / (1+x)
def trapezoid(self, x):
return x / (1 + x/2)
def zoh(self, x):
return 1 - torch.exp(-x)
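# Usage sketch (illustrative, not part of the original module): a Gate takes a constructor
# for the pre-activation map (here nn.Linear) plus its arguments, and a mechanism string.
# Dimensions below are assumptions chosen for the example.
if __name__ == "__main__":
    d_input, size = 16, 8
    gate = Gate(size, preact_ctor=nn.Linear, preact_args=(d_input, size), mechanism='UR')
    x = torch.randn(4, d_input)
    g = gate(x)                 # (4, size), values in (0, 1)
    print(g.shape, float(g.min()), float(g.max()))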
| state-spaces-main | src/models/nn/gate.py |
"""Wrapper around expRNN's Orthogonal class for convenience."""
from .exprnn.orthogonal import Orthogonal
from .exprnn.trivializations import expm, cayley_map
from .exprnn.initialization import henaff_init_, cayley_init_
param_name_to_param = {'cayley': cayley_map, 'expm': expm}
init_name_to_init = {'henaff': henaff_init_, 'cayley': cayley_init_}
class OrthogonalLinear(Orthogonal):
def __init__(self, d_input, d_output, method='dtriv', init='cayley', K=100):
"""Wrapper around expRNN's Orthogonal class taking care of parameter names."""
if method == "exprnn":
mode = "static"
param = 'expm'
elif method == "dtriv":
# We use 100 as the default to project back to the manifold.
# This parameter does not really affect the convergence of the algorithms, even for K=1
mode = ("dynamic", K, 100) # TODO maybe K=30? check exprnn codebase
param = 'expm'
elif method == "cayley":
mode = "static"
param = 'cayley'
else:
assert False, f"OrthogonalLinear: orthogonal method {method} not supported"
param = param_name_to_param[param]
init_A = init_name_to_init[init]
super().__init__(d_input, d_output, init_A, mode, param)
# Scale LR by factor of 10
self.A._lr_scale = 0.1
| state-spaces-main | src/models/nn/orthogonal.py |
"""Utility nn components, in particular handling activations, initializations, and normalization layers."""
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
def stochastic_depth(input: torch.Tensor, p: float, mode: str, training: bool = True):
"""
Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth"
<https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual
branches of residual architectures.
Args:
        input (Tensor[N, ...]): The input tensor of arbitrary dimensions with the first one
                    being its batch, i.e. a batch with ``N`` rows.
p (float): probability of the input to be zeroed.
mode (str): ``"batch"`` or ``"row"``.
``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes
randomly selected rows from the batch.
training: apply stochastic depth if is ``True``. Default: ``True``
Returns:
Tensor[N, ...]: The randomly zeroed tensor.
"""
if p < 0.0 or p > 1.0:
raise ValueError("drop probability has to be between 0 and 1, but got {}".format(p))
if mode not in ["batch", "row"]:
raise ValueError("mode has to be either 'batch' or 'row', but got {}".format(mode))
if not training or p == 0.0:
return input
survival_rate = 1.0 - p
if mode == "row":
size = [input.shape[0]] + [1] * (input.ndim - 1)
else:
size = [1] * input.ndim
noise = torch.empty(size, dtype=input.dtype, device=input.device)
noise = noise.bernoulli_(survival_rate).div_(survival_rate)
return input * noise
class StochasticDepth(nn.Module):
"""
See :func:`stochastic_depth`.
"""
def __init__(self, p: float, mode: str) -> None:
# TODO(karan): need to upgrade to torchvision==0.11.0 to use StochasticDepth directly
# from torchvision.ops import StochasticDepth
super().__init__()
self.p = p
self.mode = mode
def forward(self, input):
return stochastic_depth(input, self.p, self.mode, self.training)
def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + '('
tmpstr += 'p=' + str(self.p)
tmpstr += ', mode=' + str(self.mode)
tmpstr += ')'
return tmpstr
class DropoutNd(nn.Module):
def __init__(self, p: float = 0.5, tie=True, transposed=True):
"""
tie: tie dropout mask across sequence lengths (Dropout1d/2d/3d)
"""
super().__init__()
if p < 0 or p >= 1:
raise ValueError("dropout probability has to be in [0, 1), " "but got {}".format(p))
self.p = p
self.tie = tie
self.transposed = transposed
self.binomial = torch.distributions.binomial.Binomial(probs=1-self.p)
def forward(self, X):
"""X: (batch, dim, lengths...)."""
if self.training:
if not self.transposed: X = rearrange(X, 'b ... d -> b d ...')
# binomial = torch.distributions.binomial.Binomial(probs=1-self.p) # This is incredibly slow because of CPU -> GPU copying
mask_shape = X.shape[:2] + (1,)*(X.ndim-2) if self.tie else X.shape
# mask = self.binomial.sample(mask_shape)
mask = torch.rand(*mask_shape, device=X.device) < 1.-self.p
X = X * mask * (1.0/(1-self.p))
if not self.transposed: X = rearrange(X, 'b d ... -> b ... d')
return X
return X
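# Usage sketch (illustrative, not part of the original module): DropoutNd with a mask tied
# across the length dimension, on a (B, D, L) tensor. Shapes are assumptions for the example.
if __name__ == "__main__":
    drop = DropoutNd(p=0.25, tie=True, transposed=True)
    drop.train()
    x = torch.randn(2, 4, 10)           # (batch, dim, length)
    y = drop(x)
    # With tie=True, each (batch, channel) pair is either fully kept or fully zeroed.
    print(y.shape, (y == 0).all(dim=-1).float().mean().item())
    sd = StochasticDepth(p=0.2, mode='row')
    print(sd(x).shape)                  # randomly zeroes whole rows of the batch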
| state-spaces-main | src/models/nn/dropout.py |
"""Implementations of several types of Discrete Sin/Cosine Transforms with various reductions to FFT.
Currently not used by S4.
"""
import torch
import torch.nn as nn
import numpy as np
import scipy.fft
from einops import rearrange, repeat
class DCT(nn.Module):
"""Reductions adapted from https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft."""
def __init__(self, N, norm='backward'):
super().__init__()
self.N = N
# Materialize DCT matrix
P = scipy.fft.dct(np.eye(N), norm=norm, type=2).T
P = torch.tensor(P, dtype=torch.float)
self.register_buffer('P', P)
# TODO take care of normalization
Q = np.exp(-1j * np.pi / (2 * self.N) * np.arange(self.N))
Q = torch.tensor(Q, dtype=torch.cfloat)
self.register_buffer('Q', Q) # half shift
def forward(self, x, mode=2):
if mode == 0:
return self.forward_dense(x)
elif mode == 1:
return self.forward_n(x)
elif mode == 2:
return self.forward_2n(x)
elif mode == 4:
return self.forward_4n(x)
def forward_dense(self, x):
"""Baseline DCT type II - matmul by DCT matrix."""
y = (self.P.to(x) @ x.unsqueeze(-1)).squeeze(-1)
return y
def forward_4n(self, x):
"""DCT type II - reduction to FFT size 4N."""
assert self.N == x.shape[-1]
x = torch.cat([x, x.flip(-1)], dim=-1)
z = torch.zeros_like(x)
x = torch.stack([z, x], dim=-1)
x = x.view(x.shape[:-2] + (-1,))
y = torch.fft.fft(x)
y = y[..., :self.N]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_2n(self, x):
"""DCT type II - reduction to FFT size 2N mirrored.
The reduction from the DSP forum is not quite correct in the complex input case.
halfshift(FFT[a, b, c, d, d, c, b, a]) -> [A, B, C, D, 0, -D, -C, -B]
In the case of real input, the intermediate step after FFT has form [A, B, C, D, 0, D*, C*, B*]
"""
assert self.N == x.shape[-1]
x = torch.cat([x, x.flip(-1)], dim=-1)
y = torch.fft.fft(x)[..., :self.N]
y = y * self.Q
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_n(self, x):
""" DCT type II - reduction to size N """
assert self.N == x.shape[-1]
x = torch.cat([x[..., 0::2], x[..., 1::2].flip(-1)], dim=-1)
y = torch.fft.fft(x)
y = y * 2 * self.Q
if torch.is_complex(x):
y = torch.cat([y[..., :1], (y[..., 1:] + 1j * y[..., 1:].flip(-1)) / 2], dim=-1) # TODO in-place sum
else:
y = torch.real(y)
return y
class IDCT(nn.Module):
def __init__(self, N, norm='backward'):
super().__init__()
self.N = N
# Materialize DCT matrix
P = np.linalg.inv(scipy.fft.dct(np.eye(N), norm=norm, type=2).T)
P = torch.tensor(P, dtype=torch.float)
self.register_buffer('P', P)
# TODO take care of normalization
Q = np.exp(-1j * np.pi / (2 * self.N) * np.arange(2*self.N))
Q = torch.tensor(Q, dtype=torch.cfloat)
self.register_buffer('Q', Q) # half shift
def forward(self, x, mode=2):
if mode == 0:
return self.forward_dense(x)
elif mode == 1:
return self.forward_n(x)
elif mode == 2:
return self.forward_2n(x)
elif mode == 4:
return self.forward_4n(x)
def forward_dense(self, x):
"""Baseline DCT type II - matmul by DCT matrix."""
y = (self.P.to(x) @ x.unsqueeze(-1)).squeeze(-1)
return y
def forward_4n(self, x):
"""DCT type II - reduction to FFT size 4N."""
assert self.N == x.shape[-1]
z = x.new_zeros(x.shape[:-1] + (1,))
x = torch.cat([x, z, -x.flip(-1), -x[..., 1:], z, x[..., 1:].flip(-1)], dim=-1)
y = torch.fft.ifft(x)
y = y[..., 1:2*self.N:2]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_2n(self, x):
"""DCT type II - reduction to FFT size 2N mirrored."""
assert self.N == x.shape[-1]
z = x.new_zeros(x.shape[:-1] + (1,))
x = torch.cat([x, z, -x[..., 1:].flip(-1)], dim=-1)
x = x / self.Q
y = torch.fft.ifft(x)[..., :self.N]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_n(self, x):
"""DCT type II - reduction to size N."""
assert self.N == x.shape[-1]
raise NotImplementedError # Straightforward by inverting operations of DCT-II reduction
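# Sanity-check sketch (illustrative, not part of the original module): the dense DCT matrix
# and the size-2N FFT reduction should agree on real inputs, and the dense IDCT should invert
# the dense DCT. Sizes and tolerances below are assumptions for the example.
if __name__ == "__main__":
    N = 16
    x = torch.randn(3, N)
    dct, idct = DCT(N), IDCT(N)
    y_dense = dct(x, mode=0)
    y_fft = dct(x, mode=2)
    print(torch.allclose(y_dense, y_fft, atol=1e-3))            # FFT reduction matches matmul
    print(torch.allclose(idct(y_dense, mode=0), x, atol=1e-3))  # dense IDCT inverts dense DCT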
| state-spaces-main | src/models/nn/dxt.py |
"""Normalization modules."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
class Normalization(nn.Module):
def __init__(
self,
d,
transposed=False, # Length dimension is -1 or -2
_name_='layer',
**kwargs
):
super().__init__()
self.transposed = transposed
self._name_ = _name_
if _name_ == 'layer':
self.channel = True # Normalize over channel dimension
if self.transposed:
self.norm = TransposedLN(d, **kwargs)
else:
self.norm = nn.LayerNorm(d, **kwargs)
elif _name_ == 'instance':
self.channel = False
norm_args = {'affine': False, 'track_running_stats': False}
norm_args.update(kwargs)
self.norm = nn.InstanceNorm1d(d, **norm_args) # (True, True) performs very poorly
elif _name_ == 'batch':
self.channel = False
norm_args = {'affine': True, 'track_running_stats': True}
norm_args.update(kwargs)
self.norm = nn.BatchNorm1d(d, **norm_args)
elif _name_ == 'group':
self.channel = False
self.norm = nn.GroupNorm(1, d, **kwargs)
elif _name_ == 'none':
self.channel = True
self.norm = nn.Identity()
else: raise NotImplementedError
def forward(self, x):
# Handle higher dimension logic
shape = x.shape
if self.transposed:
x = rearrange(x, 'b d ... -> b d (...)')
else:
x = rearrange(x, 'b ... d -> b (...) d')
# The cases of LayerNorm / no normalization are automatically handled in all cases
# Instance/Batch Norm work automatically with transposed axes
if self.channel or self.transposed:
x = self.norm(x)
else:
x = x.transpose(-1, -2)
x = self.norm(x)
x = x.transpose(-1, -2)
x = x.view(shape)
return x
def step(self, x, **kwargs):
assert self._name_ in ["layer", "none"]
if self.transposed: x = x.unsqueeze(-1)
x = self.forward(x)
if self.transposed: x = x.squeeze(-1)
return x
class TransposedLN(nn.Module):
"""LayerNorm module over second dimension.
    Assumes shape (B, D, L), where L can be one or more axes.
    This is slow, and a dedicated CUDA/Triton implementation should provide substantial end-to-end speedup.
"""
def __init__(self, d, scalar=True):
super().__init__()
self.scalar = scalar
if self.scalar:
self.m = nn.Parameter(torch.zeros(1))
self.s = nn.Parameter(torch.ones(1))
setattr(self.m, "_optim", {"weight_decay": 0.0})
setattr(self.s, "_optim", {"weight_decay": 0.0})
else:
self.ln = nn.LayerNorm(d)
def forward(self, x):
if self.scalar:
# calc. stats over D dim / channels
s, m = torch.std_mean(x, dim=1, unbiased=False, keepdim=True)
y = (self.s/s) * (x-m+self.m)
else:
# move channel to last axis, apply layer_norm, then move channel back to second axis
_x = self.ln(rearrange(x, 'b d ... -> b ... d'))
y = rearrange(_x, 'b ... d -> b d ...')
return y
class TSNormalization(nn.Module):
def __init__(self, method, horizon):
super().__init__()
self.method = method
self.horizon = horizon
def forward(self, x):
# x must be BLD
if self.method == 'mean':
self.scale = x.abs()[:, :-self.horizon].mean(dim=1)[:, None, :]
return x / self.scale
elif self.method == 'last':
self.scale = x.abs()[:, -self.horizon-1][:, None, :]
return x / self.scale
return x
class TSInverseNormalization(nn.Module):
def __init__(self, method, normalizer):
super().__init__()
self.method = method
self.normalizer = normalizer
def forward(self, x):
if self.method == 'mean' or self.method == 'last':
return x * self.normalizer.scale
return x
class ReversibleInstanceNorm1dInput(nn.Module):
def __init__(self, d, transposed=False):
super().__init__()
        # BLD if transposed is False, otherwise BDL
self.transposed = transposed
self.norm = nn.InstanceNorm1d(d, affine=True, track_running_stats=False)
def forward(self, x):
# Means, stds
if not self.transposed:
x = x.transpose(-1, -2)
self.s, self.m = torch.std_mean(x, dim=-1, unbiased=False, keepdim=True)
self.s += 1e-4
x = (x - self.m) / self.s
# x = self.norm.weight.unsqueeze(-1) * x + self.norm.bias.unsqueeze(-1)
if not self.transposed:
return x.transpose(-1, -2)
return x
class ReversibleInstanceNorm1dOutput(nn.Module):
def __init__(self, norm_input):
super().__init__()
self.transposed = norm_input.transposed
self.weight = norm_input.norm.weight
self.bias = norm_input.norm.bias
self.norm_input = norm_input
def forward(self, x):
if not self.transposed:
x = x.transpose(-1, -2)
# x = (x - self.bias.unsqueeze(-1))/self.weight.unsqueeze(-1)
x = x * self.norm_input.s + self.norm_input.m
if not self.transposed:
return x.transpose(-1, -2)
return x
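# Usage sketch (illustrative, not part of the original module): the Normalization wrapper in
# both layouts, and TransposedLN checked against nn.LayerNorm. Shapes are assumptions.
if __name__ == "__main__":
    x_bld = torch.randn(2, 10, 16)          # (B, L, D)
    x_bdl = x_bld.transpose(-1, -2)         # (B, D, L)
    print(Normalization(16, transposed=False, _name_='layer')(x_bld).shape)
    print(Normalization(16, transposed=True, _name_='batch')(x_bdl).shape)
    tln = TransposedLN(16, scalar=False)
    ref = nn.LayerNorm(16)
    out = tln(x_bdl)                        # LayerNorm over the channel dim of (B, D, L)
    print(torch.allclose(out, ref(x_bld).transpose(-1, -2), atol=1e-5))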
| state-spaces-main | src/models/nn/normalization.py |
# Downloaded from https://github.com/Lezcano/expRNN
#
# Authors: Travis Oliphant, March 2002
# Anthony Scopatz, August 2012 (Sparse Updates)
# Jake Vanderplas, August 2012 (Sparse Updates)
#
"""Adaptation of expm and expm_frechet in numpy for torch."""
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import torch
import scipy.special
def _onenorm_matrix_power_nnm(A, p):
"""Compute the 1-norm of a non-negative integer power of a non-negative matrix.
Parameters
----------
A : a square ndarray or matrix or sparse matrix
Input matrix with non-negative entries.
p : non-negative integer
The power to which the matrix is to be raised.
Returns
-------
out : float
The 1-norm of the matrix power p of A.
"""
# check input
if int(p) != p or p < 0:
raise ValueError('expected non-negative integer p')
p = int(p)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# Explicitly make a column vector so that this works when A is a
# numpy matrix (in addition to ndarray and sparse matrix).
v = torch.ones((A.shape[0], 1), dtype=A.dtype, device=A.device)
M = A.t()
for _ in range(p):
v = M.mm(v)
return torch.max(v).item()
def _onenorm(A):
return torch.norm(A, 1).item()
def _ident_like(A):
return torch.eye(A.shape[0], A.shape[1], dtype=A.dtype, device=A.device)
class _ExpmPadeHelper(object):
"""Help lazily evaluate a matrix exponential.
The idea is to not do more work than we need for high expm precision,
so we lazily compute matrix powers and store or precompute
other properties of the matrix.
"""
def __init__(self, A):
"""
Initialize the object.
Parameters
----------
A : a dense or sparse square numpy matrix or ndarray
The matrix to be exponentiated.
"""
self.A = A
self._A2 = None
self._A4 = None
self._A6 = None
self._A8 = None
self._A10 = None
self._d4_exact = None
self._d6_exact = None
self._d8_exact = None
self._d10_exact = None
self._d4_approx = None
self._d6_approx = None
self._d8_approx = None
self._d10_approx = None
self.ident = _ident_like(A)
@property
def A2(self):
if self._A2 is None:
self._A2 = self.A.mm(self.A)
return self._A2
@property
def A4(self):
if self._A4 is None:
self._A4 = self.A2.mm(self.A2)
return self._A4
@property
def A6(self):
if self._A6 is None:
self._A6 = self.A4.mm(self.A2)
return self._A6
@property
def A8(self):
if self._A8 is None:
self._A8 = self.A6.mm(self.A2)
return self._A8
@property
def A10(self):
if self._A10 is None:
self._A10 = self.A4.mm(self.A6)
return self._A10
@property
def d4_tight(self):
if self._d4_exact is None:
self._d4_exact = _onenorm(self.A4)**(1/4.)
return self._d4_exact
@property
def d6_tight(self):
if self._d6_exact is None:
self._d6_exact = _onenorm(self.A6)**(1/6.)
return self._d6_exact
@property
def d8_tight(self):
if self._d8_exact is None:
self._d8_exact = _onenorm(self.A8)**(1/8.)
return self._d8_exact
@property
def d10_tight(self):
if self._d10_exact is None:
self._d10_exact = _onenorm(self.A10)**(1/10.)
return self._d10_exact
@property
def d4_loose(self):
return self.d4_tight
@property
def d6_loose(self):
return self.d6_tight
@property
def d8_loose(self):
return self.d8_tight
@property
def d10_loose(self):
return self.d10_tight
def pade3(self):
b = (120., 60., 12., 1.)
U = self.A.mm(b[3]*self.A2 + b[1]*self.ident)
V = b[2]*self.A2 + b[0]*self.ident
return U, V
def pade5(self):
b = (30240., 15120., 3360., 420., 30., 1.)
U = self.A.mm(b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident)
V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade7_scaled(self, s):
b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
B = self.A * 2**-s
B2 = self.A2 * 2**(-2*s)
B4 = self.A4 * 2**(-4*s)
B6 = self.A6 * 2**(-6*s)
U = B.mm(b[7]*B6 + b[5]*B4 + b[3]*B2 + b[1]*self.ident)
V = b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
return U, V
def expm32(A):
"""Compute the matrix exponential using Pade approximation.
Parameters
----------
A : (M,M) array_like or sparse matrix
2D Array or Matrix (sparse or dense) to be exponentiated
Returns
-------
expA : (M,M) ndarray
Matrix exponential of `A`
Notes
-----
This is algorithm (6.1) which is a simplification of algorithm (5.1).
.. versionadded:: 0.12.0
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
"A New Scaling and Squaring Algorithm for the Matrix Exponential."
SIAM Journal on Matrix Analysis and Applications.
31 (3). pp. 970-989. ISSN 1095-7162
"""
return _expm(A)
def _expm(A):
# Core of expm, separated to allow testing exact and approximate
# algorithms.
# Avoid indiscriminate asarray() to allow sparse or other strange arrays.
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
# Trivial case
if A.shape == (1, 1):
return torch.exp(A)
# Track functions of A to help compute the matrix exponential.
h = _ExpmPadeHelper(A)
# Try Pade order 3.
eta_1 = max(h.d4_loose, h.d6_loose)
theta3 = 4.2587300348979312e-001
if eta_1 < theta3 and _ell(h.A, 3) == 0:
U, V = h.pade3()
return _solve_P_Q(U, V)
# Try Pade order 5.
eta_2 = max(h.d4_tight, h.d6_loose)
theta5 = 1.8801526985337688e+000
if eta_2 < theta5 and _ell(h.A, 5) == 0:
U, V = h.pade5()
return _solve_P_Q(U, V)
theta_7 = 3.9257248464332842e+000
eta_3 = max(h.d6_tight, h.d8_loose)
s = max(int(np.ceil(np.log2(eta_3 / theta_7))), 0)
s += _ell(2**-s * h.A, 7)
U, V = h.pade7_scaled(s)
X = _solve_P_Q(U, V)
return torch.matrix_power(X, 2**s)
def _solve_P_Q(U, V):
P = U + V
Q = -U + V
return torch.solve(P, Q)[0]
def _ell(A, m):
"""A helper function for expm_2009.
Parameters
----------
A : linear operator
A linear operator whose norm of power we care about.
m : int
The power of the linear operator
Returns
-------
value : int
A value related to a bound.
"""
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
p = 2*m + 1
# The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
# They are coefficients of terms of a generating function series expansion.
choose_2p_p = scipy.special.comb(2*p, p, exact=True)
abs_c_recip = float(choose_2p_p * math.factorial(2*p + 1))
# This is explained after Eq. (1.2) of the 2009 expm paper.
# It is the "unit roundoff" of IEEE double precision arithmetic.
u = 2.**-24
# Compute the one-norm of matrix power p of abs(A).
A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), p)
# Treat zero norm as a special case.
if not A_abs_onenorm:
return 0
alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
return max(int(np.ceil(np.log2(alpha/u) / (2 * m))), 0)
def differential(f, A, E):
"""Computes the differential of f at A when acting on E: (df)_A(E)."""
n = A.size(0)
M = torch.zeros(2*n, 2*n, dtype=A.dtype, device=A.device, requires_grad=False)
M[:n, :n] = A
M[n:, n:] = A
M[:n, n:] = E
return f(M)[:n, n:]
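# Sanity-check sketch (illustrative, not part of the original file): compare expm32 against
# torch.matrix_exp on a random skew-symmetric matrix, whose exponential is orthogonal. Note
# that _solve_P_Q above relies on torch.solve, so this assumes a PyTorch version where that
# API is still available.
if __name__ == "__main__":
    A = torch.randn(8, 8)
    A = A - A.t()                        # skew-symmetric
    E = expm32(A)
    print(torch.allclose(E, torch.matrix_exp(A), atol=1e-4))
    print(torch.allclose(E.t() @ E, torch.eye(8), atol=1e-4))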
| state-spaces-main | src/models/nn/exprnn/expm32.py |
# Downloaded from https://github.com/Lezcano/expRNN
import torch
import numpy as np
import scipy.linalg as la
def henaff_init_(A):
size = A.size(0) // 2
diag = A.new(size).uniform_(-np.pi, np.pi)
return create_diag_(A, diag)
def cayley_init_(A):
size = A.size(0) // 2
diag = A.new(size).uniform_(0., np.pi / 2.)
diag = -torch.sqrt((1. - torch.cos(diag))/(1. + torch.cos(diag)))
return create_diag_(A, diag)
# We include a few more initializations that could be useful for other problems
def haar_init_(A):
"""Haar initialization on SO(n)."""
torch.nn.init.orthogonal_(A)
with torch.no_grad():
if A.det() < 0.:
# Go bijectively from O^-(n) to O^+(n) \iso SO(n)
idx = np.random.randint(0, A.size(0))
A[idx] *= -1.
An = la.logm(A.data.cpu().numpy()).real
An = .5 * (An - An.T)
A.copy_(torch.tensor(An))
return A
def haar_diag_init_(A):
"""Block-diagonal skew-symmetric matrix with eigenvalues distributed as those from a Haar."""
haar_init_(A)
with torch.no_grad():
An = A.data.cpu().numpy()
eig = la.eigvals(An).imag
eig = eig[::2]
if A.size(0) % 2 == 1:
eig = eig[:-1]
eig = torch.tensor(eig)
return create_diag_(A, eig)
def normal_squeeze_diag_init_(A):
size = A.size(0) // 2
diag = A.new(size).normal_(0, 1).fmod_(np.pi/8.)
return create_diag_(A, diag)
def normal_diag_init_(A):
size = A.size(0) // 2
diag = A.new(size).normal_(0, 1).fmod_(np.pi)
return create_diag_(A, diag)
def create_diag_(A, diag):
n = A.size(0)
diag_z = torch.zeros(n-1)
diag_z[::2] = diag
A_init = torch.diag(diag_z, diagonal=1)
A_init = A_init - A_init.T
with torch.no_grad():
A.copy_(A_init)
return A
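# Sanity-check sketch (illustrative, not part of the original file): the initializers fill A
# with a skew-symmetric block-diagonal matrix, which is the Lie-algebra parametrization used
# by the orthogonal layers. The size below is an assumption for the example.
if __name__ == "__main__":
    A = torch.empty(8, 8)
    henaff_init_(A)
    print(torch.allclose(A, -A.t()))     # skew-symmetric
    cayley_init_(A)
    print(torch.allclose(A, -A.t()))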
| state-spaces-main | src/models/nn/exprnn/initialization.py |
# Adapted from https://github.com/Lezcano/expRNN
import torch
import torch.nn as nn
from .parametrization import Parametrization
from src.models.nn.activation import ModReLU
class Orthogonal(Parametrization):
"""Class that implements optimization restricted to the Stiefel manifold."""
def __init__(self, d_input, d_output, initializer_skew, mode, param):
"""
mode: "static" or a tuple such that:
mode[0] == "dynamic"
mode[1]: int, K, the number of steps after which we should change the basis of the dyn triv
mode[2]: int, M, the number of changes of basis after which we should project back onto the manifold the basis. This is particularly helpful for small values of K.
        param: A parametrization in terms of skew-symmetric matrices
"""
max_size = max(d_input, d_output)
A = torch.empty(max_size, max_size)
base = torch.empty(d_input, d_output)
super(Orthogonal, self).__init__(A, base, mode)
self.d_input = d_input
self.d_output = d_output
self.param = param
self.init_A = initializer_skew
self.init_base = nn.init.eye_
self.reset_parameters()
def reset_parameters(self):
self.init_A(self.A)
self.init_base(self.base)
def forward(self, input):
return input.matmul(self.B)
def retraction(self, A, base):
# This could be any parametrization of a tangent space
A = A.triu(diagonal=1)
A = A - A.t()
B = base.mm(self.param(A))
if self.d_input != self.d_output:
B = B[:self.d_input, :self.d_output]
return B
def project(self, base):
try:
# Compute the projection using the thin SVD decomposition
U, _, V = torch.svd(base, some=True)
return U.mm(V.t())
except RuntimeError:
# If the svd does not converge, fallback to the (thin) QR decomposition
x = base
if base.size(0) < base.size(1):
x = base.t()
ret = torch.qr(x, some=True).Q
if base.size(0) < base.size(1):
ret = ret.t()
return ret
class OrthogonalRNN(nn.Module):
def __init__(self, d_input, d_model, initializer_skew, mode, param):
super(OrthogonalRNN, self).__init__()
self.d_input = d_input
self.d_model = d_model
self.recurrent_kernel = Orthogonal(d_model, d_model, initializer_skew, mode, param=param)
self.input_kernel = nn.Linear(in_features=self.d_input, out_features=self.d_model, bias=False)
self.nonlinearity = ModReLU(d_model)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_normal_(self.input_kernel.weight.data, nonlinearity="relu")
def default_hidden(self, input):
return input.new_zeros(input.size(0), self.d_model, requires_grad=False)
def forward(self, input, hidden):
input = self.input_kernel(input)
hidden = self.recurrent_kernel(hidden)
out = input + hidden
out = self.nonlinearity(out)
return out, out
| state-spaces-main | src/models/nn/exprnn/orthogonal.py |
# Downloaded from https://github.com/Lezcano/expRNN
import torch
from .expm32 import expm32, differential
def cayley_map(X):
n = X.size(0)
Id = torch.eye(n, dtype=X.dtype, device=X.device)
return torch.solve(Id - X, Id + X)[0]
class expm_class(torch.autograd.Function):
@staticmethod
def forward(ctx, A):
ctx.save_for_backward(A)
return expm32(A)
@staticmethod
def backward(ctx, G):
(A,) = ctx.saved_tensors
return differential(expm32, A.t(), G)
expm = expm_class.apply
| state-spaces-main | src/models/nn/exprnn/trivializations.py |
# Downloaded from https://github.com/Lezcano/expRNN
import torch
import torch.nn as nn
def get_parameters(model):
parametrized_params = []
def get_parametrized_params(mod):
nonlocal parametrized_params
if isinstance(mod, Parametrization):
parametrized_params.append(mod.A)
def not_in(elem, l):
return all(elem is not x for x in l)
model.apply(get_parametrized_params)
unconstrained_params = (param for param in model.parameters() if not_in(param, parametrized_params))
return unconstrained_params, parametrized_params
class Parametrization(nn.Module):
"""Implements the parametrization of a manifold in terms of a Euclidean space.
It gives the parametrized matrix through the attribute `B`.
To use it, subclass it and implement the method `retraction` and the method `forward` (and optionally `project`). See the documentation in these methods for details.
You can find an example in the file `orthogonal.py` where we implement the Orthogonal class to optimize over the Stiefel manifold using an arbitrary retraction.
"""
def __init__(self, A, base, mode):
"""
mode: "static" or a tuple such that:
mode[0] == "dynamic"
mode[1]: int, K, the number of steps after which we should change the basis of the dyn triv
mode[2]: int, M, the number of changes of basis after which we should project back onto the manifold the basis. This is particularly helpful for small values of K.
"""
super(Parametrization, self).__init__()
assert mode == "static" or (isinstance(mode, tuple) and len(mode) == 3 and mode[0] == "dynamic")
self.A = nn.Parameter(A)
self.register_buffer("_B", None)
self.register_buffer('base', base)
# This is necessary, as it will be generated again the first time that self.B is called
# We still need to register the buffer though
if mode == "static":
self.mode = mode
else:
self.mode = mode[0]
self.K = mode[1]
self.M = mode[2]
self.k = 0
self.m = 0
# This implements the parametrization trick in a rather slick way.
# We put a hook on A, such that, whenever its gradients are computed, we
# get rid of self._B so that it has to be recomputed the next time that
# self.B is accessed
def hook(grad):
nonlocal self
self._B = None
self.A.register_hook(hook)
def rebase(self):
with torch.no_grad():
self.base.data.copy_(self._B.data)
self.A.data.zero_()
@property
def B(self):
not_B = self._B is None
if not_B or (not self._B.grad_fn and torch.is_grad_enabled()):
self._B = self.retraction(self.A, self.base)
# Just to be safe
self._B.requires_grad_()
# Now self._B it's not a leaf tensor, so we convert it into a leaf
self._B.retain_grad()
# Increment the counters for the dyntriv algorithm if we have generated B
if self.mode == "dynamic" and not_B:
if self.k == 0:
self.rebase()
# Project the base back to the manifold every M changes of base
# Increment the counter before as we don't project the first time
self.m = (self.m + 1) % self.M
# It's optional to implement this method
if self.m == 0 and hasattr(self, "project"):
with torch.no_grad():
self.base = self.project(self.base)
# Change the basis after K optimization steps
# Increment the counter afterwards as we change the basis in the first iteration
if self.K != "infty":
self.k = (self.k + 1) % self.K
else:
# Make sure that we just update the base once
if self.k == 0:
self.k = 1
return self._B
def retraction(self, A, base):
"""Computes r_{base}(A).
Notice that A will not always be in the tangent space of our manifold.
For this reason, we first have to use A to parametrize the tangent space,
and then compute the retraction.
When dealing with Lie groups, raw_A is always projected into the Lie algebra, as an optimization (cf. Section E in the paper).
"""
raise NotImplementedError
def project(self, base):
"""
This method is OPTIONAL
It returns the projected base back into the manifold
"""
raise NotImplementedError
def forward(self, input):
"""
It uses the attribute self.B to implement the layer itself (e.g. Linear, CNN, ...)
"""
raise NotImplementedError
| state-spaces-main | src/models/nn/exprnn/parametrization.py |
"""Utilities to calculate the transitions of the HiPPO ODE x' = Ax + Bu and discrete-time recurrence approximation.
Note that these modules were heavily used in LSSL, but are no longer needed for S4.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy import special as ss
from einops import rearrange
from src.models.hippo.hippo import transition
from src.models.functional.toeplitz import causal_convolution, causal_convolution_inverse, construct_toeplitz
# TODO figure out if we actually need this
try:
from extensions.legt.legt import legt_gbt_forward, legt_gbt_backward, legt_gbt_forward_t, legt_gbt_backward_t
except:
pass
try:
from extensions.trid.trid import trid_gbt_forward, trid_gbt_backward, trid_solve
except:
pass
# from pytorch_memlab import profile
class AdaptiveTransition(nn.Module):
def __init__(self, N, params, trainable=False, lr=1.0, batch=()):
"""
params: dict of Tensors that encode the parameters of the state system A, B.
"""
super().__init__()
self.N = N
self.trainable = trainable
self.batch = batch
if self.trainable:
for name, p in params.items():
p = p.repeat(*batch, *[1]*len(p.shape))
self.register_parameter(name, nn.Parameter(p))
getattr(self, name)._lr = lr
else:
assert batch == (), "If not learnable, Transition should not have a batch dimension"
for name, p in params.items():
self.register_buffer(name, p)
# Register some common buffers
# (helps make sure every subclass has access to them on the right device)
I = torch.eye(N)
self.register_buffer('I', I)
self.register_buffer('ones', torch.ones(N))
self.register_buffer('arange', torch.arange(N))
@property
def A(self):
if self.trainable:
return self._A()
# Cache it the first time this is called
# this must be done here and not in __init__ so all tensors are on the right device
else:
if not hasattr(self, '_cached_A'):
self._cached_A = self._A()
return self._cached_A
@property
def B(self):
if self.trainable:
return self._B()
# Cache it the first time this is called
# this must be done here and not in __init__ so all tensors are on the right device
else:
if not hasattr(self, '_cached_B'):
self._cached_B = self._B()
return self._cached_B
def precompute_forward(self):
raise NotImplementedError
def precompute_backward(self):
raise NotImplementedError
def forward_mult(self, u, delta):
"""Computes (I + delta A) u.
A: (n, n)
u: (..., n)
delta: (...) or scalar
output: (..., n)
"""
raise NotImplementedError
def inverse_mult(self, u, delta): # TODO swap u, delta everywhere
"""Computes (I - d A)^-1 u."""
raise NotImplementedError
def forward_diff(self, d, u, v):
"""Computes the 'forward diff' or Euler update rule: (I - d A)^-1 u + d B v.
d: (...)
u: (..., n)
v: (...)
"""
v = d * v
v = v.unsqueeze(-1) * self.B
x = self.forward_mult(u, d)
x = x + v
return x
def backward_diff(self, d, u, v):
"""Computes the 'forward diff' or Euler update rule: (I - d A)^-1 u + d (I - d A)^-1 B v.
d: (...)
u: (..., n)
v: (...)
"""
v = d * v
v = v.unsqueeze(-1) * self.B
x = u + v
x = self.inverse_mult(x, d)
return x
def bilinear(self, dt, u, v, alpha=.5):
"""Computes the bilinear (aka trapezoid or Tustin's) update rule.
(I - d/2 A)^-1 (I + d/2 A) u + d (I - d/2 A)^-1 B v
dt: (...)
u: (..., N)
v: (...)
"""
x = self.forward_mult(u, (1-alpha)*dt)
v = dt * v
v = v.unsqueeze(-1) * self.B
x = x + v
x = self.inverse_mult(x, (alpha)*dt)
return x
def zoh(self, dt, u, v):
raise NotImplementedError
def gbt_A(self, dt, alpha=.5):
"""Compute the transition matrices associated with bilinear transform.
dt: (...) broadcastable with self.batch_shape
returns: (..., N, N)
"""
# Solve (N, ...) parallel problems of size N
dims = max(len(dt.shape), len(self.batch))
I = self.I.view([self.N] + [1]*dims + [self.N])
A = self.bilinear(dt, I, dt.new_zeros(*dt.shape), alpha=alpha) # (N, ..., N)
A = rearrange(A, 'n ... m -> ... m n', n=self.N, m=self.N)
return A
def gbt_B(self, dt, alpha=.5):
B = self.bilinear(dt, dt.new_zeros(*dt.shape, self.N), dt.new_ones(1), alpha=alpha) # (..., N)
return B
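# Hedged worked example of the update rules documented above, using the
# ManualAdaptiveTransition subclass defined below (a forward reference that is only
# resolved when the function is called). The toy A, B matrices are illustrative only.
def _demo_discretization_rules(N=4, dt=1e-2):
    A = -torch.eye(N) - torch.tril(torch.ones(N, N), -1)  # toy stable transition matrix
    B = torch.ones(N)
    trans = ManualAdaptiveTransition(N, A, B)
    d = torch.full((1,), dt)   # step size with a batch dimension
    x = torch.randn(1, N)      # current state
    v = torch.ones(1)          # scalar input
    x_fwd = trans.forward_diff(d, x, v)   # explicit (forward) Euler: (I + dA)x + dBv
    x_bwd = trans.backward_diff(d, x, v)  # implicit (backward) Euler: (I - dA)^-1 (x + dBv)
    x_bil = trans.bilinear(d, x, v)       # trapezoid / Tustin rule
    return x_fwd, x_bwd, x_bil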
class ManualAdaptiveTransition(AdaptiveTransition):
def __init__(self, N, A, B, **kwargs):
"""
A: (N, N)
B: (N,)
"""
super().__init__(N, {'a': A, 'b': B}, **kwargs)
def _A(self):
return self.a
def _B(self):
return self.b
# TODO necessary?
def precompute_forward(self, delta):
return self.I + delta*self.A
def precompute_backward(self, delta):
return torch.linalg.solve(self.I - delta*self.A, self.I)
def quadratic(self, x, y):
"""Implements the quadratic form given by the A matrix.
x : (..., N)
y : (..., N)
returns: x^T A y (...)
"""
return torch.sum((self.A @ y.unsqueeze(-1)).squeeze(-1) * x, dim=-1)
def forward_mult(self, u, delta, transpose=False):
"""Computes (I + d A) u.
A: (n, n)
u: (b1*, d, n) where d represents memory_size
delta: (b2*, d) or scalar
Assume len(b2) <= len(b1)
output: (broadcast(b1, b2)*, d, n)
"""
if isinstance(delta, torch.Tensor):
delta = delta.unsqueeze(-1)
A_ = self.A.transpose(-1, -2) if transpose else self.A
x = (A_ @ u.unsqueeze(-1)).squeeze(-1)
x = u + delta * x
return x
def inverse_mult(self, u, delta, transpose=False):
"""Computes (I - d A)^-1 u."""
if isinstance(delta, torch.Tensor):
delta = delta.unsqueeze(-1).unsqueeze(-1)
_A = self.I - delta * self.A
if transpose: _A = _A.transpose(-1, -2)
# x = torch.linalg.solve(_A, u.unsqueeze(-1)).squeeze(-1)
# TODO pass in a flag to toggle the two codepaths depending on how big the problem is
xs = []
for _A_, u_ in zip(*torch.broadcast_tensors(_A, u.unsqueeze(-1))):
x_ = torch.linalg.solve(_A_, u_[...,:1]).squeeze(-1)
xs.append(x_)
x = torch.stack(xs, dim=0)
return x
class OPManualAdaptiveTransition(ManualAdaptiveTransition):
measure = None
def __init__(self, N, verbose=False, measure_args={}, **kwargs):
""" Slow (n^3, or n^2 if step sizes are cached) version via manual matrix mult/inv
delta: optional list of step sizes to cache the transitions for
"""
A, B = transition(type(self).measure, N, **measure_args)
A = torch.as_tensor(A, dtype=torch.float)
B = torch.as_tensor(B, dtype=torch.float)[:, 0]
super().__init__(N, A, B, **kwargs)
if verbose:
print(f"{self.__class__}\n A {self.A}\nB {self.B}")
class LegSAdaptiveTransitionManual(OPManualAdaptiveTransition):
measure = 'legs'
class LegTAdaptiveTransitionManual(OPManualAdaptiveTransition):
measure = 'legt'
class LagTAdaptiveTransitionManual(OPManualAdaptiveTransition):
measure = 'lagt'
class TLagTAdaptiveTransitionManual(OPManualAdaptiveTransition):
measure = 'tlagt'
class GLagTAdaptiveTransitionManual(OPManualAdaptiveTransition):
measure = 'glagt'
# TODO this class is not learnable for now (will have to change the shape of a, b to [1])
class CumsumAdaptiveTransition(AdaptiveTransition):
def __init__(self, N, a, b):
"""Implements update for matrix A = -(L+aI) for forward, backward, bilinear, zoh discretizations.
a: scalar, the element on the diagonal
b: scalar, so that B = b * ones vector
"""
# Can't wrap scalars with torch.Tensor(), while torch.tensor(a) gives double instead of float or something
# super().__init__(N, {'a': [a], 'b': [b]}, **kwargs) # TODO this should register b and then construct self.B using a @property, like in Toeplitz (but is slightly slower in the non-learnable case)
params = {
'a': torch.tensor(a, dtype=torch.float),
'b': torch.tensor(b, dtype=torch.float),
}
super().__init__(N, params)
# self.N = N
# self.a = a
# self.b = b
# self.register_buffer('A', self.construct_A())
# self.register_buffer('B', b * torch.ones(N))
# self.register_buffer('I', torch.eye(N))
self.register_buffer('arange', torch.arange(N-1))
def _A(self):
L = torch.tril(self.ones.repeat(self.N, 1))
D = self.a * self.I
return -(L+D)
def _B(self):
return self.b * self.ones
def quadratic(self, x, y):
"""
x : (..., N)
y : (..., N)
returns: x^T A y (...)
"""
return torch.sum((self.A @ y.unsqueeze(-1)).squeeze(-1) * x, dim=-1)
def precompute_forward(self, delta):
"""Store elements along the diagonals of (I + d A)."""
if isinstance(delta, float):
delta = torch.tensor(delta).to(self.I)
if isinstance(delta, torch.Tensor):
delta = delta.unsqueeze(-1)
a_ = 1. - delta * self.a # (..., 1)
if self.N == 1:
return a_
return torch.cat((a_, -delta*delta.new_ones(self.N-1)), -1) # (..., N)
def precompute_backward(self, delta): # TODO should be called inverse?
"""Store elements along the diagonals of (I - d A)^{-1}.
# a' = a + 1/dt
delta: (...)
output: (..., N)
"""
if isinstance(delta, float):
delta = torch.tensor(delta).to(self.I)
if isinstance(delta, torch.Tensor):
delta = delta.unsqueeze(-1)
if self.N == 1:
return 1. / (1. + self.a*delta + delta)
ad = self.a*delta # (..., 1)
ad_p1 = 1 + ad
denom = ad_p1 + delta # 1 + a'
denom_inv = denom.reciprocal() # 1. / denom
s = - delta * denom_inv * denom_inv # -1/(1+a')^2
b = ad_p1 * denom_inv # a' / (1 + a')
pows = b ** self.arange ## TODO benchmark against cumprod or cumsum in log space
tail = s * pows
ret = torch.cat((denom_inv, tail), -1)
return ret
# ad = self.a*delta # (..., 1)
# denom = 1 + ad + delta
# s = - delta / denom# -1/(1+a')
# b = (1 + ad) / denom # a' / (1 + a')
# # pows = b ** torch.arange(self.N-1).to(self.I) ## TODO benchmark against cumprod or cumsum in log space
# pows = b ** self.arange ## TODO benchmark against cumprod or cumsum in log space
# tail = s * pows
# ret = torch.cat((tail.new_ones(tail.shape[:-1]+(1,)), tail), -1)
# ret = ret / denom
# return ret
def precompute_gbt_A(self, delta, alpha=0.5):
"""Return the A matrix of the gbt discretization."""
c = self.precompute_forward((1.-alpha)*delta)
d = self.precompute_backward(alpha*delta)
return causal_convolution(c, d)
def precompute_gbt_B(self, delta, alpha=0.5):
"""Return the B matrix of the gbt discretization."""
d = self.precompute_backward(alpha*delta)
# return causal_convolution(d, torch.ones_like(d)) * self.b
return torch.cumsum(d, -1) * self.b
def forward_mult(self, u, delta, transpose=False):
"""Computes (I + delta A) u.
A: (n, n)
u: (..., n)
delta: (...) or scalar
output: (..., n)
"""
if isinstance(delta, torch.Tensor):
delta = delta.unsqueeze(-1)
if transpose:
x = torch.cumsum(u.flip(-1), -1).flip(-1)
else:
x = torch.cumsum(u, -1)
x = x + u * self.a
x = u - delta * x # Because A is negated in the representation
return x
def inverse_mult(self, u, delta, transpose=False):
"""Computes (I - d A)^-1 u."""
# if isinstance(delta, torch.Tensor):
# delta = delta.unsqueeze(-1)
# if isinstance(delta, float) and delta in self.backward_cache:
# c = self.backward_cache[delta]
# else:
# c = self.precompute_backward(delta, **kwargs)
c = self.precompute_backward(delta)
if transpose:
x = causal_convolution(c, u.flip(-1)).flip(-1)
else:
x = causal_convolution(c, u)
return x
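# Hedged sanity-check sketch: the O(N) cumsum implementation of (I + delta*A)u above
# should match the dense product built from the materialized A matrix. Illustrative only.
def _check_cumsum_forward(N=8, a=-0.5, b=1.0, delta=1e-2):
    trans = CumsumAdaptiveTransition(N, a, b)
    u = torch.randn(N)
    dense = u + delta * (trans.A @ u)
    fast = trans.forward_mult(u, delta)
    assert torch.allclose(dense, fast, atol=1e-5)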
class LagTCumsumAdaptiveTransition(CumsumAdaptiveTransition):
measure = 'lagt'
def __init__(self, N, beta=1.0):
# super().__init__(N, -0.5, 1.0)
super().__init__(N, -0.5, beta)
# print(f"LagTCumsumAdaptiveTransition:\n A {self.A}\nB {self.B}")
class TLagTCumsumAdaptiveTransition(CumsumAdaptiveTransition):
measure = 'tlagt'
def __init__(self, N, beta=1.0):
super().__init__(N, -(1.-beta)/2, beta)
# print(f"LagTCumsumAdaptiveTransition:\n A {self.A}\nB {self.B}")
class GLagTCumsumAdaptiveTransition(CumsumAdaptiveTransition):
measure = 'glagt'
def __init__(self, N, alpha=0.0, beta=0.01):
# TODO this is completely broken
raise NotImplementedError
# super().__init__(N, -(1.-beta)/2, beta)
# print(f"GLagTCumsumAdaptiveTransition:\n A {self.A}\nB {self.B}")
class LegTAdaptiveTransition(AdaptiveTransition):
def __init__(self, N): # this class is not trainable
A, B = transition('legt', N)
A = torch.as_tensor(A, dtype=torch.float)
B = torch.as_tensor(B, dtype=torch.float)[:, 0]
super().__init__(N, {'a': A, 'b': B})
def _A(self):
return self.a
def _B(self):
return self.b
def forward_mult(self, u, delta, transpose=False):
if transpose: return legt_gbt_forward_t(delta, u, transpose=True) # TODO this is all broken
else: return legt_gbt_forward(delta, u)
def inverse_mult(self, u, delta, transpose=False):
if transpose: return legt_gbt_backward_t(-delta, u, transpose=True)
else: return legt_gbt_backward(-delta, u)
def quadratic(self, x, y):
"""
x : (..., N)
y : (..., N)
returns: x^T A y (...)
"""
# TODO Should use fast mult... also check if we even need this anymore
return torch.sum((self.A @ y.unsqueeze(-1)).squeeze(-1) * x, dim=-1)
class TriDInverseAdaptiveTransition(AdaptiveTransition):
# NOTE stores matrix for x' = -Ax + Bu instead of x' = Ax + Bu.
def __init__(self, N, dl, d, du, pl, pr, c, b, **kwargs):
params = {
'dl': dl,
'd': d,
'du': du,
'pl': pl,
'pr': pr,
'c': c,
'b': b,
}
super().__init__(N, params, **kwargs)
def _A(self):
"""The matrix A for system x' = -Ax + Bu."""
A = trid_solve(self.I, self.dl, self.d, self.du).transpose(-1, -2)
A = A + self.c*self.I
A = self.pl.unsqueeze(-1) * A * self.pr
return A
def _B(self):
return self.pl * self.b
def forward_mult(self, u, delta, transpose=False):
du = self.du
d = self.d
dl = self.dl
pr = self.pr
pl = self.pl
c = self.c
if transpose:
return trid_gbt_forward(
delta, u,
du, d, dl, pr, pl, c,
)
else:
return trid_gbt_forward(
delta, u,
dl, d, du, pl, pr, c,
)
def inverse_mult(self, u, delta, transpose=False):
du = self.du
d = self.d
dl = self.dl
pr = self.pr
pl = self.pl
c = self.c
if transpose:
return trid_gbt_backward(
delta, u,
du, d, dl, pr, pl, c,
)
else:
return trid_gbt_backward(
delta, u,
dl, d, du, pl, pr, c,
)
# TODO turn this into class method
def _diag(N, c): return F.pad(torch.ones(N-1), (1, 1)) * c
class LegTTriDInverseAdaptiveTransition(TriDInverseAdaptiveTransition):
def __init__(self, N, corners=3, **kwargs):
p = torch.sqrt(1+2*torch.arange(N))
# p = torch.ones(N)
dl = _diag(N, -.5) # + F.pad(torch.randn(N-1)*1e-4, (1, 1))
du = _diag(N, .5) # + F.pad(torch.randn(N-1)*1e-4, (1, 1))
d = torch.zeros(N) + torch.randn(N)*1e-2
if corners == 0:
pass
elif corners == 1:
d[0] += .5
elif corners == 2:
d[-1] += .5
elif corners == 3:
d[0] += .5
d[-1] += .5
else: raise NotImplementedError
c = torch.ones(N) * 0. # + torch.randn(N)*1e-4
super().__init__(N, dl, d, du, p, p, c, torch.ones(N), **kwargs)
class LagTTriDInverseAdaptiveTransition(TriDInverseAdaptiveTransition):
def __init__(self, N, **kwargs):
p = torch.ones(N)
dl = _diag(N, -1.)
du = _diag(N, 0.)
d = torch.ones(N)
c = torch.ones(N) * -.5
super().__init__(N, dl, d, du, p, p, c, torch.ones(N), **kwargs)
class LegSTriDInverseAdaptiveTransition(TriDInverseAdaptiveTransition):
def __init__(self, N, diag_scale=2, diag_add=True, **kwargs):
# print(diag_scale, kwargs)
if diag_scale == 2:
p = torch.sqrt(2*torch.arange(N)+1)
elif diag_scale == 1:
p = torch.sqrt(torch.arange(N)+1)
elif diag_scale == 0:
p = torch.ones(N)
else: raise NotImplementedError
dl = _diag(N, -1.)
du = _diag(N, 0.)
d = torch.ones(N)
if diag_add:
c = - torch.arange(N) / (2*torch.arange(N)+1)
else:
c = - .5 * torch.ones(N)
super().__init__(N, dl, d, du, p, p, c, torch.ones(N), **kwargs)
# print(self.A)
class JacTriDInverseAdaptiveTransition(TriDInverseAdaptiveTransition):
def __init__(self, N, halve=False, double_B=True, **kwargs):
# print(diag_scale, kwargs)
p = torch.sqrt(2*torch.arange(N)+2)
dl = _diag(N, -1.)
du = _diag(N, 0.)
d = torch.ones(N)
if halve:
c = - .5 * torch.ones(N)
else:
c = 0.0 * torch.ones(N)
if double_B:
B = 2 * torch.ones(N)
else:
B = torch.ones(N)
super().__init__(N, dl, d, du, p, p, c, B, **kwargs)
# print(self.A)
class ChebITriDInverseAdaptiveTransition(TriDInverseAdaptiveTransition):
def __init__(self, N, **kwargs):
# p = torch.sqrt(1+2*torch.arange(N))
p = torch.ones(N)
dl = _diag(N, -.5) # + F.pad(torch.randn(N-1)*1e-4, (1, 1))
du = _diag(N, .5) # + F.pad(torch.randn(N-1)*1e-4, (1, 1))
d = torch.zeros(N) + torch.randn(N)*1e-3
# d = torch.zeros(N)
# d[0] += .5
# d[-1] += .5
dl[0] *= 2.**.5
du[0] *= 2.**.5
c = torch.ones(N) * 0. # + torch.randn(N)*1e-4
super().__init__(N, dl, d, du, p, p, c, torch.ones(N), **kwargs)
class ChebIITriDInverseAdaptiveTransition(TriDInverseAdaptiveTransition):
def __init__(self, N, **kwargs):
p = torch.ones(N)
du = _diag(N, .5)
# du = 2.0 * du
# dl = _diag(N, -.5) + F.pad(torch.randn(N-1)*2e-1, (1, 1))
# dl = F.pad(torch.randn(N-1), (1,1)) * .5
dl = -du
d = torch.zeros(N) + torch.randn(N)*1e-3
# d = torch.zeros(N)
c = torch.ones(N) * 0. # + torch.randn(N)*1e-4
super().__init__(N, dl, d, du, p, p, c, torch.ones(N), **kwargs)
class ToeplitzAdaptiveTransition(AdaptiveTransition):
# NOTE stores matrix for x' = -Ax + Bu instead of x' = Ax + Bu
def __init__(self, N, a, b, c, **kwargs):
"""Implements update for lower triangular Toeplitz transitions A.
a: represents the diagonals of a lower triangular Toeplitz transition matrix
b: B transition matrix
c: scaling factors
A = c a c^{-1}, B = c b (note that c represents \Lambda^{-1} in the HiPPO paper)
"""
super().__init__(N, {'a': a, 'c': c, 'b': b}, **kwargs)
e = torch.zeros(N)
e[0] = 1.0
self.register_buffer('e', e) # for convenience
def _A(self): # TODO do this for all classes? how to know when to cache A or not?
# Z = torch.diag_embed(torch.ones(self.N-1), -1).to(self.a)
# [21-09-14 TODO] changed the krylov construction but haven't tested
# Z = torch.diag_embed(self.ones[:-1], -1)
# A = krylov(self.N, Z, self.a) # TODO use toeplitz.toeplitz_krylov_fast instead
A = construct_toeplitz(self.a)
A = A.transpose(0, 1)
A = self.c.unsqueeze(-1) * A * self.c.reciprocal()
return A
# @property
def _B(self):
return self.c * self.b
# TODO do we need the gbt_A() and gbt_B() methods to materialize the GBT matrices faster?
def quadratic(self, x, y): # TODO need this? also, move to main superclass
"""
x : (..., N)
y : (..., N)
returns: x^T A y (...)
"""
return torch.sum((self.A @ y.unsqueeze(-1)).squeeze(-1) * x, dim=-1)
def _mult(self, t, u, transpose):
if transpose:
x = self.c * u
x = causal_convolution(t, x.flip(-1)).flip(-1)
x = self.c.reciprocal() * x
else:
x = self.c.reciprocal() * u
x = causal_convolution(t, x)
x = self.c * x
return x
def forward_mult(self, u, delta, transpose=False):
"""Computes y = (I - delta A) u.
self.a: (..., n)
u: (..., n)
delta: (...)
x: (..., n)
"""
t = self.e - delta.unsqueeze(-1) * self.a # represents (I - delta A)
return self._mult(t, u, transpose)
def inverse_mult(self, u, delta, transpose=False):
"""Computes (I + d A)^-1 u."""
t = self.e + delta.unsqueeze(-1) * self.a
t_ = causal_convolution_inverse(t) # represents (I + delta A)^-1
return self._mult(t_, u, transpose)
class LagTToeplitzAdaptiveTransition(ToeplitzAdaptiveTransition):
def __init__(self, N, **kwargs):
a = torch.ones(N)
a[..., 0] = .5
b = torch.ones(N)
c = torch.ones(N)
super().__init__(N, a, b, c, **kwargs)
class GLagTToeplitzAdaptiveTransition(ToeplitzAdaptiveTransition):
def __init__(self, N, alpha=0.0, beta=0.01, **kwargs):
a = torch.ones(N)
a[..., 0] = (1. + beta) / 2.
# b = torch.ones(N)
b = ss.binom(alpha + np.arange(N), np.arange(N)) * np.exp(-.5 * ss.gammaln(1-alpha)) * beta**((1-alpha)/2)
b = torch.as_tensor(b, dtype=torch.float)
# c = torch.ones(N)
c = np.exp(.5 * (ss.gammaln(np.arange(N)+alpha+1) - ss.gammaln(np.arange(N)+1)))
c = 1. / c
c = torch.as_tensor(c, dtype=torch.float)
super().__init__(N, a, b, c, **kwargs)
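# Hedged end-to-end sketch: bilinear-discretize the translated-Legendre system defined
# above and roll the resulting recurrence x_{k+1} = A_d x_k + B_d u_k on random inputs.
# Shapes follow gbt_A/gbt_B above; this block is illustrative and only runs as a script.
if __name__ == "__main__":
    N = 8
    trans = LegTAdaptiveTransitionManual(N)
    dt = torch.full((1,), 1e-2)
    A_d = trans.gbt_A(dt)   # (1, N, N) discrete-time transition matrix
    B_d = trans.gbt_B(dt)   # (1, N) discrete-time input matrix
    x = torch.zeros(1, N)
    for u_t in torch.randn(64):
        x = (A_d @ x.unsqueeze(-1)).squeeze(-1) + B_d * u_t
    print("final state norm:", x.norm().item())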
| state-spaces-main | src/models/hippo/transition.py |
"""Standalone implementation of HiPPO operators.
Contains experiments for the function reconstruction experiment in original HiPPO paper,
as well as new animations from "How to Train Your HiPPO".
This file ports the notebook notebooks/hippo_function_approximation.ipynb,
which is recommended if Jupyter is supported.
"""
from functools import partial
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import numpy as np
from scipy import signal
from scipy import linalg as la
from scipy import special as ss
from einops import rearrange, repeat, reduce
import src.models.functional.unroll as unroll # Not necessary, can comment out and set fast=False in HiPPO modules
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import seaborn as sns
sns.set(rc={
"figure.dpi":300,
'savefig.dpi':300,
'animation.html':'jshtml',
'animation.embed_limit':100, # Max animation size in Mb
})
# sns.set_context('notebook')
sns.set_style('ticks') # or 'whitegrid'
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# HiPPO matrices
def transition(measure, N, **measure_args):
# Laguerre (translated)
if measure == 'lagt':
b = measure_args.get('beta', 1.0)
A = np.eye(N) / 2 - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
# Legendre (translated)
elif measure == 'legt':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1) ** .5
j, i = np.meshgrid(Q, Q)
A = R[:, None] * np.where(i < j, (-1.)**(i-j), 1) * R[None, :]
B = R[:, None]
A = -A
# Legendre (scaled)
elif measure == 'legs':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)[:, None]
B = B.copy() # Otherwise "UserWarning: given NumPY array is not writeable..." after torch.as_tensor(B)
elif measure == 'fourier':
freqs = np.arange(N//2)
d = np.stack([np.zeros(N//2), freqs], axis=-1).reshape(-1)[1:]
A = 2*np.pi*(-np.diag(d, 1) + np.diag(d, -1))
B = np.zeros(N)
B[0::2] = 2
B[0] = 2**.5
A = A - B[:, None] * B[None, :]
# A = A - np.eye(N)
B *= 2**.5
B = B[:, None]
return A, B
def measure(method, c=0.0):
if method == 'legt':
fn = lambda x: np.heaviside(x, 0.0) * np.heaviside(1.0-x, 0.0)
elif method == 'legs':
fn = lambda x: np.heaviside(x, 1.0) * np.exp(-x)
elif method == 'lagt':
fn = lambda x: np.heaviside(x, 1.0) * np.exp(-x)
elif method in ['fourier']:
fn = lambda x: np.heaviside(x, 1.0) * np.heaviside(1.0-x, 1.0)
else: raise NotImplementedError
fn_tilted = lambda x: np.exp(c*x) * fn(x)
return fn_tilted
def basis(method, N, vals, c=0.0, truncate_measure=True):
"""
vals: list of times (forward in time)
returns: shape (T, N) where T is length of vals
"""
if method == 'legt':
eval_matrix = ss.eval_legendre(np.arange(N)[:, None], 2*vals-1).T
eval_matrix *= (2*np.arange(N)+1)**.5 * (-1)**np.arange(N)
elif method == 'legs':
_vals = np.exp(-vals)
eval_matrix = ss.eval_legendre(np.arange(N)[:, None], 1-2*_vals).T # (L, N)
eval_matrix *= (2*np.arange(N)+1)**.5 * (-1)**np.arange(N)
elif method == 'lagt':
vals = vals[::-1]
eval_matrix = ss.eval_genlaguerre(np.arange(N)[:, None], 0, vals)
eval_matrix = eval_matrix * np.exp(-vals / 2)
eval_matrix = eval_matrix.T
elif method == 'fourier':
cos = 2**.5 * np.cos(2*np.pi*np.arange(N//2)[:, None]*(vals)) # (N/2, T/dt)
sin = 2**.5 * np.sin(2*np.pi*np.arange(N//2)[:, None]*(vals)) # (N/2, T/dt)
cos[0] /= 2**.5
eval_matrix = np.stack([cos.T, sin.T], axis=-1).reshape(-1, N) # (T/dt, N)
# print("eval_matrix shape", eval_matrix.shape)
if truncate_measure:
eval_matrix[measure(method)(vals) == 0.0] = 0.0
p = torch.tensor(eval_matrix)
p *= np.exp(-c*vals)[:, None] # [::-1, None]
return p
class HiPPOScale(nn.Module):
"""Vanilla HiPPO-LegS model (scale invariant instead of time invariant)."""
def __init__(self, N, method='legs', max_length=1024, discretization='bilinear'):
"""
max_length: maximum sequence length
"""
super().__init__()
self.N = N
A, B = transition(method, N)
B = B.squeeze(-1)
A_stacked = np.empty((max_length, N, N), dtype=A.dtype)
B_stacked = np.empty((max_length, N), dtype=B.dtype)
for t in range(1, max_length + 1):
At = A / t
Bt = B / t
if discretization == 'forward':
A_stacked[t - 1] = np.eye(N) + At
B_stacked[t - 1] = Bt
elif discretization == 'backward':
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, np.eye(N), lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, Bt, lower=True)
elif discretization == 'bilinear':
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, np.eye(N) + At / 2, lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, Bt, lower=True)
else: # ZOH
A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
B_stacked[t - 1] = la.solve_triangular(A, A_stacked[t - 1] @ B - B, lower=True)
self.register_buffer('A_stacked', torch.Tensor(A_stacked)) # (max_length, N, N)
self.register_buffer('B_stacked', torch.Tensor(B_stacked)) # (max_length, N)
vals = np.linspace(0.0, 1.0, max_length)
self.eval_matrix = torch.Tensor((B[:, None] * ss.eval_legendre(np.arange(N)[:, None], 2 * vals - 1)).T )
def forward(self, inputs, fast=True):
"""
inputs : (length, ...)
output : (length, ..., N) where N is the order of the HiPPO projection
"""
L = inputs.shape[0]
inputs = inputs.unsqueeze(-1)
u = torch.transpose(inputs, 0, -2)
u = u * self.B_stacked[:L]
u = torch.transpose(u, 0, -2) # (length, ..., N)
if fast:
result = unroll.variable_unroll_matrix(self.A_stacked[:L], u)
return result
c = torch.zeros(u.shape[1:]).to(inputs)
cs = []
for t, f in enumerate(inputs):
c = F.linear(c, self.A_stacked[t]) + self.B_stacked[t] * f
cs.append(c)
return torch.stack(cs, dim=0)
def reconstruct(self, c):
a = self.eval_matrix.to(c) @ c.unsqueeze(-1)
return a
class HiPPO(nn.Module):
"""Linear time invariant x' = Ax + Bu."""
def __init__(self, N, method='legt', dt=1.0, T=1.0, discretization='bilinear', scale=False, c=0.0):
"""
N: the order of the HiPPO projection
dt: discretization step size - should be roughly inverse to the length of the sequence
"""
super().__init__()
self.method = method
self.N = N
self.dt = dt
self.T = T
self.c = c
A, B = transition(method, N)
A = A + np.eye(N)*c
self.A = A
self.B = B.squeeze(-1)
self.measure_fn = measure(method)
C = np.ones((1, N))
D = np.zeros((1,))
dA, dB, _, _, _ = signal.cont2discrete((A, B, C, D), dt=dt, method=discretization)
dB = dB.squeeze(-1)
self.register_buffer('dA', torch.Tensor(dA)) # (N, N)
self.register_buffer('dB', torch.Tensor(dB)) # (N,)
self.vals = np.arange(0.0, T, dt)
self.eval_matrix = basis(self.method, self.N, self.vals, c=self.c) # (T/dt, N)
self.measure = measure(self.method)(self.vals)
def forward(self, inputs, fast=True):
"""
inputs : (length, ...)
output : (length, ..., N) where N is the order of the HiPPO projection
"""
inputs = inputs.unsqueeze(-1)
u = inputs * self.dB # (length, ..., N)
if fast:
dA = repeat(self.dA, 'm n -> l m n', l=u.size(0))
return unroll.variable_unroll_matrix(dA, u)
c = torch.zeros(u.shape[1:]).to(inputs)
cs = []
for f in inputs:
c = F.linear(c, self.dA) + self.dB * f
cs.append(c)
return torch.stack(cs, dim=0)
def reconstruct(self, c, evals=None): # TODO take in a times array for reconstruction
"""
c: (..., N,) HiPPO coefficients (same as x(t) in S4 notation)
output: (..., L,)
"""
if evals is not None:
eval_matrix = basis(self.method, self.N, evals)
else:
eval_matrix = self.eval_matrix
m = self.measure[self.measure != 0.0]
c = c.unsqueeze(-1)
y = eval_matrix.to(c) @ c
return y.squeeze(-1).flip(-1)
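# Hedged minimal usage sketch (plot() below does the same thing with plotting).
# fast=False avoids the optional `unroll` extension; the helper name and argument
# values here are illustrative only.
def _hippo_demo(T=1.0, dt=1e-3, N=32):
    u = torch.randn(int(T / dt))           # any 1-D signal of length T/dt
    hippo = HiPPO(method='legs', N=N, dt=dt, T=T)
    coeffs = hippo(u, fast=False)          # (T/dt, N) online coefficients
    recon = hippo.reconstruct(coeffs[-1])  # reconstruct the signal from the final state
    return coeffs, recon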
### Synthetic data generation
def whitesignal(period, dt, freq, rms=0.5, batch_shape=()):
"""
Produces output signal of length period / dt, band-limited to frequency freq
Output shape (*batch_shape, period/dt)
Adapted from the nengo library
"""
if freq is not None and freq < 1. / period:
raise ValueError(f"Make ``{freq=} >= 1. / {period=}`` to produce a non-zero signal",)
nyquist_cutoff = 0.5 / dt
if freq > nyquist_cutoff:
raise ValueError(f"{freq} must not exceed the Nyquist frequency for the given dt ({nyquist_cutoff:0.3f})")
n_coefficients = int(np.ceil(period / dt / 2.))
shape = batch_shape + (n_coefficients + 1,)
sigma = rms * np.sqrt(0.5)
coefficients = 1j * np.random.normal(0., sigma, size=shape)
coefficients[..., -1] = 0.
coefficients += np.random.normal(0., sigma, size=shape)
coefficients[..., 0] = 0.
set_to_zero = np.fft.rfftfreq(2 * n_coefficients, d=dt) > freq
coefficients *= (1-set_to_zero)
power_correction = np.sqrt(1. - np.sum(set_to_zero, dtype=float) / n_coefficients)
if power_correction > 0.: coefficients /= power_correction
coefficients *= np.sqrt(2 * n_coefficients)
signal = np.fft.irfft(coefficients, axis=-1)
signal = signal - signal[..., :1] # Start from 0
return signal
def plot(T, dt, N, freq):
np.random.seed(0)
vals = np.arange(0.0, T, dt)
u = whitesignal(T, dt, freq=freq)
u = torch.tensor(u, dtype=torch.float)
u = u.to(device)
plt.figure(figsize=(16, 8))
offset = 0.0
plt.plot(vals, u.cpu()+offset, 'k', linewidth=1.0)
# Linear Time Invariant (LTI) methods x' = Ax + Bu
lti_methods = [
'legs',
'legt',
'fourier',
]
for method in lti_methods:
hippo = HiPPO(method=method, N=N, dt=dt, T=T).to(device)
u_hippo = hippo.reconstruct(hippo(u))[-1].cpu()
plt.plot(vals[-len(u_hippo):], u_hippo, label=method)
# Original HiPPO-LegS, which uses time-varying SSM x' = 1/t [ Ax + Bu]
# we call this "linear scale invariant"
lsi_methods = ['legs']
for method in lsi_methods:
hippo = HiPPOScale(N=N, method=method, max_length=int(T/dt)).to(device)
u_hippo = hippo.reconstruct(hippo(u))[-1].cpu()
plt.plot(vals[-len(u_hippo):], u_hippo, label=method+' (scaled)')
# plt.xlabel('Time (normalized)', labelpad=-10)
plt.legend()
plt.savefig(f'function_approximation.pdf', bbox_inches='tight')
plt.show()
plt.close()
# Animation code from HTTYH
def plt_lines(x, y, color, size, label=None):
return plt.plot(x, y, color, linewidth=size, label=label)[0]
def update_lines(ln, x, y):
ln.set_data(x, y)
def animate_hippo(
method,
T=5, dt=5e-4, N=64, freq=20.0,
interval=100,
plot_hippo=False, hippo_offset=0.0, label_hippo=False,
plot_measure=False, measure_offset=-3.0, label_measure=False,
plot_coeff=None, coeff_offset=3.0,
plot_s4=False, s4_offset=6.0,
plot_hippo_type='line', plot_measure_type='line', plot_coeff_type='line',
size=1.0,
plot_legend=True, plot_xticks=True, plot_box=True,
plot_vline=False,
animate_u=False,
seed=2,
):
np.random.seed(seed)
vals = np.arange(0, int(T/dt)+1)
L = int(T/dt)+1
u = torch.FloatTensor(whitesignal(T, dt, freq=freq))
u = F.pad(u, (1, 0))
u = u + torch.FloatTensor(np.sin(1.5*np.pi/T*np.arange(0, T+dt, dt))) # add 3/4 of a sin cycle
u = u.to(device)
hippo = HiPPO(method=method, N=N, dt=dt, T=T).to(device)
coef_hippo = hippo(u).cpu().numpy()
h_hippo = hippo.reconstruct(hippo(u)).cpu().numpy()
u = u.cpu().numpy()
fig, ax = plt.subplots(figsize=(12, 4))
if animate_u:
ln_u = plt_lines([], [], 'k', size, label='Input $u(t)$')
else:
plt_lines(vals, u, 'k', size, label='Input $u(t)$')
if plot_hippo:
label_args = {'label': 'HiPPO reconstruction'} if label_hippo else {}
ln = plt_lines([], [], size=size, color='red', **label_args)
if plot_measure:
label_args = {'label': 'HiPPO Measure'} if label_measure else {}
ln_measure = plt_lines(vals, np.zeros(len(vals))+measure_offset, size=size, color='green', **label_args)
if plot_coeff is None: plot_coeff = []
if isinstance(plot_coeff, int): plot_coeff = [plot_coeff]
if len(plot_coeff) > 0:
ln_coeffs = [
plt_lines([], [], size=size, color='blue')
for _ in plot_coeff
]
plt_lines([], [], size=size, color='blue', label='State $x(t)$') # For the legend
### Y AXIS LIMITS
if plot_measure:
min_y = measure_offset
else:
min_y = np.min(u)
if len(plot_coeff) > 0:
max_u = np.max(u) + coeff_offset
else:
max_u = np.max(u)
C = np.random.random(N)
s4 = np.sum(coef_hippo * C, axis=-1)
max_s4 = 0.0
if plot_s4:
ln_s4 = plt_lines([], [], size=size, color='red', label='Output $y(t)$')
max_s4 = np.max(s4)+s4_offset
if plot_vline:
ln_vline = ax.axvline(0, ls='-', color='k', lw=1)
if plot_legend:
plt.legend(loc='upper left', fontsize='x-small')
def init():
left_endpoint = vals[0]
ax.set_xlim(left_endpoint, vals[-1]+1)
ax.set_ylim(min_y, max(max_u, max_s4))
ax.set_yticks([])
if not plot_xticks: ax.set_xticks([])
if not plot_box: plt.box(False)
return [] # ln,
def update(frame):
if animate_u:
xdata = np.arange(frame)
ydata = u[:frame]
update_lines(ln_u, xdata, ydata)
m = np.zeros(len(vals))
m[:frame] = hippo.measure_fn(np.arange(frame)*dt)[::-1]
xdata = vals
if plot_measure:
update_lines(ln_measure, xdata, m+measure_offset)
if plot_hippo:
ydata = h_hippo[frame] + hippo_offset
m2 = hippo.measure_fn(np.arange(len(ydata))*dt)[::-1]
# Remove reconstruction where measure is 0
ydata[m2 == 0.0] = np.nan
xdata = np.arange(frame-len(ydata), frame)
update_lines(ln, xdata, ydata)
if len(plot_coeff) > 0:
for coeff, ln_coeff in zip(plot_coeff, ln_coeffs):
update_lines(ln_coeff, np.arange(frame), coef_hippo[:frame, coeff] + coeff_offset)
if plot_s4: # Only scale case; scale case should copy plot_hippo logic
update_lines(ln_s4, np.arange(0, frame), s4[:frame] + s4_offset)
if plot_vline:
ln_vline.set_xdata([frame, frame])
return []
ani = FuncAnimation(fig, update,
frames=np.arange(0, int(T*1000/interval)+1)*int(interval/1000/dt),
interval=interval,
init_func=init, blit=True)
return ani
if __name__ == '__main__':
plot(T=3, dt=1e-3, N=64, freq=3.0)
# Visualize HiPPO online reconstruction
ani = animate_hippo(
'legs', # Try 'legt' or 'fourier'
T=5, dt=5e-4, N=64, interval=100,
# T=1, dt=1e-3, N=64, interval=200, # Faster rendering for testing
size=1.0,
animate_u=True,
plot_hippo=True, hippo_offset=0.0, label_hippo=True,
plot_s4=False, s4_offset=6.0,
plot_measure=True, measure_offset=-3.0, label_measure=True,
plot_coeff=[], coeff_offset=3.0,
plot_legend=True, plot_xticks=True, plot_box=True,
plot_vline=True,
)
ani.save('hippo_legs.gif')
# Visualize S4
ani = animate_hippo(
'legs', # Try 'legt' or 'fourier'
T=5, dt=5e-4, N=64, interval=100,
size=1.0,
animate_u=True,
plot_hippo=False, hippo_offset=0.0, label_hippo=True,
plot_s4=True, s4_offset=6.0,
plot_measure=False, measure_offset=-3.0, label_measure=True,
plot_coeff=[0,1,2,3], coeff_offset=3.0,
plot_legend=True, plot_xticks=True, plot_box=True,
plot_vline=True,
)
ani.save('s4_legs.gif')
| state-spaces-main | src/models/hippo/visualizations.py |
"""Definitions of A and B matrices for various HiPPO operators."""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy import special as ss
from einops import rearrange, repeat
contract = torch.einsum
def embed_c2r(A):
A = rearrange(A, '... m n -> ... m () n ()')
A = np.pad(A, ((0, 0), (0, 1), (0, 0), (0, 1))) + \
np.pad(A, ((0, 0), (1, 0), (0, 0), (1,0)))
return rearrange(A, 'm x n y -> (m x) (n y)')
# TODO take in 'torch' option to return torch instead of numpy, and converts the shape of B from (N, 1) to (N)
# TODO remove tlagt
def transition(measure, N, **measure_args):
"""A, B transition matrices for different measures.
measure: the type of measure
legt - Legendre (translated)
legs - Legendre (scaled)
glagt - generalized Laguerre (translated)
lagt, tlagt - previous versions of (tilted) Laguerre with slightly different normalization
"""
# Laguerre (translated)
if measure == 'lagt':
b = measure_args.get('beta', 1.0)
A = np.eye(N) / 2 - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
elif measure == 'tlagt':
# beta = 1 corresponds to no tilt
b = measure_args.get('beta', 1.0)
A = (1.-b)/2 * np.eye(N) - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
# Generalized Laguerre
# alpha 0, beta small is most stable (limits to the 'lagt' measure)
# alpha 0, beta 1 has transition matrix A = [lower triangular 1]
elif measure == 'glagt':
alpha = measure_args.get('alpha', 0.0)
beta = measure_args.get('beta', 0.01)
A = -np.eye(N) * (1 + beta) / 2 - np.tril(np.ones((N, N)), -1)
B = ss.binom(alpha + np.arange(N), np.arange(N))[:, None]
L = np.exp(.5 * (ss.gammaln(np.arange(N)+alpha+1) - ss.gammaln(np.arange(N)+1)))
A = (1./L[:, None]) * A * L[None, :]
B = (1./L[:, None]) * B * np.exp(-.5 * ss.gammaln(1-alpha)) * beta**((1-alpha)/2)
# Legendre (translated)
elif measure == 'legt':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1) ** .5
j, i = np.meshgrid(Q, Q)
A = R[:, None] * np.where(i < j, (-1.)**(i-j), 1) * R[None, :]
B = R[:, None]
A = -A
# Halve again for timescale correctness
A *= 0.5
B *= 0.5
# LMU: equivalent to LegT up to normalization
elif measure == 'lmu':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1)[:, None] # / theta
j, i = np.meshgrid(Q, Q)
A = np.where(i < j, -1, (-1.)**(i-j+1)) * R
B = (-1.)**Q[:, None] * R
# Legendre (scaled)
elif measure == 'legs':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)[:, None]
B = B.copy() # Otherwise "UserWarning: given NumPY array is not writeable..." after torch.as_tensor(B)
elif measure == 'legsd':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)[:, None]
B = B.copy() # Otherwise "UserWarning: given NumPY array is not writeable..." after torch.as_tensor(B)
A += .5 * B*B[None, :, 0]
B = B / 2.0
elif measure in ['fourier_diag', 'foud']:
freqs = np.arange(N//2)
d = np.stack([freqs, np.zeros(N//2)], axis=-1).reshape(-1)[:-1]
A = 2*np.pi*(-np.diag(d, 1) + np.diag(d, -1))
A = A - .5 * np.eye(N)
B = np.zeros(N)
B[0::2] = 2**.5
B[0] = 1
B = B[:, None]
elif measure in ['fourier', 'fout']:
freqs = np.arange(N//2)
d = np.stack([np.zeros(N//2), freqs], axis=-1).reshape(-1)[1:]
A = np.pi*(-np.diag(d, 1) + np.diag(d, -1))
B = np.zeros(N)
B[0::2] = 2**.5
B[0] = 1
# Subtract off rank correction - this corresponds to the other endpoint u(t-1) in this case
A = A - B[:, None] * B[None, :]
B = B[:, None]
elif measure == 'fourier_decay':
freqs = np.arange(N//2)
d = np.stack([np.zeros(N//2), freqs], axis=-1).reshape(-1)[1:]
A = np.pi*(-np.diag(d, 1) + np.diag(d, -1))
B = np.zeros(N)
B[0::2] = 2**.5
B[0] = 1
# Subtract off rank correction - this corresponds to the other endpoint u(t-1) in this case
A = A - .5 * B[:, None] * B[None, :]
B = .5 * B[:, None]
elif measure == 'fourier2': # Double everything: orthonormal on [0, 1]
freqs = 2*np.arange(N//2)
d = np.stack([np.zeros(N//2), freqs], axis=-1).reshape(-1)[1:]
A = np.pi*(-np.diag(d, 1) + np.diag(d, -1))
B = np.zeros(N)
B[0::2] = 2**.5
B[0] = 1
# Subtract off rank correction - this corresponds to the other endpoint u(t-1) in this case
A = A - B[:, None] * B[None, :] * 2
B = B[:, None] * 2
elif measure == 'random':
A = np.random.randn(N, N) / N
B = np.random.randn(N, 1)
elif measure == 'diagonal':
A = -np.diag(np.exp(np.random.randn(N)))
B = np.random.randn(N, 1)
else:
raise NotImplementedError
return A, B
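# Hedged sanity-check sketch: for 'legs', the construction above reduces to the closed
# form A[n, k] = -sqrt((2n+1)(2k+1)) for n > k, -(n+1) for n == k, and 0 for n < k.
# The helper below is illustrative and not part of the module API.
def _check_legs_closed_form(N=8):
    A, _ = transition('legs', N)
    n, k = np.meshgrid(np.arange(N), np.arange(N), indexing='ij')
    closed = np.where(n > k, -np.sqrt((2.0*n + 1) * (2.0*k + 1)), 0.0) - np.diag(np.arange(N) + 1.0)
    assert np.allclose(A, closed)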
def rank_correction(measure, N, rank=1, dtype=torch.float):
"""Return low-rank matrix L such that A + L is normal."""
if measure == 'legs':
assert rank >= 1
P = torch.sqrt(.5+torch.arange(N, dtype=dtype)).unsqueeze(0) # (1 N)
elif measure == 'legt':
assert rank >= 2
P = torch.sqrt(1+2*torch.arange(N, dtype=dtype)) # (N)
P0 = P.clone()
P0[0::2] = 0.
P1 = P.clone()
P1[1::2] = 0.
P = torch.stack([P0, P1], dim=0) # (2 N)
P *= 2**(-0.5) # Halve the rank correct just like the original matrix was halved
elif measure == 'lagt':
assert rank >= 1
P = .5**.5 * torch.ones(1, N, dtype=dtype)
elif measure in ['fourier', 'fout']:
P = torch.zeros(N)
P[0::2] = 2**.5
P[0] = 1
P = P.unsqueeze(0)
elif measure == 'fourier_decay':
P = torch.zeros(N)
P[0::2] = 2**.5
P[0] = 1
P = P.unsqueeze(0)
P = P / 2**.5
elif measure == 'fourier2':
P = torch.zeros(N)
P[0::2] = 2**.5
P[0] = 1
P = 2**.5 * P.unsqueeze(0)
elif measure in ['fourier_diag', 'foud', 'legsd']:
P = torch.zeros(1, N, dtype=dtype)
else: raise NotImplementedError
d = P.size(0)
if rank > d:
P = torch.cat([P, torch.zeros(rank-d, N, dtype=dtype)], dim=0) # (R N)
return P
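# Hedged sanity-check sketch: for 'legs' the rank-1 correction makes A "normal up to
# identity", i.e. (A + P^T P) + (A + P^T P)^T == -I. Illustrative helper only.
def _check_legs_rank_correction(N=16):
    A, _ = transition('legs', N)
    A = torch.as_tensor(A, dtype=torch.float)
    P = rank_correction('legs', N)          # (1, N)
    AP = A + contract('rn,rm->nm', P, P)    # add the low-rank term P^T P
    assert torch.allclose(AP + AP.transpose(-1, -2), -torch.eye(N), atol=1e-4)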
def initial_C(measure, N, dtype=torch.float):
"""Return C that captures the other endpoint in the HiPPO approximation."""
if measure == 'legt':
C = (torch.arange(N, dtype=dtype)*2+1)**.5 * (-1)**torch.arange(N)
elif measure == 'fourier':
C = torch.zeros(N)
C[0::2] = 2**.5
C[0] = 1
else:
C = torch.zeros(N, dtype=dtype) # (N)
return C
def nplr(measure, N, rank=1, dtype=torch.float, diagonalize_precision=True, B_clip=2.0):
"""Constructs NPLR form of HiPPO matrices.
Returns w, p, q, V, B such that
(w - p q^*, B) is unitarily equivalent to the original HiPPO A, B by the matrix V
i.e. A = V[w - p q^*]V^*, B = V B
measure: Name of HiPPO method.
N: Size of recurrent A matrix (also known as `d_state` elsewhere).
dtype: Single or double precision.
diagonalize_precision: Calculate diagonalization in double precision.
B_clip: Clip values of B, can help with stability. None for no clipping.
"""
assert dtype == torch.float or dtype == torch.double
cdtype = torch.cfloat if dtype == torch.float else torch.cdouble
A, B = transition(measure, N)
A = torch.as_tensor(A, dtype=dtype) # (N, N)
B = torch.as_tensor(B, dtype=dtype)[:, 0] # (N,)
P = rank_correction(measure, N, rank=rank, dtype=dtype) # (r N)
AP = A + torch.sum(P.unsqueeze(-2)*P.unsqueeze(-1), dim=-3)
# We require AP to be nearly skew-symmetric
_A = AP + AP.transpose(-1, -2)
if (err := torch.sum((_A - _A[0,0]*torch.eye(N))**2) / N) > 1e-5: # if not torch.allclose(_A - _A[0,0]*torch.eye(N), torch.zeros(N, N), atol=1e-5):
print("WARNING: HiPPO matrix not skew symmetric", err)
# Take advantage of identity + skew-symmetric form to calculate real and imaginary parts separately
# Imaginary part can use eigh instead of eig
W_re = torch.mean(torch.diagonal(AP), -1, keepdim=True)
# Diagonalize in double precision
if diagonalize_precision: AP = AP.to(torch.double)
# w, V = torch.linalg.eig(AP) # (..., N) (..., N, N)
W_im, V = torch.linalg.eigh(AP*-1j) # (..., N) (..., N, N)
if diagonalize_precision: W_im, V = W_im.to(cdtype), V.to(cdtype)
W = W_re + 1j * W_im
# Check: V W V^{-1} = A
# print("check", V @ torch.diag_embed(W) @ V.conj().transpose(-1, -2))
# Only keep half of each conjugate pair
_, idx = torch.sort(W.imag)
W_sorted = W[idx]
V_sorted = V[:, idx]
# There is an edge case when eigenvalues can be 0, which requires some machinery to handle
# We use a huge hack here: Assume only one pair is 0, and that it is the first row/column of A (only happens in Fourier case)
V = V_sorted[:, :N//2]
W = W_sorted[:N//2] # Only keep negative imaginary components
assert W[-2].abs() > 1e-4, "Only 1 zero eigenvalue allowed in diagonal part of A"
if W[-1].abs() < 1e-4:
V[:, -1] = 0.
V[0, -1] = 2**-0.5
V[1, -1] = 2**-0.5 * 1j
_AP = V @ torch.diag_embed(W) @ V.conj().transpose(-1, -2)
if ((err := torch.sum((2*_AP.real-AP)**2)/N) > 1e-5):
print("Warning: Diagonalization of A matrix not numerically precise - error", err)
# print("check", V @ torch.diag_embed(W) @ V.conj().transpose(-1, -2))
V_inv = V.conj().transpose(-1, -2)
# C = initial_C(measure, N, dtype=dtype)
B = contract('ij, j -> i', V_inv, B.to(V)) # V^* B
# C = contract('ij, j -> i', V_inv, C.to(V)) # V^* C
P = contract('ij, ...j -> ...i', V_inv, P.to(V)) # V^* P
if B_clip is not None:
B = B.real + 1j*torch.clamp(B.imag, min=-B_clip, max=B_clip)
# W represents the imaginary part of the DPLR form: A = W - PP^*
# Downstream classes just call this A for simplicity,
# which is also more consistent with the diagonal case
return W, P, B, V
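# Hedged verification sketch mirroring the internal check above: the returned (W, V)
# should reproduce the normal part A + P^T P of the HiPPO matrix (the factor 2 accounts
# for keeping only half of each conjugate pair). Illustrative helper only.
def _check_nplr(measure='legs', N=64):
    W, P, B, V = nplr(measure, N)
    A0, _ = transition(measure, N)
    A0 = torch.as_tensor(A0, dtype=torch.float)
    P0 = rank_correction(measure, N)
    AP = A0 + contract('rn,rm->nm', P0, P0)
    AP_rec = 2 * (V @ torch.diag_embed(W) @ V.conj().transpose(-1, -2)).real
    print("max reconstruction error:", (AP_rec - AP).abs().max().item())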
| state-spaces-main | src/models/hippo/hippo.py |
from .base import SequenceModule, TransposedModule
| state-spaces-main | src/models/sequence/__init__.py |
"""Defines base class SequenceModule, a modular interface for sequence models."""
from torch import nn
import functools
class SequenceModule(nn.Module):
"""Abstract sequence model class. All models must adhere to this interface.
A SequenceModule is generally a model that transforms an input of shape
(n_batch, l_sequence, d_model) to (n_batch, l_sequence, d_output)
REQUIRED methods and attributes
forward, d_model, d_output: controls standard forward pass, a sequence-to-sequence transformation
__init__ should also satisfy the following interface; see SequenceIdentity for an example
def __init__(self, d_model, transposed=False, **kwargs)
OPTIONAL methods
default_state, step: allows stepping the model recurrently with a hidden state
state_to_tensor, d_state: allows decoding from hidden state
"""
@property
def d_model(self):
"""Model dimension (generally same as input dimension).
This attribute is required for all SequenceModule instantiations.
It is used by the rest of the pipeline (e.g. model backbone, encoder) to track the internal shapes of the full model.
"""
if getattr(self, "_d_model", None) is None:
raise NotImplementedError("SequenceModule instantiation must set d_model")
return self._d_model
@d_model.setter
def d_model(self, d):
self._d_model = d
@property
def d_output(self):
"""Output dimension of model.
This attribute is required for all SequenceModule instantiations.
It is used by the rest of the pipeline (e.g. model backbone, decoder) to track the internal shapes of the full model.
"""
if getattr(self, "_d_output", None) is None:
raise NotImplementedError("SequenceModule instantiation must specify d_output for decoder")
return self._d_output
@d_output.setter
def d_output(self, d):
self._d_output = d
def forward(self, x, state=None, **kwargs):
"""Forward pass of sequence model, a sequence-to-sequence transformation with an optional state.
Generally, this should map a tensor of shape (batch, length, self.d_model) to (batch, length, self.d_output)
Additionally, it returns a "state" which can be any additional information
For example, RNN and SSM layers may return their hidden state,
while some types of transformer layers (e.g. Transformer-XL) may want to pass a state as well
"""
return x, None
@property
def state_to_tensor(self):
"""Returns a function mapping a state to a single tensor.
This method should be implemented if one wants to use the hidden state instead of the output sequence for final prediction.
Currently only used with the StateDecoder.
"""
return lambda _: None
@property
def d_state(self):
""" Returns dimension of output of self.state_to_tensor """
return None
def default_state(self, *batch_shape, device=None):
"""Create initial state for a batch of inputs."""
return None
def step(self, x, state=None, **kwargs):
"""Step the model recurrently for one step of the input sequence.
For example, this should correspond to unrolling an RNN for one step.
If the forward pass has signature (B, L, H1) -> (B, L, H2),
this method should generally have signature (B, H1) -> (B, H2) with an optional recurrent state.
"""
raise NotImplementedError
def TransposedModule(module):
"""Wrap a SequenceModule class to accept transposed parameter, handle state, absorb kwargs."""
# https://stackoverflow.com/a/65470430/1980685
@functools.wraps(module, updated=())
class TransposedModule(module):
def __init__(self, *args, transposed=False, **kwargs):
super().__init__(*args, **kwargs)
self.transposed = transposed
def forward(self, x, state=None, **kwargs):
if self.transposed: x = x.transpose(-1, -2)
x, next_state = super().forward(x, state) # Don't use kwarg because nn.LSTM
next_state = None if state is None else next_state
if self.transposed: x = x.transpose(-1,-2)
return x, next_state
# https://stackoverflow.com/questions/5352781/how-to-set-class-names-dynamically
# TransposedModule.__name__ = module.__name__ # functools wraps is better solution
return TransposedModule
@TransposedModule
class SequenceIdentity(SequenceModule):
"""Simple SequenceModule for testing purposes."""
def __init__(self, d_model, dropout=0.0, **kwargs):
"""Default interface for SequenceModule
d_model: input dimension (sometimes denoted H for hidden dimension)
transposed: if True, inputs have axis ordering (B, H, L) instead of (B, L, H)
"""
super().__init__()
self.d_model = d_model
self.d_output = d_model
def forward(self, x, state=None):
return x, state
def default_state(self, *batch_shape, device=None):
return None
def step(self, x, state=None, **kwargs):
return x, state
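# Hedged usage sketch of the interface contract: a SequenceModule maps
# (batch, length, d_model) -> (batch, length, d_output) and returns an optional state.
if __name__ == "__main__":
    import torch
    layer = SequenceIdentity(d_model=16)
    x = torch.randn(2, 100, 16)
    y, state = layer(x)
    assert y.shape == (2, 100, 16) and state is None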
| state-spaces-main | src/models/sequence/base.py |
"""Wrapper around nn.Conv1d to adhere to SequenceModule interface."""
import torch
import torch.nn.functional as F
from torch import nn
import hydra
from src.models.sequence.base import SequenceModule
from einops import rearrange
import src.models.nn.utils as U
from src.models.nn import Activation
class Conv1d(SequenceModule):
""" Simple wrapper for nn.Conv1d """
def __init__(self, d_model, *args, d_output=None, activation='gelu', dropout=0.0, transposed=True, **kwargs):
# Accepted kwargs passed into Conv1d interface
# torch.nn.Conv1d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', device=None, dtype=None)
super().__init__()
self.d_model = d_model
if d_output is None: d_output = d_model
self.d_output = d_output
self.transposed = transposed
self.conv1d = nn.Conv1d(d_model, d_output, *args, **kwargs)
self.activation = Activation(activation)
def forward(self, x, resolution=None, state=None, *args, **kwargs):
if not self.transposed: x = x.transpose(-1, -2)
y = self.conv1d(x)
if not self.transposed: y = y.transpose(-1, -2)
y = self.activation(y)
return y, None
def step(self, x, state):
raise NotImplementedError
| state-spaces-main | src/models/sequence/convs/conv1d.py |
"""Wrapper around nn.Conv2d to adhere to SequenceModule interface."""
import torch
from torch import nn
from src.models.sequence.base import SequenceModule
from src.models.nn import Activation, DropoutNd
class Conv2d(SequenceModule):
""" Simple wrapper for nn.Conv1d """
def __init__(self, d_model, d_output=None, activation='gelu', depthwise=False, dropout=0.0, tie_dropout=False, transposed=True, **kwargs):
# kwargs passed into Conv2d interface:
# torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', device=None, dtype=None)
super().__init__()
valid_kwargs = ["in_channels","out_channels","kernel_size","stride",
"padding","padding_mode","dilation","groups","bias"]
kwargs = {k:v for k,v in kwargs.items() if k in valid_kwargs}
self.d_model = d_model
if d_output is None: d_output = d_model
self.d_output = d_output
self.transposed = transposed
self.depthwise = depthwise
if self.depthwise:
self.conv2d = nn.Conv2d(d_model, d_model, padding='same', groups=d_model, **kwargs)
self.linear = nn.Conv2d(d_model, d_output, 1, 1)
else:
self.conv2d = nn.Conv2d(d_model, d_output, padding='same', **kwargs)
self.linear = nn.Identity()
dropout_fn = DropoutNd if tie_dropout else nn.Dropout
self.dropout = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
self.activation = Activation(activation)
def forward(self, x, resolution=None, state=None, *args, **kwargs):
if not self.transposed: x = x.transpose(-1, -3)
y = self.conv2d(x)
y = self.activation(y) # NOTE doesn't work with glu
y = self.dropout(y)
y = self.linear(y)
if not self.transposed: y = y.transpose(-1, -3)
return y, None
def step(self, x, state):
raise NotImplementedError
| state-spaces-main | src/models/sequence/convs/conv2d.py |
"""Module for FFT convolution that accepts a flexible kernel parameterization."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from src.models.sequence import SequenceModule
from src.models.sequence.kernels import registry as kernel_registry
from src.models.nn import Activation, DropoutNd
contract = torch.einsum
class FFTConv(SequenceModule):
"""Implements an FFT Convolution around a convolution kernel.
d_model (H): Model dimension (in CNN terminology, this would be "channels").
l_max (L): The maximum kernel length. Set l_max=None to always use a global kernel.
channels: Can be interpreted as a number of "heads"; the SSM is a map from a 1-dim to C-dim sequence. It's not recommended to change this; instead, increase d_model for larger models.
bidirectional: If True, convolution kernel will be two-sided.
activation: Activation after the full convolution.
transposed, dropout, tie_dropout: More general model options, see SequenceModule.
mode: Which kernel algorithm to use. 'nplr' is the full S4 model; 'diag' is the simpler S4D. Other options can be found in the kernel registry.
kernel_args: See the class .kernel.SSMKernel for the kernel constructor which accepts kernel_args. Relevant options that are worth considering and tuning include "mode", "init", "dt_min", "dt_max", "lr"
"""
def __init__(
self,
d_model,
l_max=None,
channels=1,
swap_channels=False,
bidirectional=False,
activation=None, # Activation after layer
transposed=True,
dropout=0.0,
tie_dropout=False,
drop_kernel=0.0,
mode='dplr',
kernel=None,
**kernel_args, # Arguments passed into inner convolution kernel
):
super().__init__()
self.d_model = d_model
self.L = self.l_max = l_max
self.bidirectional = bidirectional
self.channels = channels
self.transposed = transposed
self.swap_channels = swap_channels
if activation is not None and activation.startswith('glu'):
channels *= 2
self.activation = Activation(activation, dim=1 if self.transposed else -1)
self.D = nn.Parameter(torch.randn(channels, self.d_model))
if self.bidirectional:
channels *= 2
# Inner convolution kernel
if mode is not None:
assert kernel is None, "Pass either mode or kernel but not both"
# log.info(
# "Argument 'mode' is deprecated and renamed to 'kernel',"
# "and will be removed in a future version."
# )
kernel, mode = mode, kernel
kernel_cls = kernel_registry[kernel]
self.kernel = kernel_cls(
d_model=self.d_model,
l_max=self.l_max,
channels=channels,
**kernel_args,
)
dropout_fn = DropoutNd if tie_dropout else nn.Dropout
self.drop = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
self.drop_kernel = nn.Dropout(drop_kernel) if drop_kernel > 0.0 else nn.Identity()
def forward(self, x, state=None, rate=1.0, **kwargs): # absorbs return_output and transformer src mask
"""
x: (B D L) if self.transposed else (B L D)
"""
# Always work with (B D L) dimension in this module
if not self.transposed: x = x.transpose(-1, -2)
L = x.size(-1)
# Compute SS Kernel
l_kernel = L if self.L is None else min(L, round(self.L / rate))
k, k_state = self.kernel(L=l_kernel, rate=rate, state=state) # (C H L) (B C H L)
# Convolution
if self.bidirectional:
k0, k1 = rearrange(k, '(s c) h l -> s c h l', s=2)
k = F.pad(k0, (0, L)) \
+ F.pad(k1.flip(-1), (L, 0))
# The above has an off-by-one in the reverse direction
# This is a deliberate choice since the off-by-one should not affect any applications
# This can be amended which may be very slightly slower
# k = F.pad(k0, (0, L)) \
# + F.pad(k1[..., 1:].flip(-1), (L+1, 0)) \
# + F.pad(k1[..., :1], (0, l_kernel+L-1))
# Kernel dropout
k = self.drop_kernel(k)
# In principle, we could pad to l_kernel+L-1 instead of l_kernel+L, but we choose the latter for
# equational simplicity. Additionally, we have not experimented to compare the efficiency of the two.
k_f = torch.fft.rfft(k, n=l_kernel+L) # (C H L)
x_f = torch.fft.rfft(x, n=l_kernel+L) # (B H L)
y_f = contract('bhl,chl->bchl', x_f, k_f)
y = torch.fft.irfft(y_f, n=l_kernel+L)[..., :L] # (B C H L)
# Compute D term in state space equation - essentially a skip connection
y = y + contract('bhl,ch->bchl', x, self.D)
# Compute state update
if state is not None:
assert not self.bidirectional, "Bidirectional not supported with state forwarding"
y = y + k_state #
next_state = self.kernel.forward_state(x, state)
else:
next_state = None
# Reshape to flatten channels
if self.swap_channels:
y = rearrange(y, 'b c h l -> b (h c) l')
else:
y = rearrange(y, 'b c h l -> b (c h) l')
y = self.drop(y) # DropoutNd better with transposed=True
if not self.transposed: y = y.transpose(-1, -2)
y = self.activation(y)
return y, next_state
def setup_step(self, **kwargs):
self.kernel._setup_step(**kwargs)
def step(self, x, state):
"""Step one time step as a recurrent model. Intended to be used during validation.
x: (B H)
state: (B H N)
Returns: output (B H), state (B H N)
"""
y, next_state = self.kernel.step(x, state) # (B C H)
y = y + x.unsqueeze(-2) * self.D
y = rearrange(y, 'b c h -> b (c h)')
y = self.activation(y)
return y, next_state
def default_state(self, *batch_shape, device=None):
# kernel is not a SequenceModule so it doesn't need to adhere to same interface
# the kernel will know the device of its own parameters
return self.kernel.default_state(*batch_shape)
@property
def d_state(self):
return self.kernel.d_state
@property
def d_output(self):
return self.d_model * self.channels
@property
def state_to_tensor(self):
return self.kernel.state_to_tensor
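# Hedged illustration of the FFT identity used in forward() above: zero-padding to
# (l_kernel + L) and truncating the inverse transform to L gives exact causal
# convolution. Standalone sketch; names are local and not part of the module API.
def _fft_causal_conv_check(B=2, H=3, L=17, l_kernel=9):
    x = torch.randn(B, H, L)
    k = torch.randn(H, l_kernel)
    k_f = torch.fft.rfft(k, n=l_kernel + L)
    x_f = torch.fft.rfft(x, n=l_kernel + L)
    y_fft = torch.fft.irfft(x_f * k_f, n=l_kernel + L)[..., :L]
    # Direct causal convolution for reference (depthwise conv with a flipped kernel)
    y_ref = F.conv1d(F.pad(x, (l_kernel - 1, 0)), k.flip(-1).unsqueeze(1), groups=H)
    assert torch.allclose(y_fft, y_ref, atol=1e-4)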
| state-spaces-main | src/models/sequence/kernels/fftconv.py |
"""Construct wide convolution kernels."""
from typing import Optional, Mapping, Tuple, Union
from collections import defaultdict
import math
import torch
import torch.nn as nn
import src.utils.train
log = src.utils.train.get_logger(__name__)
class Kernel(nn.Module):
"""Interface for modules that produce convolution kernels.
A main distinction between these and normal Modules is that the forward pass
does not take inputs. It is a mapping from parameters to a tensor that can
be used in other modules, in particular as a convolution kernel.
Because of the unusual parameterization, these kernels may often want special
hyperparameter settings on their parameters. The `register` method provides
an easy interface for controlling this, and is intended to be used with an
optimizer hook that can be found in train.py or example.py.
This class also defines an interface for interacting with kernels *statefully*,
in particular for state space models (SSMs). This interface handles the setting
when a model can be converted from a "CNN" into an "RNN".
_setup_step()
step()
default_state()
forward_state()
See ConvKernel for the simplest instantiation of this interface.
"""
def __init__(
self,
d_model: int = 0,
channels: int = 1,
l_max: Optional[int] = None,
lr: Union[float, Optional[Mapping]] = None,
wd: Union[float, Optional[Mapping]] = 0.0,
verbose: bool = True,
**kwargs,
):
"""General interface.
d_model (H): Model dimension, or number of independent convolution kernels created.
channels (C): Extra dimension in the returned output (see .forward()).
- One interpretation is that it expands the input dimension giving it C separate "heads" per feature.
That is convolving by this kernel maps shape (B L D) -> (B L C D)
- This is also used to implement a particular form of bidirectionality in an efficient way.
- In general for making a more powerful model, instead of increasing C
it is recommended to set channels=1 and adjust H to control parameters instead.
l_max (L): Maximum kernel length (optional). If unspecified, most Kernel instantiations
will return kernels of arbitrary length as passed into .forward().
lr: Optional dictionary specifying special hyperparameters for .register().
Passing in a number (e.g. 0.001) sets attributes of SSM parameters (A, B, dt).
A custom optimizer hook is needed to configure the optimizer to set the learning rates appropriately for these parameters.
wd: Same as lr, but for weight decay.
"""
super().__init__()
assert d_model > 0
self.H = self.d_model = d_model
self.L = self.l_max = l_max
self.channels = channels
self.lr = lr
self.wd = wd
self.verbose = verbose
# Add a catch-all **kwargs to make it easier to change kernels
# without manually moving other options passed in the config.
# Good to log these just so it's explicit.
if self.verbose and len(kwargs) > 0:
log.info(f"{type(self)} extra kwargs: {kwargs}")
# Logic for registering parameters
# Case 1: lr: None | float
# All params should have this lr (None means inherit from global lr)
# Case 2: lr: dict
# Specified params should have that lr, all others should be None
if self.lr is None or isinstance(self.lr, float):
self.lr_dict = defaultdict(lambda: self.lr)
else:
self.lr_dict = defaultdict(lambda: None)
self.lr_dict.update(self.lr)
# Same logic for weight decay
# (but is always just set to 0.0 and hasn't been ablated)
if self.wd is None or isinstance(self.wd, float):
self.wd_dict = defaultdict(lambda: self.wd)
else:
self.wd_dict = defaultdict(lambda: None)
self.wd_dict.update(self.wd)
def forward(self, state=None, rate=1.0, L=None):
"""General interface to generate a global convolution kernel.
state: Initial state for recurrent updates.
E.g. for SSMs, this should have shape (B, H, N) (batch, d_model, d_state).
rate: Relative sampling rate.
L: Target kernel length.
Returns:
- (C, H, L) (channels, d_model, l_kernel) The convolution kernel.
- (B, H, L) (batch, d_model, l_kernel)
Extra information for how the state affects the output of convolving by kernel.
"""
raise NotImplementedError
def register(self, name, tensor, lr=None, wd=0.0):
"""Register a tensor with a configurable learning rate and 0 weight decay"""
if lr == 0.0:
self.register_buffer(name, tensor)
else:
self.register_parameter(name, nn.Parameter(tensor))
optim = {}
if lr is not None: optim["lr"] = lr
if wd is not None: optim["weight_decay"] = wd
setattr(getattr(self, name), "_optim", optim)
def _setup_step(self, **kwargs):
"""Convert a model into a recurrent mode for autoregressive inference."""
raise NotImplementedError
def step(self, x, state, **kwargs):
"""Step the model for one timestep with input x and recurrent state."""
raise NotImplementedError
def default_state(self, *args, **kwargs):
"""Return a default initial state."""
raise NotImplementedError
@torch.no_grad()
def forward_state(self, u, state):
"""Forward the state through a sequence, i.e. computes the state after passing chunk through the kernel."""
raise NotImplementedError
@property
def d_state(self):
"""Implement this for interfaces that want to interact with a stateful layer (i.e. SSMs).
Currently the only codepath that might use this is the StateDecoder, which is not used.
"""
raise NotImplementedError
@property
def state_to_tensor(self):
"""Same as d_state, only needed for niche codepaths involving recurrent state."""
raise NotImplementedError
class ConvKernel(Kernel):
"""Baseline implemented as a free convolution kernel."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
assert self.L is not None
kernel = torch.randn(self.channels, self.H, self.L) / (self.H*self.L)**0.5
# Register parameters
self.register("kernel", kernel, self.lr_dict['K'], self.wd_dict['K'])
def forward(self, state=None, rate=1.0, L=None):
return self.kernel, None
class EMAKernel(Kernel):
"""Translation of Mega's MultiHeadEMA.
This is a minimal implementation of the convolution kernel part of the module.
This module, together with the main S4 block in src.models.sequence.modules.s4block
(which is really just a fft-conv wrapper around any convolution kernel,
such as this one), should be exactly equivalent to using the original Mega
EMA module in src.models.sequence.modules.megablock.
    Two additional flags have been provided to resolve discrepancies in parameter
count between S4(D) and EMA
- `dt_tie` makes the shape of the step size \\Delta (H, 1) instead of (H, N)
- `efficient_bidirectional` ties the A/B/dt parameters for the conv kernels
in both forwards and backwards directions. This should have exactly the same
speed, slightly more parameter efficiency, and similar performance.
"""
def __init__(
self,
d_state: int = 2,
dt_tie: bool = False,
efficient_bidirectional: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.N = N = d_state
self.channels = self.channels
self.scale = math.sqrt(1.0 / self.N)
        # Exactly match the parameter count of S4(D) when bidirectional is on
self.efficient_bidirectional = efficient_bidirectional
if self.efficient_bidirectional:
H_C = self.H * self.channels
else:
self.H *= self.channels
H_C = self.H
delta = torch.Tensor(self.H, 1 if dt_tie else N, 1)
alpha = torch.Tensor(self.H, N, 1)
beta = torch.Tensor(self.H, N, 1)
self.register("delta", delta, self.lr_dict['dt'], self.wd_dict['dt'])
self.register("alpha", alpha, self.lr_dict['dt'], self.wd_dict['dt'])
self.register("beta", beta, self.lr_dict['dt'], self.wd_dict['dt'])
self.gamma = nn.Parameter(torch.Tensor(H_C, N))
# D skip connection handled by outside class
# self.omega = nn.Parameter(torch.Tensor(H))
self.reset_parameters()
def reset_parameters(self):
with torch.no_grad():
nn.init.normal_(self.delta, mean=0.0, std=0.2)
nn.init.normal_(self.alpha, mean=0.0, std=0.2)
# Mega comment: beta [1, -1, 1, -1, ...] seems more stable.
val = torch.ones(self.N, 1)
if self.N > 1:
idx = torch.tensor(list(range(1, self.N, 2)))
val.index_fill_(0, idx, -1.0)
self.beta.normal_(mean=0.0, std=0.02).add_(val)
nn.init.normal_(self.gamma, mean=0.0, std=1.0)
# nn.init.normal_(self.omega, mean=0.0, std=1.0)
def coeffs(self): # Same as discretize
p = torch.sigmoid(self.delta) # (H N 1)
alpha = torch.sigmoid(self.alpha)
q = 1.0 - p * alpha
return p, q
def forward(self, L=None, state=None, rate=1.0):
L = L if self.l_max is None else min(self.l_max, L)
p, q = self.coeffs() # (H N 1)
vander = torch.arange(L).to(p).view(1, 1, L) * torch.log(q) # (H N L)
kernel = (p * self.beta) * torch.exp(vander)
if self.efficient_bidirectional:
C = rearrange(self.gamma * self.scale, '(c h) n -> c h n', c=self.channels)
kernel = torch.einsum('dnl,cdn->cdl', kernel, C)
else:
kernel = torch.einsum('dnl,dn->dl', kernel, self.gamma * self.scale)
kernel = rearrange(kernel, '(c h) l -> c h l', c=self.channels)
kernel = kernel[..., :L]
return kernel, None
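# Illustrative usage sketch (not part of the original module): shows the basic
# Kernel interface, where calling the module returns a convolution kernel of
# shape (channels, d_model, L) plus an optional state contribution (None here).
# Parameter values below are arbitrary and chosen only for illustration.
def _example_kernel_shapes():
    conv_kernel = ConvKernel(d_model=4, channels=1, l_max=16)
    k, _ = conv_kernel()                 # (channels, d_model, l_max) = (1, 4, 16)
    assert k.shape == (1, 4, 16)
    ema_kernel = EMAKernel(d_model=4, channels=1, d_state=2)
    k, _ = ema_kernel(L=16)              # (1, 4, 16)
    assert k.shape == (1, 4, 16)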
| state-spaces-main | src/models/sequence/kernels/kernel.py |
from .kernel import ConvKernel, EMAKernel
from .ssm import SSMKernelDense, SSMKernelReal, SSMKernelDiag, SSMKernelDPLR
registry = {
'conv': ConvKernel,
'ema': EMAKernel,
'dense': SSMKernelDense,
'slow': SSMKernelDense,
'real': SSMKernelReal,
's4d': SSMKernelDiag,
'diag': SSMKernelDiag,
's4': SSMKernelDPLR,
'nplr': SSMKernelDPLR,
'dplr': SSMKernelDPLR,
}
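# Illustrative sketch (not part of the original file): the registry maps the
# kernel names used in configs to kernel classes, so a kernel can be built from
# a string. The helper below is hypothetical and only shows the intended lookup.
def _example_build_kernel(mode='s4d', **kernel_args):
    """Instantiate a kernel class by its registry name (e.g. 's4', 's4d', 'ema')."""
    return registry[mode](**kernel_args)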
| state-spaces-main | src/models/sequence/kernels/__init__.py |
"""SSM convolution kernels.
SSMKernelDPLR is the S4 kernel, implementing the 'diagonal plus low-rank' algorithm from the original S4 paper. This stores parameters A, B, C, dt, and calling it creates the SSM convolution kernel bar{K}.
SSMKernelDense is a much simpler version included for illustration purposes. It has the same output, but uses the naive SSM algorithm which is much slower. This module is meant for testing and exposition, to understand what the SSM Kernel actually does.
SSMKernelDiag is the S4D kernel, a simpler algorithm for computing the kernel for the case of diagonal state matrices A.
SSMKernel wraps these with common options and handles the initialization.
"""
from typing import Optional, Mapping, Tuple, Union
from collections import defaultdict
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor # For type hints
import numpy as np
from einops import rearrange, repeat
import src.models.hippo.hippo as hippo
import src.models.sequence.kernels.dplr as dplr
from src.models.functional.krylov import krylov, power
import src.utils.train
log = src.utils.train.get_logger(__name__)
# Try CUDA extension
try:
from extensions.kernels.cauchy import cauchy_mult as cauchy_cuda
from extensions.kernels.vandermonde import log_vandermonde_cuda
has_cuda_extension = True
log.info("CUDA extension for structured kernels (Cauchy and Vandermonde multiplication) found.")
except:
log.warning(
"CUDA extension for structured kernels (Cauchy and Vandermonde multiplication) not found. Install by going to extensions/kernels/ and running `python setup.py install`, for improved speed and memory efficiency. Note that the kernel changed for state-spaces 4.0 and must be recompiled."
)
has_cuda_extension = False
try:
import pykeops
from src.models.functional.cauchy import cauchy_conj as cauchy_keops
from src.models.functional.vandermonde import log_vandermonde as log_vandermonde_keops, log_vandermonde_transpose as log_vandermonde_transpose_keops
has_pykeops = True
log.info("Pykeops installation found.")
except ImportError:
has_pykeops = False
if not has_cuda_extension:
log.warning(
"Falling back on slow Cauchy and Vandermonde kernel. Install at least one of pykeops or the CUDA extension for better speed and memory efficiency."
)
# Fallback versions
from src.models.functional.cauchy import cauchy_naive
from src.models.functional.vandermonde import log_vandermonde_naive
from src.models.functional.vandermonde import log_vandermonde_transpose_naive
# Base Kernel class
from src.models.sequence.kernels.kernel import Kernel
# Alias torch.einsum; can easily swap to opt_einsum if desired
contract = torch.einsum
_isnan = lambda x: torch.isnan(x).any()
_isinf = lambda x: torch.isinf(x).any()
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)
_c2r = torch.view_as_real
_r2c = torch.view_as_complex
if tuple(map(int, torch.__version__.split('.')[:2])) >= (1, 10):
_resolve_conj = lambda x: x.conj().resolve_conj()
else:
_resolve_conj = lambda x: x.conj()
def inv_transform(param, transform='none'):
"""Initialize a (positive) parameter under a transform."""
param = torch.clamp(param, min=1e-4)
if transform == 'none':
return param
elif transform == 'exp':
return torch.log(param) # Some of the HiPPO methods have real part 0
elif transform == 'relu':
return param
elif transform == 'sigmoid':
return torch.logit(param)
elif transform == 'softplus':
return torch.log(torch.exp(param)-1)
else: raise NotImplementedError
def param_transform(param, transform='none'):
"""Get a (positive) parameter under a transform."""
if transform == 'none':
p = param
elif transform == 'exp':
p = torch.exp(param)
elif transform == 'relu':
# JAX version seems to NaN if you allow 0's, although this code was fine without it
p = F.relu(param)+1e-4
elif transform == 'sigmoid':
p = F.sigmoid(param)
elif transform == 'softplus':
p = F.softplus(param)
else: raise NotImplementedError
return p
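# Illustrative check (not part of the original file): param_transform inverts
# inv_transform for each transform that has an exact inverse. 'relu' is
# excluded because its forward pass adds a small 1e-4 offset.
def _example_transform_roundtrip():
    x = torch.rand(4) * 0.5 + 0.25       # positive values strictly inside (0, 1)
    for t in ['none', 'exp', 'sigmoid', 'softplus']:
        assert torch.allclose(param_transform(inv_transform(x, t), t), x, atol=1e-5)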
class SSMKernel(Kernel):
"""Parent class for different SSM parameterizations.
This class is abstract and only defines some initializations and flags that are common to all SSM variants.
It is instantiated by subclasses SSMKernel{Dense,Real,Diag,DPLR}.
Options:
    d_state (N): State size (dimensionality of parameters A, B, C). Generally shouldn't need to be adjusted and doesn't affect speed much for most kernels (e.g. S4, S4D).
deterministic: Use a deterministic initialization for dt, A, B, C.
Useful for debugging as well as constructing a simple exponential decay kernel (e.g. used in S4ND image->video inflation).
dt_min, dt_max: min and max values for the step size dt
    dt_tie: Keep dt tied across the N dimensions of the state. Although this theoretically makes more sense, models such as S5 and Mega have found slight improvements by setting it to False.
dt_transform: Transform function for parameterization of dt (default 'softplus', used to be 'exp')
rank: Rank of low-rank correction for DPLR mode. Needs to be increased for init "legt".
n_ssm: Number of independent trainable (A, B) SSMs, e.g.
`n_ssm=1` means all A/B parameters are tied across the H different instantiations of C.
`n_ssm=None` means all H SSMs are completely independent.
Generally, changing this option can save parameters but doesn't affect performance or speed much.
This parameter must divide H.
init: Options for initialization of (A, B). For DPLR mode, recommendations are "legs", "fout", "hippo" (combination of both). For Diag mode, recommendations are "diag-inv", "diag-lin", "diag-legs", and "diag" (combination of diag-inv and diag-lin).
init_args: Extra arguments passed into initialization function (see dplr.py for options).
"""
def init_dt(self):
# Generate dt
if self.deterministic: # Meant for debugging
assert self.dt_tie, "Deterministic dt initialization is tied"
assert self.dt_transform == 'exp', "Deterministic dt transform should be 'exp' for simplicity"
inv_dt = torch.exp(torch.linspace(math.log(self.dt_min), math.log(self.dt_max), self.H)).unsqueeze(-1) # (H 1)
else:
shape = (self.H, 1) if self.dt_tie else (self.H, self.N//2)
# Initialize log dt
inv_dt = torch.rand(*shape, dtype=self.dtype) * (
math.log(self.dt_max) - math.log(self.dt_min)
) + math.log(self.dt_min)
if self.dt_transform != 'exp':
inv_dt = inv_transform(torch.exp(inv_dt), self.dt_transform)
return inv_dt
def init_ssm_real(self):
"""Returns (dense, real) (A, B, C) parameters for init options."""
# Generate A, B
A, B = hippo.transition(self.init, self.N)
A = torch.as_tensor(A, dtype=self.dtype)
B = torch.as_tensor(B, dtype=self.dtype)[:, 0]
B = repeat(B, 'n -> v n', v=self.n_ssm).clone().contiguous()
A = repeat(A, 'n m -> v n m', v=self.n_ssm).clone().contiguous()
# Generate C
if self.deterministic:
C = torch.zeros(self.channels, self.H, self.N, dtype=self.dtype)
C[..., :1] = 1.0
else:
C = torch.randn(self.channels, self.H, self.N, dtype=self.dtype)
return A, B, C
def init_ssm_dplr(self):
"""Returns DPLR (A, P, B, C) parameters for init options."""
A, P, B, V = dplr.combination(self.init, self.N, self.rank, self.n_ssm, **self.init_args)
# Broadcast C to have H channels
if self.deterministic:
C = torch.zeros(self.channels, self.n_ssm, self.N, dtype=self.cdtype)
C[:, :, :1] = 1.
C = contract('hmn, chn -> chm', V.conj().transpose(-1, -2), C) # V^* C
C = repeat(C, 'c t n -> c (v t) n', v=self.H // C.size(-2)).clone().contiguous()
else:
C = torch.randn(self.channels, self.H, self.N//2, dtype=self.cdtype)
# Broadcast other parameters to have n_ssm copies
assert self.n_ssm % B.size(-2) == 0 \
and self.n_ssm % P.size(-2) == 0 \
and self.n_ssm % A.size(-2) == 0
# Broadcast tensors to n_ssm copies
# These will be the parameters, so make sure tensors are materialized and contiguous
B = repeat(B, 't n -> (v t) n', v=self.n_ssm // B.size(-2)).clone().contiguous()
P = repeat(P, 'r t n -> r (v t) n', v=self.n_ssm // P.size(-2)).clone().contiguous()
A = repeat(A, 't n -> (v t) n', v=self.n_ssm // A.size(-2)).clone().contiguous()
# Because these complex parameterizations assume conjugate symmetry,
# halve the value of self.N for convenience
self.N //= 2
return A, P, B, C
def __init__(
self,
# General Kernel arguments for parent class
d_model: int = 0,
channels: int = 1,
l_max: Optional[int] = None,
lr: Union[float, Optional[Mapping]] = None,
wd: Union[float, Optional[Mapping]] = 0.0,
verbose: bool = True,
# SSM arguments
d_state: int = 64,
deterministic: bool = False,
# dt options
dt_min: float = 0.001,
dt_max: float = 0.1,
dt_tie: bool = True,
dt_transform: str = 'exp',
# (A, B, C) options
rank: int = 1,
n_ssm: Optional[int] = None,
measure: Optional[str] = None,
init: Optional[str] = "legs",
# Extra hyperparameters for initialization
**init_args,
):
super().__init__(d_model=d_model, channels=channels, l_max=l_max, lr=lr, wd=wd, verbose=verbose)
self.N = d_state
self.dtype, self.cdtype = torch.float, torch.cfloat
self.deterministic = deterministic
# dt options
self.dt_min = dt_min
self.dt_max = dt_max
self.dt_tie = dt_tie
self.dt_transform = dt_transform
# SSM options (A, B, C)
self.rank = rank
self.n_ssm = n_ssm if n_ssm is not None else self.H
if measure is not None:
log.warning("Warning: 'measure' option changed to 'init' and will be removed in a future version.")
assert init is None, "'measure' and 'init' cannot both be passed into SSMKernel"
init, measure = measure, init
self.init = init
self.init_args = init_args
@torch.no_grad()
def forward_state(self, u, state):
"""Forward the state through a sequence, i.e. computes the state after passing chunk through SSM
This is a generic version of this functionality that works for SSMs.
It is currently used by SSMKernelDense and SSMKernelDPLR.
This is a suboptimal implementation; it is recommended to use SSMKernelDiag
if this functionality is desired.
state: (B, H, N)
u: (B, H, L)
Returns: (B, H, N)
"""
# Construct dA, dB matrices
dA, dB = self._setup_state() # (H N N) (H N)
conj = state.size(-1) != dA.size(-1)
if conj: state = _conj(state)
v = contract('h n, b h l -> b h n l', dB, u.flip(-1))
AL, v = power(u.size(-1), dA, v)
next_state = contract("h m n, b h n -> b h m", AL, state)
next_state = next_state + v
if conj: next_state = next_state[..., : next_state.size(-1) // 2]
return next_state
def _setup_state(self):
"""Register dA and dB to module."""
raise NotImplementedError
@property
def d_state(self):
"""d_state and state_to_tensor are used by specific decoders.
These were used in earlier versions and should not be needed in general.
"""
return self.H * self.N
@property
def state_to_tensor(self):
        return lambda state: rearrange(state, '... h n -> ... (h n)')
class SSMKernelDense(SSMKernel):
"""Slow version of SSMKernel function for illustration and benchmarking.
Uses dense A parameterization and computes kernel in naive way.
- Discretize A^(dt), B^(dt) using bilinear transform
- Compute length-L kernel K_L(A^(dt), B^(dt), C)
"""
@staticmethod
def bilinear(dt, A, B=None):
"""
dt: (H 1) timescales (or H N)
A: (H N N)
B: (H N)
"""
N = A.shape[-1]
I = torch.eye(N).to(A)
A_backwards = I - dt[:, None] / 2 * A # Doesn't quite make sense if dt has shape (H N)
A_forwards = I + dt[:, None] / 2 * A
if B is None:
dB = None
else:
dB = dt * torch.linalg.solve(
A_backwards, B.unsqueeze(-1)
).squeeze(-1) # (... N)
dA = torch.linalg.solve(A_backwards, A_forwards) # (... N N)
return dA, dB
def __init__(self, comp=False, **kwargs):
"""
comp: Use Companion matrix parameterization (SpaceTime).
"""
super().__init__(**kwargs)
self.comp = comp
# Initialize dt, A, B, C
inv_dt = self.init_dt()
A, P, B, C = self.init_ssm_dplr()
# Materialize dense A, B, C
if self.comp:
# Special case for companion matrix parameterization
A = torch.zeros_like(_conj(A))
else:
A = torch.diag_embed(_conj(A)) \
- contract('r s p, r s q -> s p q', _conj(P), _conj(P).conj())
self.N *= 2 # Double N again since no conjugate symmetry
B, C = _conj(B), _conj(C)
self.register_params(A, B, C, inv_dt)
def register_params(self, A, B, C, inv_dt):
assert self.N == A.size(-1)
assert self.H == inv_dt.size(0)
assert self.n_ssm == A.size(0) == B.size(0)
self.repeat = self.H // A.size(0)
C = C.expand(torch.broadcast_shapes(C.shape, (1, self.H, self.N))) # (C, H, N)
# Register parameters
self.register("inv_dt", inv_dt, self.lr_dict['dt'], self.wd_dict['dt'])
self.register("A", _c2r(A), self.lr_dict['A'], self.wd_dict['A'])
self.register("B", _c2r(B), self.lr_dict['A'], self.wd_dict['B'])
self.C = nn.Parameter(_c2r(_resolve_conj(C)))
# Cache if nothing is trained
is_trainable = lambda lr: lr is None or lr > 0.0
self.trainable = is_trainable(self.lr_dict['dt']) \
or is_trainable(self.lr_dict['A']) \
or is_trainable(self.lr_dict['B'])
self.K = None # Compute in forward pass since that ensures correct device
def forward(self, state=None, rate=1.0, L=None):
if L is None: L = self.L
# This class shouldn't support the more advanced sampling and variable length functionalities, since it's just for testing
# But the code from NPLR could be pasted here if desired
# assert rate == 1.0 and L is not None
if self.trainable or self.K is None:
dA, dB = self._setup_state()
self.dA, self.dB = dA, dB
# Need to calculate dA, dB
if self.trainable:
k = krylov(L, self.dA, self.dB, _r2c(self.C)) # (H L)
else:
if self.K is None:
self.K = krylov(L, self.dA, self.dB) # (H N L)
k = contract('hnl,chn->chl', self.K[..., :L], _r2c(self.C))
k = k.float()
if state is not None:
state = state.to(self.dA)
# Compute A @ s
state = contract("h n m, b h m -> b h n", self.dA, state)
k_state = krylov(L, self.dA, state.unsqueeze(-3), _r2c(self.C))
k_state = k_state.float()
else:
k_state = None
return k, k_state
def default_state(self, *batch_shape):
C = _r2c(self.C)
state = torch.zeros(*batch_shape, self.H, self.N, dtype=C.dtype, device=C.device)
return state
def _setup_state(self):
A, B = _r2c(self.A), _r2c(self.B)
A = repeat(A, 't n m -> (v t) n m', v=self.repeat)
B = repeat(B, 't n -> (v t) n', v=self.repeat)
if self.comp:
dA = A.new_zeros((self.H, self.N, self.N))
dA[:, 1:, :-1] = torch.eye(self.N-1, dtype=A.dtype, device=A.device)
# A = A/torch.linalg.norm(A,ord=1,dim=-1,keepdims=True)
dA[:, :, -1] = A
dB = _r2c(self.B).expand((self.H, self.N))
dA = dA.real + 0j
dB = dB.real + 0j
else:
dt = param_transform(self.inv_dt, self.dt_transform)
dA, dB = SSMKernelDense.bilinear(dt, A, B)
return dA, dB
def _setup_step(self):
self.dA, self.dB = self._setup_state()
self.dC = _r2c(self.C)
def step(self, u, state):
next_state = contract("h m n, b h n -> b h m", self.dA, state) \
+ contract("h n, b h -> b h n", self.dB, u)
y = contract("c h n, b h n -> b c h", self.dC, next_state)
return y.real, next_state
class SSMKernelReal(SSMKernelDense):
"""Dense and real version of SSMKernel (e.g. using original real-valued HiPPO matrices) for testing."""
def __init__(self, **kwargs):
super().__init__(comp=False, **kwargs)
inv_dt = self.init_dt()
A, B, C = self.init_ssm_real()
# SSMKernelDense is designed to work with complex
A, B, C = A.to(torch.cfloat), B.to(torch.cfloat), C.to(torch.cfloat)
self.register_params(A, B, C, inv_dt)
class SSMKernelDiag(SSMKernel):
"""SSM kernel using diagonal state matrix (S4D model).
Options:
disc: ['zoh' | 'bilinear' | 'dss'] Discretization options.
dt_fast: (experimental) Parameterize inv_dt under sinh function.
(Ohno et al. "Fast Saturating Gate for Learning Long Time Scales with RNNs")
real_transform, imag_transform: ['none' | 'exp' | 'relu' | 'sigmoid' | 'softplus']
Parameterize the real/imag parts of the diagonal of A under this function.
bandlimit: Mask high frequencies of the kernel (indices corresponding to
diagonal elements with large imaginary part). Introduced in S4ND paper.
backend: ['cuda' | 'keops' | 'naive'] Options for Vandermonde/Cauchy kernel (in order of efficiency).
is_real : Real-valued SSM; can be interpreted as EMA.
"""
def __init__(
self,
disc: str = 'zoh', # Change to 'bilinear' to match S4, but should make little difference either way
dt_fast: bool = False,
real_transform: str = 'exp',
imag_transform: str = 'none',
bandlimit: Optional[float] = None,
backend: str = 'cuda',
is_real: bool = False,
**kwargs,
):
# Special case: for real-valued, d_state semantics change
if is_real and 'd_state' in kwargs:
kwargs['d_state'] = kwargs['d_state'] * 2
super().__init__(**kwargs)
self.disc = disc
self.dt_fast = dt_fast
self.real_transform = real_transform
self.imag_transform = imag_transform
self.bandlimit = bandlimit
self.backend = backend
self.is_real = is_real
# Initialize dt, A, B, C
inv_dt = self.init_dt()
A, P, B, C = self.init_ssm_dplr()
# Note that in the Diag case, P will be ignored
# The DPLR case subclasses this and uses P
self.register_params(A, B, C, inv_dt, P)
def register_params(self, A, B, C, inv_dt, P):
"""Process the initialization into form of trainable parameters.
A: (S, N) diagonal matrix
B: (S, N)
C: (C, H, N)
dt: (H) timescale per feature
Dimensions:
N (or d_state): state size
H (or d_model): total SSM copies
S (or n_ssm): number of trainable copies of (A, B, dt); must divide H
C (or channels): system is 1-dim to C-dim
The forward pass of this Module returns a tensor of shape (C, H, L)
Note: tensor shape N here denotes half the true state size, because of conjugate symmetry
"""
assert self.backend in ['cuda', 'keops', 'naive']
if self.dt_fast: inv_dt = torch.asinh(inv_dt)
# Rank of low-rank correction
assert self.H == inv_dt.size(0)
assert self.N == A.size(-1) == B.size(-1) == C.size(-1)
assert self.n_ssm == A.size(-2) == B.size(-2) # Number of independent SSMs trained
self.repeat = self.H // A.size(0)
# Check that diagonal part has negative real and imag part
# (allow some tolerance for numerical precision on real part
# since it may be constructed by a diagonalization)
assert torch.all(A.real < 1e-4) and torch.all(A.imag <= 0.0)
# Broadcast everything to correct shapes
C = C.expand(torch.broadcast_shapes(C.shape, (1, self.H, self.N))) # (C, H, N) # TODO originally this was only in DPLR, check safe for Diag
B = B.unsqueeze(0) # (1, H, N)
assert self.channels == C.shape[0]
# Register dt
self.register("inv_dt", inv_dt, self.lr_dict['dt'], self.wd_dict['dt'])
# Register ABC
if self.is_real:
self.register("C", C.real, self.lr_dict['C'], None)
self.register("B", B.real, self.lr_dict['B'], self.wd_dict['B'])
self.register("A_real", inv_transform(-A.real, self.real_transform), self.lr_dict['A'], self.wd_dict['A'])
else:
self.register("C", _c2r(_resolve_conj(C)), self.lr_dict['C'], None)
self.register("B", _c2r(B), self.lr_dict['B'], self.wd_dict['B'])
self.register("A_real", inv_transform(-A.real, self.real_transform), self.lr_dict['A'], self.wd_dict['A'])
self.register("A_imag", inv_transform(-A.imag, self.imag_transform), self.lr_dict['A'], self.wd_dict['A'])
def _get_params(self, rate=1.0):
"""Process the internal parameters."""
# (S N) where S=n_ssm
if self.is_real:
A = -param_transform(self.A_real, self.real_transform)
B = self.B # (1 S N)
C = self.C # (C H N)
else:
A = -param_transform(self.A_real, self.real_transform) - 1j * param_transform(self.A_imag, self.imag_transform)
B = _r2c(self.B) # (1 S N)
C = _r2c(self.C) # (C H N)
if self.dt_fast: inv_dt = torch.sinh(self.inv_dt)
else: inv_dt = self.inv_dt
dt = param_transform(inv_dt, self.dt_transform) * rate # (H N)
if self.bandlimit is not None:
freqs = dt / rate * A.imag.abs() / (2*math.pi) # (H N)
mask = torch.where(freqs < self.bandlimit * .5, 1, 0)
C = C * mask
# Incorporate dt into A and B
A = repeat(A, 't n -> (v t) n', v=self.repeat) # (H N)
B = repeat(B, 'b t n -> b (v t) n', v=self.repeat) # (1 H N)
# TODO: The downstream algorithm should only need to access dt*A
# However the current DPLR kernel still uses dt and A separately
# Once that is fixed, this should return dtA instead of dt and A
dtA = dt * A # (H N)
return dt, A, B, C
def forward(self, L, state=None, rate=1.0):
"""See Kernel.forward() for argument documentation."""
dt, A, B, C = self._get_params(rate)
dtA = dt * A
# Augment B with state
if state is not None:
s = state / dt
if self.disc == 'bilinear':
s = s * (1. + dtA/2)
elif self.disc == 'zoh':
s = s * dtA * dtA.exp() / (dtA.exp() - 1.)
B = torch.cat([s, B], dim=-3) # (1+B H N)
# Combine B and C
C = (B[:, None, :, :] * C).view(-1, self.H, self.N)
# Dispatch which Vandermonde kernel to use
if has_cuda_extension and C.dtype == torch.cfloat and C.device.type == 'cuda' and self.backend == 'cuda':
log_vandermonde = log_vandermonde_cuda
elif has_pykeops and self.backend in ['cuda', 'keops']:
log_vandermonde = log_vandermonde_keops
else:
log_vandermonde = log_vandermonde_naive
# Main kernel
if self.disc == 'zoh':
# Power up
C = C * (torch.exp(dtA)-1.) / A
K = log_vandermonde(C, dtA, L) # (H L)
elif self.disc == 'bilinear':
C = C * (1. - dtA/2).reciprocal() * dt # or * dtA / A
dA = (1. + dtA/2) / (1. - dtA/2)
K = log_vandermonde(C, dA.log(), L)
elif self.disc == 'dss':
# Implementation from DSS meant for case when real eigenvalues can be positive
P = dtA.unsqueeze(-1) * torch.arange(L, device=C.device) # [H N L]
A_gt_0 = A.real > 0 # [N]
if A_gt_0.any():
with torch.no_grad():
P_max = dtA * (A_gt_0 * (L-1)) # [H N]
P = P - P_max.unsqueeze(-1) # [H N L]
S = P.exp() # [H N L]
dtA_neg = dtA * (1 - 2*A_gt_0) # [H N]
num = dtA_neg.exp() - 1 # [H N]
den = (dtA_neg * L).exp() - 1 # [H N]
# Inline reciprocal function for DSS logic
x = den * A
x_conj = _resolve_conj(x)
r = x_conj / (x*x_conj + 1e-7)
C = C * num * r # [C H N]
K = contract('chn,hnl->chl', C, S).float()
else: raise ValueError(f"Discretization {self.disc} not supported")
K = K.view(-1, self.channels, self.H, L) # (1+B C H L)
if state is not None:
K_state = K[:-1, :, :, :] # (B C H L)
else:
K_state = None
K = K[-1, :, :, :] # (C H L)
return K, K_state
def _setup_step(self):
"""Set up dA, dB, dC discretized parameters for stepping."""
        dt, A, B, C = self._get_params()
# Incorporate dt into A
dtA = dt * A # (H N)
if self.disc == 'zoh':
self.dA = torch.exp(dtA) # (H N)
self.dB = B * (torch.exp(dtA)-1.) / A # (C H N)
elif self.disc == 'bilinear':
self.dA = (1. + dtA/2) / (1. - dtA/2)
self.dB = B * (1. - dtA/2).reciprocal() * dt # or * dtA / A
self.dB = rearrange(self.dB, '1 h n -> h n')
self.dC = C
def default_state(self, *batch_shape):
C = _r2c(self.C)
state = torch.zeros(*batch_shape, self.H, self.N, dtype=C.dtype, device=C.device)
return state
def step(self, u, state):
next_state = contract("h n, b h n -> b h n", self.dA, state) \
+ contract("h n, b h -> b h n", self.dB, u)
y = contract("c h n, b h n -> b c h", self.dC, next_state)
return 2*y.real, next_state
def forward_state(self, u, state):
"""Pass the state forward through an entire sequence."""
self._setup_step()
AL = self.dA ** u.size(-1)
u = u.flip(-1).to(self.dA).contiguous() # (B H L)
# Dispatch which Vandermonde kernel to use
if has_pykeops and self.backend in ['cuda', 'keops']:
log_vandermonde_transpose = log_vandermonde_transpose_keops
else:
log_vandermonde_transpose = log_vandermonde_transpose_naive
v = log_vandermonde_transpose(u, self.dB, self.dA.log(), u.size(-1))
next_state = AL * state + v
return next_state
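# Illustrative consistency sketch (not part of the original file): the diagonal
# kernel's convolutional view and its recurrent step should produce the same
# output, mirroring the CNN <-> RNN conversion described in the Kernel interface.
# Shapes follow the docstrings above (B=batch, H=d_model, N=d_state, L=length).
# Assumes at least one Vandermonde backend (CUDA extension, pykeops, or the
# naive fallback) is usable on the current device.
def _example_diag_cnn_rnn_match(B=2, H=4, N=8, L=16):
    torch.manual_seed(0)
    kernel = SSMKernelDiag(d_model=H, d_state=N, l_max=L)
    u = torch.randn(B, H, L)
    # CNN view: materialize the kernel K and convolve causally by hand
    K, _ = kernel(L=L)                   # (channels, H, L) with channels=1
    y_conv = torch.zeros(B, H, L)
    for t in range(L):
        for j in range(t + 1):
            y_conv[..., t] += K[0, :, j] * u[..., t - j]
    # RNN view: discretize once, then step one timestep at a time
    kernel._setup_step()
    state = kernel.default_state(B)
    ys = []
    for t in range(L):
        y, state = kernel.step(u[..., t], state)   # y: (B, channels, H)
        ys.append(y.squeeze(1))
    y_rec = torch.stack(ys, dim=-1)      # (B, H, L)
    assert torch.allclose(y_conv, y_rec, atol=1e-4)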
class SSMKernelDPLR(SSMKernelDiag):
"""SSM kernel for diagonal + low rank (DPLR) state matrices, corresponding to the original S4 model."""
@torch.no_grad()
def _setup_C(self, L):
"""Construct C~ from C.
        Two modes are supported: initialize directly to length L when the kernel has not been set up yet (self.l_kernel == 0), or double the current length.
"""
if self.l_kernel.item() == 0:
if self.verbose: log.info(f"S4: Initializing kernel to length {L}")
double_length = False
elif L > self.l_kernel.item(): # 2*int(self.l_kernel) == L:
if self.verbose: log.info(f"S4: Doubling length from L = {self.l_kernel.item()} to {2*self.l_kernel.item()}")
double_length = True
L = self.l_kernel.item() # Convenience for the math below
else: return
C = _r2c(self.C)
dA, _ = self._setup_state()
dA_L = power(L, dA)
# Multiply C by I - dA_L
C_ = _conj(C)
prod = contract("h m n, c h n -> c h m", dA_L.transpose(-1, -2), C_)
if double_length: prod = -prod # Multiply by I + dA_L instead
C_ = C_ - prod
C_ = C_[..., :self.N] # Take conjugate pairs again
self.C.copy_(_c2r(C_))
self.l_kernel = 2*self.l_kernel if double_length else self.l_kernel+L # Preserve type/device
def _omega(self, L, dtype, device, cache=True):
"""Calculate (and cache) FFT nodes.
This also caches a version of the nodes "unprocessed" with the bilinear transform.
        This method should be called every time the internal length self.l_kernel changes.
"""
# Use cached if available
if cache and hasattr(self, 'omega') and self.omega.size(-1) == L//2+1:
return self.omega, self.z
omega = torch.tensor(
np.exp(-2j * np.pi / (L)), dtype=dtype, device=device
) # \omega_{2L}
omega = omega ** torch.arange(0, L // 2 + 1, device=device)
z = 2 * (1 - omega) / (1 + omega)
# Cache if necessary
if cache:
self.omega = omega
self.z = z
return omega, z
def register_params(self, A, B, C, inv_dt, P):
"""Process the initialization into form of trainable parameters.
The SSM state matrix is represented by diag_embed(A) - PP^*
Note that the A notation here is slightly overloaded:
normally A refers to the full SSM state matrix (DPLR in this case)
but here we're using it to refer to the diagonal part of the matrix.
This is to make variable names compatible with the SSMKernelDiag class (DSS/S4D)
and is a much simpler variable name (e.g. as opposed to Lambda).
A: (S, N) diagonal part
P: (R, S, N) low-rank part
B: (S, N)
C: (C, H, N)
dt: (H) timescale per feature
Dimensions:
N (or d_state): state size
H (or d_model): total SSM copies
S (or n_ssm): number of trainable copies of (A, B, dt); must divide H
R (or rank): rank of low-rank part
C (or channels): system is 1-dim to C-dim
The forward pass of this Module returns a tensor of shape (C, H, L)
Note: tensor shape N here denotes half the true state size, because of conjugate symmetry
"""
# Print out kernel lengths; it can be tricky to make sure the length logic is correct
if self.verbose:
log.info(f"Constructing S4 (H, N, L) = ({self.H}, {self.N}, {self.l_max})")
# Register the basic params for diagonal SSM (A, B, C, dt)
super().register_params(A, B, C, inv_dt, P)
# Check shapes
assert self.rank == P.shape[-3]
assert self.N == P.size(-1)
assert self.n_ssm == P.size(-2)
self.register('P', _c2r(P), self.lr_dict['A'], self.wd_dict['A'])
# Track the current kernel length this is "attuned" to
self.register_buffer('l_kernel', torch.tensor(0))
def _get_params(self, rate=1.0):
dt, A, B, C = super()._get_params(rate=rate)
P = _r2c(self.P) # (R S N)
P = repeat(P, 'r t n -> r (v t) n', v=self.repeat) # (R H N)
Q = P.conj()
return dt, A, B, C, P, Q
def forward(self, state=None, rate=1.0, L=None):
"""See Kernel.forward() for argument documentation."""
# Initialize C~ if necessary (done in forward pass so it's on the correct device)
if self.l_kernel.item() == 0 and self.l_max is not None and self.l_max > 0:
self._setup_C(self.l_max)
# Handle sampling rate logic
# The idea is that this kernel's length (in continuous units) is self.l_kernel, while we are asked to provide a kernel of length L at (relative) frequency rate
if L is None:
L = round(self.l_kernel.item() / rate)
# Increase the internal length if needed
continuous_L = round(rate*L)
while continuous_L > self.l_kernel.item():
self._setup_C(continuous_L)
discrete_L = round(self.l_kernel.item()/rate)
dt, A, B, C, P, Q = self._get_params(rate)
# Get FFT nodes of right length
omega, z = self._omega(discrete_L, dtype=A.dtype, device=A.device, cache=(rate==1.0))
# Augment B
if state is not None:
# Have to "unbilinear" the state to put it into the same "type" as B
# Compute 1/dt * (I + dt/2 A) @ state
# Can do this without expanding (maybe minor speedup using conj symmetry in theory), but it's easier to read this way
s = _conj(state) if state.size(-1) == self.N else state # (B H N)
sA = (
s * _conj(A) # (B H N)
- contract('bhm, rhm, rhn -> bhn', s, _conj(Q), _conj(P))
)
s = s / dt + sA / 2
s = s[..., :self.N]
B = torch.cat([s, B], dim=-3) # (B+1, H, N)
# Incorporate dt into A
A = A * dt # (H N)
# Stack B and p, C and q for convenient batching
B = torch.cat([B, P], dim=-3) # (B+1+R, H, N)
C = torch.cat([C, Q], dim=-3) # (C+R, H, N)
# Incorporate B and C batch dimensions
v = B.unsqueeze(-3) * C.unsqueeze(-4) # (B+1+R, C+R, H, N)
v = v * dt # Incorporate dt into B
# Dispatch which Cauchy kernel to use
if has_cuda_extension and z.dtype == torch.cfloat and z.device.type == 'cuda' and self.backend == 'cuda':
cauchy_mult = cauchy_cuda
elif has_pykeops and self.backend in ['cuda', 'keops']:
cauchy_mult = cauchy_keops
else:
cauchy_mult = cauchy_naive
# Calculate resolvent at omega
r = cauchy_mult(v, z, A)
# Low-rank Woodbury correction
if self.rank == 1:
k_f = r[:-1, :-1, :, :] - r[:-1, -1:, :, :] * r[-1:, :-1, :, :] / (1 + r[-1:, -1:, :, :])
elif self.rank == 2:
r00 = r[: -self.rank, : -self.rank, :, :]
r01 = r[: -self.rank, -self.rank :, :, :]
r10 = r[-self.rank :, : -self.rank, :, :]
r11 = r[-self.rank :, -self.rank :, :, :]
det = (1 + r11[:1, :1, :, :]) * (1 + r11[1:, 1:, :, :]) - r11[:1, 1:, :, :] * r11[1:, :1, :, :]
s = (
r01[:, :1, :, :] * (1 + r11[1:, 1:, :, :]) * r10[:1, :, :, :]
+ r01[:, 1:, :, :] * (1 + r11[:1, :1, :, :]) * r10[1:, :, :, :]
- r01[:, :1, :, :] * (r11[:1, 1:, :, :]) * r10[1:, :, :, :]
- r01[:, 1:, :, :] * (r11[1:, :1, :, :]) * r10[:1, :, :, :]
)
s = s / det
k_f = r00 - s
else:
r00 = r[:-self.rank, :-self.rank, :, :]
r01 = r[:-self.rank, -self.rank:, :, :]
r10 = r[-self.rank:, :-self.rank, :, :]
r11 = r[-self.rank:, -self.rank:, :, :]
r11 = rearrange(r11, "a b h n -> h n a b")
r11 = torch.linalg.inv(torch.eye(self.rank, device=r.device) + r11)
r11 = rearrange(r11, "h n a b -> a b h n")
k_f = r00 - torch.einsum("i j h n, j k h n, k l h n -> i l h n", r01, r11, r10)
# Final correction for the bilinear transform
k_f = k_f * 2 / (1 + omega)
# Move from frequency to coefficients
k = torch.fft.irfft(k_f, n=discrete_L) # (B+1, C, H, L)
        # Truncate to target length
k = k[..., :L]
if state is not None:
k_state = k[:-1, :, :, :] # (B, C, H, L)
else:
k_state = None
k_B = k[-1, :, :, :] # (C H L)
return k_B, k_state
@torch.no_grad()
def double_length(self):
self._setup_C(2*self.l_kernel)
@torch.no_grad()
def _check(self):
"""Check if A, B, C parameters and vanilla SSMKernel construction can be recovered"""
# assert self.l_kernel > 0, "Set up module first"
K = self.forward(L=self.l_max)[0]
self._setup_step()
K_ = krylov(self.l_max, self.dA, self.dB, self.dC)
diff = K - K_
print("checking DPLR Kernel construction", torch.sum(diff ** 2))
@torch.no_grad()
def _setup_linear(self):
"""Preprocessing that allows fast linear-time (in state dimension) stepping."""
dt, A, B, C, P, Q = self._get_params()
# Prepare Linear stepping
D = (2.0 / dt - A).reciprocal() # (H, N)
R = (torch.eye(self.rank, dtype=A.dtype, device=A.device) + 2*contract('r h n, h n, s h n -> h r s', Q, D, P).real) # (H R R)
Q_D = rearrange(Q*D, 'r h n -> h r n')
try:
R = torch.linalg.solve(R, Q_D) # (H R N)
except:
R = torch.tensor(np.linalg.solve(R.to(Q_D).contiguous().detach().cpu(), Q_D.contiguous().detach().cpu())).to(Q_D)
R = rearrange(R, 'h r n -> r h n')
self.step_params = {
"D": D, # (H N)
"R": R, # (R H N)
"P": P, # (R H N)
"Q": Q, # (R H N)
"B": B, # (1 H N)
"E": 2.0 / dt + A, # (H N)
}
def _step_state_linear(self, u=None, state=None):
"""
Version of the step function that has time O(N) instead of O(N^2) per step, which takes advantage of the DPLR form and bilinear discretization.
Unfortunately, as currently implemented it's about 2x slower because it calls several sequential operations.
Perhaps a fused CUDA kernel implementation would be much faster.
u: (H) Input
state: (H, N/2) State with conjugate pairs. Optionally, the state can have last dimension N.
Returns: same shape as state
"""
C = _r2c(self.C) # View used for dtype/device
if u is None: # Special case used to find dA
u = torch.zeros(self.H, dtype=C.dtype, device=C.device)
if state is None: # Special case used to find dB
state = torch.zeros(self.H, self.N, dtype=C.dtype, device=C.device)
step_params = self.step_params.copy()
if state.size(-1) == self.N: # Only store half of the conjugate pairs; should be true by default
# There should be a slightly faster way using conjugate symmetry
contract_fn = lambda p, x, y: contract('r h n, r h m, ... h m -> ... h n', _conj(p), _conj(x), _conj(y))[..., :self.N] # inner outer product
else:
assert state.size(-1) == 2*self.N
step_params = {k: _conj(v) for k, v in step_params.items()}
contract_fn = lambda p, x, y: contract('r h n, r h m, ... h m -> ... h n', p, x, y) # inner outer product
D = step_params["D"] # (H N)
E = step_params["E"] # (H N)
R = step_params["R"] # (R H N)
P = step_params["P"] # (R H N)
Q = step_params["Q"] # (R H N)
B = step_params["B"] # (1 H N)
new_state = E * state - contract_fn(P, Q, state) # (B H N)
new_state = new_state + 2.0 * B * u.unsqueeze(-1) # (B H N)
new_state = D * (new_state - contract_fn(P, R, new_state))
return new_state
def _setup_state(self):
"""Construct dA and dB for discretized state equation."""
# Construct dA and dB by using the stepping
self._setup_linear()
C = _r2c(self.C) # Just returns a view that we use for finding dtype/device
state = torch.eye(2*self.N, dtype=C.dtype, device=C.device).unsqueeze(-2) # (N 1 N)
dA = self._step_state_linear(state=state)
dA = rearrange(dA, "n h m -> h m n")
u = C.new_ones(self.H)
dB = self._step_state_linear(u=u)
dB = _conj(dB)
dB = rearrange(dB, '1 h n -> h n') # (H N)
return dA, dB
def _step_state(self, u, state):
"""Must be called after self.default_state() is used to construct an initial state!"""
next_state = (torch.einsum(self.state_contraction, self.dA, state)
+ torch.einsum(self.input_contraction, self.dB, u))
return next_state
def _setup_step(self, mode='dense'):
"""Set up dA, dB, dC discretized parameters for stepping."""
self.dA, self.dB = self._setup_state()
# Calculate original C
C = _conj(_r2c(self.C)) # (H C N)
if self.l_kernel.item() == 0:
dC = C
else:
# self.C represents C_tilde
dA_L = power(self.l_kernel.item(), self.dA)
I = torch.eye(self.dA.size(-1)).to(dA_L)
dC = torch.linalg.solve(
I - dA_L.transpose(-1, -2),
C.unsqueeze(-1),
).squeeze(-1)
self.dC = dC
# Do special preprocessing for different step modes
self._step_mode = mode
if mode == 'linear':
# Linear case: special step function for the state, we need to handle output
# use conjugate symmetry by default, which affects the output projection
self.dC = 2*self.dC[:, :, :self.N]
elif mode == 'diagonal':
# Eigendecomposition of the A matrix
L, V = torch.linalg.eig(self.dA)
V_inv = torch.linalg.inv(V)
            # Check that the eigendecomposition is correct
if self.verbose:
print("Diagonalization error:", torch.dist(V @ torch.diag_embed(L) @ V_inv, self.dA))
# Change the parameterization to diagonalize
self.dA = L
self.dB = contract('h n m, h m -> h n', V_inv, self.dB)
self.dC = contract('h n m, c h n -> c h m', V, self.dC)
elif mode == 'dense':
pass
else: raise NotImplementedError("DPLR Kernel step mode must be {'dense' | 'linear' | 'diagonal'}")
def default_state(self, *batch_shape):
C = _r2c(self.C)
N = C.size(-1)
H = C.size(-2)
# Cache the tensor contractions we will later do, for efficiency
# These are put in this function because they depend on the batch size
step_mode = getattr(self, "_step_mode", "dense") # Used in default_state, which is called without _setup_step() in forward_state()
if step_mode != 'linear':
N *= 2
if step_mode == 'diagonal':
self.state_contraction = "h n, ... h n -> ... h n"
else:
# Dense (quadratic) case: expand all terms
self.state_contraction = "h m n, ... h n -> ... h m"
self.input_contraction = "h n, ... h -> ... h n"
self.output_contraction = "c h n, ... h n -> ... c h"
state = torch.zeros(*batch_shape, H, N, dtype=C.dtype, device=C.device)
return state
def step(self, u, state):
"""Must have called self._setup_step() and created state with self.default_state() before calling this."""
if self._step_mode == 'linear':
new_state = self._step_state_linear(u, state)
else:
new_state = self._step_state(u, state)
y = torch.einsum(self.output_contraction, self.dC, new_state)
return y.real, new_state
def forward_state(self, *args, **kwargs):
# Dispatch directly to generic state forwarding
# instead of using the Diag version
# TODO design pattern is ugly. Can be fixed with an intermediate
# subclass above Diag/DPLR that has the shared logic (parameter construction)
# but not the state/step logic.
# Fine to keep like this for now since we want Diag to be the standard
# instead of having too many layers of subclassing.
return SSMKernel.forward_state(self, *args, **kwargs)
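# Illustrative usage sketch (not part of the original file): constructing the
# DPLR (S4) kernel and materializing a length-L convolution kernel. The Cauchy
# backend (CUDA extension / pykeops / naive fallback) is chosen by the dispatch
# logic in forward(); values below are arbitrary and for illustration only.
def _example_dplr_kernel(H=4, N=16, L=32):
    kernel = SSMKernelDPLR(d_model=H, d_state=N, l_max=L)
    K, _ = kernel(L=L)                   # (channels, H, L) with channels=1
    assert K.shape == (1, H, L)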
| state-spaces-main | src/models/sequence/kernels/ssm.py |
"""Initializations of structured state space (S4) models with diagonal plus low rank (DPLR) parameterization."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
import src.models.hippo.hippo as hippo
import src.utils.train
log = src.utils.train.get_logger(__name__)
def dplr(
init='hippo',
N=64, rank=1, H=1,
dtype=torch.float,
real_random=False,
real_scale=1.0,
imag_random=False,
imag_scale=1.0,
B_random=False,
B_init='constant',
B_scale=1.0,
P_scale=1.0,
normalize=False,
):
"""Directly construct a DPLR matrix.
Args:
    - init: (str) ['rand', 'lin', 'inv', 'real', 'hippo'] Choices for initialization of A.
Most of these affect the imaginary part of A, except for 'real'.
- real_random: (bool) Initialize A.real in -U[0, 1]. Otherwise, initialize to -1/2.
- real_scale: (float) Scaling factor of real part of A.
- imag_random: (bool) Initialize A.imag randomly.
    - imag_scale: (float) Scaling factor of imaginary part of A.
- B_init: (str) ['constant' | 'random' | 'alternating' | 'unit-cw' | 'unit-ccw' | 'hippo']
Choices for initialization of B.
- B_scale: (float) Scaling factor for B
- P_scale: (float) Scaling factor for P
- normalize: (bool) Apply an automatic normalization factor on B
"""
assert dtype == torch.float or dtype == torch.double
dtype = torch.cfloat if dtype == torch.float else torch.cdouble
pi = torch.tensor(math.pi)
# Construct real part of diagonal A (must be non-negative)
if real_random:
real_part = torch.rand(H, N//2)
else:
real_part = .5 * torch.ones(H, N//2)
real_part = real_scale * real_part
# Construct imaginary part of diagonal A (must be non-negative)
if imag_random:
imag_part = N//2 * torch.rand(H, N//2)
else:
imag_part = repeat(torch.arange(N//2), 'n -> h n', h=H)
if init in ['random', 'rand']:
imag_part = torch.exp(torch.randn(H, N//2))
elif init == 'real':
imag_part = 0 * imag_part
if real_random:
real_part = torch.rand(H, N//2) * N//2
else:
# This is the S4D-Real method described in the S4D paper
# The A matrix is diag(-1, -2, ..., -N), which are the eigenvalues of the HiPPO matrix
real_part = 1 + repeat(torch.arange(N//2), 'n -> h n', h=H)
elif init in ['linear', 'lin']:
imag_part = pi * imag_part
elif init in ['inverse', 'inv']: # Based on asymptotics of the default HiPPO matrix
imag_part = 1/pi * N * (N/(1+2*imag_part)-1)
elif init in ['inverse2', 'inv2']:
imag_part = 1/pi * N * (N/(1+imag_part)-1)
elif init in ['quadratic', 'quad']:
imag_part = 1/pi * (1+2*imag_part)**2
elif init in ['legs', 'hippo']:
A, _, _, _ = hippo.nplr('legs', N)
imag_part = -A.imag # Positive
else: raise NotImplementedError
imag_part = imag_scale * imag_part
# Construct diagonal A
A = -real_part - 1j * imag_part # Force negative real and imag
assert torch.all(A.real < 1e-4) and torch.all(A.imag <= 0.0) # Allow some tolerance for numerical precision on real part
# Initialize B
if B_random:
        log.warning("'B_random' is deprecated in favor of B_init='random' and will be removed in a future version.")
if init in ['legs', 'hippo']:
log.info(f'Initializing with S4D-LegS and ignoring argument {B_init=}')
B_init = 'legs'
if B_init in ['legs', 'hippo']:
# Special initialization using the HiPPO B matrix
# Note that theory (from S4D paper) says that B should be halved
# to match DPLR but we drop this 0.5 factor for simplicity
_, P, B, _ = hippo.nplr('legs', N, B_clip=2.0)
B = repeat(B, 'n -> h n', h=H).clone().contiguous()
elif B_init == 'constant':
B = torch.ones(H, N//2, dtype=dtype)
elif B_init == 'random':
B = torch.randn(H, N//2, dtype=dtype)
elif B_init == 'alternating': # Seems to track 'constant' exactly for some reason
B = torch.ones(H, N//4, 2, dtype=dtype)
B[:, :, 1] *= -1
B = B.view(H, N//2)
elif B_init == 'unit-cw':
z = torch.tensor(torch.exp(-2j * pi / N), dtype=dtype)
B = z ** torch.arange(0, N // 2)
B = repeat(B, 'n -> h n', h=H).clone().contiguous()
elif B_init == 'unit-ccw':
z = torch.tensor(torch.exp(2j * pi / N), dtype=dtype)
B = z ** torch.arange(0, N // 2)
B = repeat(B, 'n -> h n', h=H).clone().contiguous()
else: raise NotImplementedError
B *= B_scale
# Experimental feature that appeared in earlier versions of HTTYH (not extensively tested)
# Seems more principled for normalization theoretically, but seemed to hurt on PathX
if normalize:
norm = -B/A # (H, N) # Result if you integrate the kernel with constant 1 function
zeta = 2*torch.sum(torch.abs(norm)**2, dim=-1, keepdim=True) # Variance with a random C vector
B = B / zeta**.5
# Initialize P
if B_init in ['legs', 'hippo']:
# P constructed earlier
P = repeat(P, 'r n -> r h n', h=H).clone().contiguous()
else:
P = torch.randn(rank, H, N//2, dtype=dtype)
P = P * P_scale
# Initialize V (only used in testing)
V = torch.eye(N, dtype=dtype)[:, :N//2]
V = repeat(V, 'n m -> h n m', h=H)
return A, P, B, V
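# Illustrative sketch (not part of the original file): shapes returned by dplr()
# for a small configuration. N is the full state size; the returned tensors use
# N//2 entries because of conjugate symmetry.
def _example_dplr_shapes():
    A, P, B, V = dplr(init='lin', N=8, rank=1, H=4)
    assert A.shape == (4, 4)             # (H, N//2) diagonal part of A
    assert P.shape == (1, 4, 4)          # (rank, H, N//2) low-rank part
    assert B.shape == (4, 4)             # (H, N//2)
    assert V.shape == (4, 8, 4)          # (H, N, N//2), only used in testing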
def ssm(init, N, R, H, **ssm_args):
"""Dispatcher to create single SSM initialization
N: state size
R: rank (for DPLR parameterization)
H: number of independent SSM copies
"""
if init.startswith("diag") or init.startswith("dplr"):
if init.startswith("diag"):
ssm_args["P_scale"] = 0.0
args = init[4:].split("-")
assert args[0] == ""
if len(args) > 1:
ssm_args["init"] = args[1]
A, P, B, V = dplr(N=N, rank=R, H=H, **ssm_args)
else:
A, P, B, V = hippo.nplr(init, N, R, **ssm_args)
A = repeat(A, 'n -> s n', s=H)
P = repeat(P, 'r n -> r s n', s=H)
B = repeat(B, 'n -> s n', s=H)
V = repeat(V, 'n m -> s n m', s=H)
return A, P, B, V
combinations = {
'hippo': ['legs', 'fourier'],
'diag': ['diag-inv', 'diag-lin'],
'all': ['legs', 'fourier', 'diag-inv', 'diag-lin'],
}
def combination(inits, N, R, S, **ssm_args):
if isinstance(inits, str):
inits = combinations[inits] if inits in combinations else [inits]
assert S % len(inits) == 0, f"{S} independent trainable SSM copies must be multiple of {len(inits)} different inits"
A, P, B, V = zip(
*[ssm(init, N, R, S // len(inits), **ssm_args) for init in inits]
)
A = torch.cat(A, dim=0) # (S N)
P = torch.cat(P, dim=1) # (R S N)
B = torch.cat(B, dim=0) # (S N)
V = torch.cat(V, dim=0) # (S N N)
return A, P, B, V
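# Illustrative sketch (not part of the original file): combining two diagonal
# inits and checking the copy dimension. 'diag-lin' and 'diag-inv' route through
# dplr() above (with P zeroed out), so the shapes follow _example_dplr_shapes.
def _example_combination_shapes():
    A, P, B, V = combination(['diag-lin', 'diag-inv'], N=8, R=1, S=4)
    assert A.shape == (4, 4)             # (S, N//2)
    assert P.shape == (1, 4, 4)          # (R, S, N//2)
    assert B.shape == (4, 4)             # (S, N//2)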
| state-spaces-main | src/models/sequence/kernels/dplr.py |
# Adapted from https://github.com/HazyResearch/zoo
# in turn adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/feature_maps/fourier_features.py
"""Implementation of Performer model (https://arxiv.org/abs/2009.14794)."""
import math
import torch
from einops import rearrange, repeat
from fast_transformers.feature_maps.base import FeatureMap
def orthogonal_matrix_chunk(cols, device=None):
unstructured_block = torch.randn((cols, cols), device=device)
q, r = torch.linalg.qr(unstructured_block)
return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0, device=None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, device=device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, device=device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim=1)
elif scaling == 1:
multiplier = math.sqrt((float(nb_columns))) * torch.ones((nb_rows,), device=device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
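# Illustrative sketch (not part of the original file): drawing a structured
# random projection and checking its shape. With scaling=1 every row has norm
# sqrt(nb_columns), matching the expected row norm of an i.i.d. Gaussian matrix.
def _example_projection_shape():
    proj = gaussian_orthogonal_random_matrix(nb_rows=16, nb_columns=8, scaling=1)
    assert proj.shape == (16, 8)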
# kernel functions
# transcribed from jax to pytorch from
# https://github.com/google-research/google-research/blob/master/performer/fast_attention/jax/fast_attention.py
def softmax_kernel(data, *, projection_matrix, is_query, softmax_temp=None, eps=1e-4):
"""For key, we expect shape (b, h, s, d) where s is the sequence dimension
"""
b, h, _, d = data.shape
if softmax_temp is None:
softmax_temp = 1 / math.sqrt(d)
data_normalizer = math.sqrt(softmax_temp)
ratio = (projection_matrix.shape[0] ** -0.5)
projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = (diag_data / 2.0) * (data_normalizer ** 2)
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (
torch.exp(data_dash - diag_data -
torch.max(data_dash, dim=-1, keepdim=True).values) + eps)
else:
data_dash = ratio * (
torch.exp(data_dash - diag_data - torch.max(data_dash)) + eps)
return data_dash.type_as(data)
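# Illustrative sketch (not part of the original file): the positive random
# features phi(q), phi(k) produced by softmax_kernel approximate the softmax
# logits exp(q.k / sqrt(d)) up to a shared normalization, so phi(q) @ phi(k)^T
# can stand in for the attention matrix. Sizes below are arbitrary.
def _example_softmax_kernel_shapes(b=1, h=2, s=5, d=8, m=16):
    q = torch.randn(b, h, s, d)
    k = torch.randn(b, h, s, d)
    proj = gaussian_orthogonal_random_matrix(nb_rows=m, nb_columns=d)
    q_prime = softmax_kernel(q, projection_matrix=proj, is_query=True)    # (b, h, s, m)
    k_prime = softmax_kernel(k, projection_matrix=proj, is_query=False)   # (b, h, s, m)
    assert q_prime.shape == (b, h, s, m) and k_prime.shape == (b, h, s, m)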
class PerformerFeatures(FeatureMap):
"""Random Fourier Features for the RBF kernel according to [1].
[1]: "Weighted Sums of Random Kitchen Sinks: Replacing minimization with
randomization in learning" by A. Rahimi and Benjamin Recht.
Arguments
---------
query_dims: int, The input query dimensions in order to sample
the noise matrix
n_features: int, The size of the feature map (should be divisible by 2)
(default: query_dims)
        softmax_temp: float, The temperature for the Gaussian kernel
approximation exp(-t * |x-y|^2)
(default: 1/sqrt(query_dims))
orthogonal: bool, When True the random matrix is initialized for
orthogonal random features to reduce the approximation
variance (default: False)
redraw: int, Redraw the random matrix every 'redraw' times
(default: 1)
deterministic_eval: bool, Only redraw the random matrix during training
(default: False)
"""
def __init__(self, query_dims, n_features=None, ortho_scaling=0, softmax_temp=None,
orthogonal=False, redraw=1, deterministic_eval=False):
super().__init__(query_dims)
self.n_features = n_features or int(query_dims * math.log(query_dims))
self.ortho_scaling = ortho_scaling
# TODO: we're not using @orthogonal atm
self.orthogonal = orthogonal
# TODO: we're not using @softmax_temp atm
self.softmax_temp = 1 / math.sqrt(query_dims) if softmax_temp is None else softmax_temp
# self.redraw = redraw
# TODO: not redrawing atm, so I'm setting it to an irrational number
self.redraw = math.pi
self.deterministic_eval = deterministic_eval
# Make a buffer for storing the sampled projection_matrix
self.register_buffer("projection_matrix", torch.zeros(self.query_dims, self.n_features))
self._calls = -1
def new_feature_map(self, device):
# If we are not training skip the generation of a new feature map
if self.deterministic_eval and not self.training:
return
# Only redraw the new feature map every self.redraw times
self._calls += 1
if (self._calls % self.redraw) != 0:
return
projection_matrix = gaussian_orthogonal_random_matrix(nb_rows=self.n_features,
nb_columns=self.query_dims,
scaling=self.ortho_scaling)
self.register_buffer("projection_matrix", projection_matrix.to(device))
def forward_queries(self, x):
return softmax_kernel(x, projection_matrix=self.projection_matrix, is_query=True)
def forward_keys(self, x):
return softmax_kernel(x, projection_matrix=self.projection_matrix, is_query=False)
| state-spaces-main | src/models/sequence/attention/performer.py |
"""Implement linear attention.
From github.com/HazyResearch/transformers
"""
from functools import partial
from contextlib import contextmanager
import torch
from torch.cuda.amp import autocast
import torch.nn as nn
import hydra
from einops import rearrange
from fast_transformers.feature_maps import elu_feature_map
from fast_transformers.masking import TriangularCausalMask
from src.models.sequence.base import SequenceModule, TransposedModule
import src.models.nn.utils as U
try:
from apex import amp
APEX_AVAILABLE = True
except:
APEX_AVAILABLE = False
# linear attention classes with softmax kernel
# non-causal linear attention
# By default Performer uses eps=0.0 here
def linear_attention(q, k, v, eps=0.0, need_weights=False):
k_cumsum = k.sum(dim=-2)
D_inv = 1. / (torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q)) + eps)
context = torch.einsum('...nd,...ne->...de', k, v)
out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv)
attn = None if not need_weights else torch.einsum('...te,...se,...s->...ts', q, k, D_inv)
return out, attn
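# Illustrative check (not part of the original file): non-causal linear
# attention with an elu+1 feature map matches the explicit computation
# normalize(phi(Q) phi(K)^T) V described in the LinearAttention docstring below.
def _example_linear_attention_equivalence(b=1, h=2, n=6, d=4):
    q = nn.functional.elu(torch.randn(b, h, n, d)) + 1     # positive features
    k = nn.functional.elu(torch.randn(b, h, n, d)) + 1
    v = torch.randn(b, h, n, d)
    out, _ = linear_attention(q, k, v)
    scores = torch.einsum('...td,...sd->...ts', q, k)
    ref = torch.einsum('...ts,...se->...te', scores / scores.sum(-1, keepdim=True), v)
    assert torch.allclose(out, ref, atol=1e-4)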
@contextmanager
def null_context():
yield
# efficient causal linear attention, created by EPFL
def causal_linear_attention(q, k, v, eps=1e-6, need_weights=False):
from fast_transformers.causal_product import CausalDotProduct
autocast_enabled = torch.is_autocast_enabled()
is_half = isinstance(q, torch.cuda.HalfTensor)
assert not is_half or APEX_AVAILABLE, 'half tensors can only be used if nvidia apex is available'
cuda_context = null_context if not autocast_enabled else partial(autocast, enabled = False)
causal_dot_product_fn = amp.float_function(CausalDotProduct.apply) if is_half else CausalDotProduct.apply
k_cumsum = k.cumsum(dim=-2) + eps
D_inv = 1. / torch.einsum('...nd,...nd->...n', q, k_cumsum.type_as(q))
with cuda_context():
if autocast_enabled:
q, k, v = map(lambda t: t.float(), (q, k, v))
out = causal_dot_product_fn(q, k, v)
if need_weights:
            attn = torch.einsum('...te,...se,...s->...ts', q, k, D_inv)
causal_mask = torch.triu(torch.ones(q.shape[-2], k.shape[-2], dtype=torch.bool,
device=k.device), diagonal=1)
attn.masked_fill_(causal_mask, 0.0)
else:
attn = None
out = torch.einsum('...nd,...n->...nd', out, D_inv)
    return out, attn
# inefficient causal linear attention, without cuda code, for reader's reference
# not being used
# Adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/attention/linear_attention.py
class LinearAttention(nn.Module):
"""Implement unmasked attention using dot product of feature maps in
O(N D^2) complexity.
Given the query, key and value as Q, K, V instead of computing
V' = softmax(Q.mm(K.t()), dim=-1).mm(V),
we make use of a feature map function Φ(.) and perform the following
computation
V' = normalize(Φ(Q).mm(Φ(K).t())).mm(V).
The above can be computed in O(N D^2) complexity where D is the
dimensionality of Q, K and V and N is the sequence length. Depending on the
feature map, however, the complexity of the attention might be limited.
Arguments
---------
feature_map: callable, a callable that applies the feature map to the
last dimension of a tensor (default: elu(x)+1)
eps: float, a small number to ensure the numerical stability of the
denominator (default: 1e-6)
"""
# def __init__(self, query_dims, feature_map_cfg=None, eps=1e-6):
def __init__(self, d_model, n_heads, feature_map_cfg=None, eps=1e-6, dropout=0.0): # TODO dropout not used
super().__init__()
query_dims = d_model // n_heads
self.n_heads = n_heads
self.feature_map = (
hydra.utils.instantiate(feature_map_cfg, query_dims) if feature_map_cfg is not None
else elu_feature_map(query_dims)
)
self.eps = eps
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
# Permute the dimensions to BHTE instead of BTHE
query = rearrange(query, 'b t (h e) -> b h t e', h=self.n_heads)
key = rearrange(key, 'b s (h e) -> b h s e', h=self.n_heads)
value = rearrange(value, 'b s (h d) -> b h s d', h=self.n_heads)
# Apply the feature map to the query and key
self.feature_map.new_feature_map(query.device)
Q = self.feature_map.forward_queries(query)
K = self.feature_map.forward_keys(key)
# Apply the key padding mask and make sure that the attn_mask is
# all_ones or is causal
causal = attn_mask is not None and attn_mask.lower_triangular
if not (attn_mask is None or attn_mask.all_ones or causal):
raise RuntimeError(("LinearAttention does not support arbitrary attention masks"))
if causal:
            assert query.shape[-2] == key.shape[-2], 'query and key must have the same sequence length'
if key_padding_mask is not None:
K.masked_fill_(~rearrange(key_padding_mask.bool_matrix, 'b s -> b 1 s 1'), 0.0)
attn_fn = causal_linear_attention if causal else linear_attention
out, attn = attn_fn(Q, K, value, eps=self.eps, need_weights=need_weights)
out = rearrange(out, 'b h s d -> b s (h d)')
return out, attn
@TransposedModule
class Performer(SequenceModule):
    # TODO [21-09-29]: the MHA class should take options for attention like full, performer, etc. Currently this is essentially duplicated from the MultiheadAttention class.
def __init__(self, d_model, n_heads, *args, causal=True, **kwargs):
super().__init__()
self.d_model = d_model
self.d_output = d_model
self.mha = LinearAttention(d_model, n_heads, *args, **kwargs)
self.causal = causal
def forward(self, src, attn_mask=None, key_padding_mask=None, state=None, **kwargs):
""" state should represent a mask and key padding mask """
if self.causal and attn_mask is None:
attn_mask = TriangularCausalMask(src.size(-2), device=src.device)
# attn_mask, key_padding_mask = state
# Note that this returns None for the second argument
y, z = self.mha(src, src, src, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
return y, None
def step(self, x, state):
raise NotImplementedError
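# A minimal usage sketch of LinearAttention (which the TransposedModule-wrapped
# Performer above builds on), in the non-causal setting (attn_mask=None).
# It assumes the fast_transformers dependency imported above is installed and
# provides the default elu feature map; sizes are arbitrary.
def _sketch_linear_attention_layer():
    layer = LinearAttention(d_model=64, n_heads=4)
    x = torch.randn(2, 100, 64)   # (batch, length, d_model)
    y, _ = layer(x, x, x)         # self-attention -> (2, 100, 64)
    return y.shape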
| state-spaces-main | src/models/sequence/attention/linear.py |
"""Wrapper around nn.MultiheadAttention to adhere to SequenceModule interface."""
import torch
import torch.nn.functional as F
from torch import nn
import hydra
from models.sequence.base import SequenceModule, TransposedModule
import src.models.nn.utils as U
from einops import rearrange
@TransposedModule
class MultiheadAttention(SequenceModule):
"""Simple wrapper for MultiheadAttention."""
def __init__(self, d_model, n_heads, *args, causal=True, **kwargs):
super().__init__()
self.d_model = d_model
self.d_output = d_model
self.mha = nn.MultiheadAttention(d_model, n_heads, *args, batch_first=True, **kwargs)
self.causal = causal
def forward(self, src, attn_mask=None, key_padding_mask=None, state=None, **kwargs):
if self.causal and attn_mask is None:
attn_mask = torch.triu(torch.ones(src.size(-2), src.size(-2),
dtype=torch.bool, device=src.device),
diagonal=1)
# attn_mask, key_padding_mask = state
# Note that this returns None for the second argument
y, _ = self.mha(src, src, src, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
return y, None
    def step(self, x, state):
        # TODO proper cached (incremental) inference
        # x: (B, D)
        raise NotImplementedError("Step-wise (cached) inference is not implemented for MultiheadAttention")
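# A minimal usage sketch of the wrapper above, assuming the TransposedModule
# decorator keeps the (input) -> (output, state) calling convention shown here
# when transposed is left at its default. With causal=True and no explicit
# mask, the upper-triangular mask is constructed internally. Sizes are arbitrary.
def _sketch_multihead_attention():
    mha = MultiheadAttention(d_model=64, n_heads=4, causal=True)
    x = torch.randn(2, 100, 64)   # (batch, length, d_model)
    y, _ = mha(x)                 # -> (2, 100, 64); second return value is None
    return y.shape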
class VitAttention(SequenceModule):
"""Copied from implementation for ViT: only used for ViT model.
This attention class makes several simplifying assumptions (commonly satisfied in vision
applications):
1. q = k = v
2. No masks: no attention mask, no key padding mask
3. Embed dimension = Input dimension, i.e. projection matrices are square.
Arguments:
    - packed_linear: whether to pack q_proj, k_proj, v_proj into a single projection matrix.
        This option is for compatibility with T2T-ViT pretrained weights,
        where there is only one projection weight matrix.
"""
@property
def d_output(self):
return self.dim
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.,
# proj_drop=0.,
packed_linear=True,
linear_cfg=None,
**kwargs,
):
super().__init__()
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if linear_cfg is not None:
packed_linear = False
self.packed_linear = packed_linear
if packed_linear:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
else:
if linear_cfg is None:
linear_cfg = {'_target_': 'torch.nn.Linear'}
self.q_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.k_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.v_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
# Removing this dropout because we do this in SequenceResidualBlock
# self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, state=None):
B, N, C = x.shape
if self.packed_linear:
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
else:
q, k, v = self.q_proj(x), self.k_proj(x), self.v_proj(x)
q, k, v = [rearrange(x, 'b n (h d) -> b h n d', h=self.num_heads) for x in (q, k, v)]
# attn = (q @ k.transpose(-2, -1) * self.scale)
# Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
bsz, num_heads, q_seq_len, dk = q.size()
_, _, k_seq_len, _ = k.size()
q = rearrange(q, 'b h t d -> (b h) t d')
k = rearrange(k, 'b h s d -> (b h) d s')
# Preallocate attn_weights for `baddbmm`
attn = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=q.dtype, device=q.device)
attn = rearrange(torch.baddbmm(attn, q, k, beta=0, alpha=self.scale),
'(b h) t s -> b h t s', h = self.num_heads)
attn = F.softmax(attn, dim=-1, dtype=v.dtype)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
# x = self.proj_drop(x)
return x, None
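# A minimal usage sketch of VitAttention on a sequence of patch embeddings.
# Per the docstring it assumes q = k = v and no masking; the token count and
# dimensions below are arbitrary (e.g. 196 patches plus a CLS token).
def _sketch_vit_attention():
    attn = VitAttention(dim=192, num_heads=3)
    x = torch.randn(2, 197, 192)  # (batch, tokens, dim)
    y, _ = attn(x)                # -> (2, 197, 192)
    return y.shape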
| state-spaces-main | src/models/sequence/attention/mha.py |
# Expose the cell registry and load all possible cells
from .cells.basic import CellBase
from .cells import basic
from .cells import hippo
from .cells import timestamp
from . import sru
| state-spaces-main | src/models/sequence/rnns/__init__.py |
"""Implements variant of HiPPO-RNN that doesn't feed the hidden and memory states into each other time-wise, instead using simpler linear recurrences in time and letting them interact depthwise.
[21-10-22] AG: This was old experimental code. It should still work (perhaps with some minimal modifications), but there is not much reason to use this now. This was the initial step toward "deep linear parallelizable" versions of the HiPPO RNN which culminated in LSSL and S3.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy import signal
from src.models.nn import LinearActivation
from src.models.functional import unroll
from src.models.hippo.hippo import transition
from src.models.hippo.transition import TLagTAdaptiveTransitionManual, LagTAdaptiveTransitionManual, LegTAdaptiveTransitionManual, LegSAdaptiveTransitionManual, LagTCumsumAdaptiveTransition, TLagTCumsumAdaptiveTransition
from src.models.sequence.base import SequenceModule
class MemoryProjection(nn.Module):
"""Implements the memory projection operator for fixed dt."""
def __init__(self, order, measure, dt, discretization='bilinear'):
super().__init__()
self.order = order
A, B = transition(measure, order)
C = np.ones((1, order))
D = np.zeros((1,))
# dt, discretization options
A, B, _, _, _ = signal.cont2discrete((A, B, C, D), dt=dt, method=discretization)
self.register_buffer('A', torch.Tensor(A))
self.register_buffer('B', torch.Tensor(B))
def forward(self, inputs):
"""
inputs : (length, batch, size)
output : (length, batch, size, order)
# TODO this puts the unsqueeze inside here rather than outside, should make RNN versions the same
"""
# L, B, S = inputs.shape
inputs = inputs.unsqueeze(-1)
u = F.linear(inputs, self.B)
# output = unroll.unroll(self.A, u)
output = unroll.parallel_unroll_recursive(self.A, u)
# output = unroll.parallel_unroll_iterative(self.A, u)
# output = unroll.variable_unroll(self.A, u, variable=False)
# m = inputs.new_zeros(B, S, self.order)
# outputs = []
# for input in torch.unbind(inputs, dim=0):
# m = m + F.linear(m, self.A) + F.linear(input, self.B)
# output = torch.stack(outputs, dim=0)
return output
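# A minimal sketch of the discretization used above, written out directly:
# scipy's cont2discrete turns the continuous system x' = Ax + Bu into the
# recurrence x_t = A_d x_{t-1} + B_d u_t, which the unroll helpers are then
# expected to apply in parallel. The 'legt' measure and sizes are arbitrary.
def _sketch_discretized_recurrence():
    order, dt = 8, 0.01
    A, B = transition('legt', order)
    C, D = np.ones((1, order)), np.zeros((1,))
    A_d, B_d, *_ = signal.cont2discrete((A, B, C, D), dt=dt, method='bilinear')
    A_d = torch.tensor(A_d, dtype=torch.float)
    B_d = torch.tensor(B_d, dtype=torch.float)
    u = torch.randn(16, 2)        # (length, batch) of scalar inputs
    m = torch.zeros(2, order)
    states = []
    for u_t in torch.unbind(u, dim=0):
        m = m @ A_d.T + u_t.unsqueeze(-1) @ B_d.T
        states.append(m)
    return torch.stack(states, dim=0)  # (length, batch, order)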
class VariableMemoryProjection(nn.Module):
"""Version of MemoryProjection with variable discretization.
Materializes the transition matrices.
"""
def __init__(self, order=1, measure='legs', dt=None):
super().__init__()
self.order = order
self.measure = measure
self.dt = dt
# TODO incorporate measure
if self.measure == 'legs':
self.transition = LegSAdaptiveTransitionManual(self.order)
elif self.measure == 'legt':
self.transition = LegTAdaptiveTransitionManual(self.order)
elif self.measure == 'lagt':
self.transition = LagTAdaptiveTransitionManual(self.order)
elif self.measure == 'tlagt':
self.transition = TLagTAdaptiveTransitionManual(self.order)
else:
assert False, f"VariableMemoryProjection: measure {measure} not allowed"
# Cached tensors
self.register_buffer('I', torch.eye(self.order))
self.register_buffer('zero', torch.zeros(self.order, self.order))
def forward(self, inputs, dt=None):
"""
inputs : (L, B, M)
dt : (L, B, M)
output : (L, B, M, N) [length, batch, size, order]
# TODO this puts the input unsqueeze inside here rather than outside, should make RNN versions the same
"""
L, B, M = inputs.shape
# Construct discretization if necessary
if dt is None:
if self.dt is None:
dt = torch.cumsum(inputs.new_ones(L), dim=0) # no new_arange
dt = (1./dt)[:, None, None] # (L, 1, 1)
else:
dt = torch.full((L, 1, 1), self.dt).to(inputs) # fixed dt
# Create transition matrices
# I = self.I[:, None, None, None, :].expand((self.order, L, B, M, self.order)) # (N, L, B, M, N)
I = self.I[:, None, None, None, :].repeat((1, L, B, M, 1)) # (N, L, B, M, N)
As = self.transition.bilinear(dt, I, 0) # (N, L, B, M, N) # NOTE due to the broadcasting here, the ManualTransition actually swaps axes back for efficiency; can potential save if this axis reordering is too slow [probably not a bottleneck]
As = As.permute((1, 2, 3, 0, 4)) # (L, B, M, N, N)
# TODO this A might be transposed; should print to compare
# print(As.shape)
Bs = self.transition.bilinear(dt, inputs.new_zeros(self.order), 1) # (L, B, M, N)
inputs = inputs.unsqueeze(-1)
# u = F.linear(inputs, self.transition.B) # (L, B, M, N)
# u = F.linear(inputs, Bs) # (L, B, M, N)
u = inputs * Bs # (L, B, M, N)
# output = unroll.unroll(self.A, u)
# output = unroll.parallel_unroll_recursive(self.A, u)
output = unroll.variable_unroll(As, u, variable=True)
# output = unroll.parallel_unroll_iterative(self.A, u)
return output
class ToeplitzMemoryProjection(nn.Module):
def __init__(self, order, measure, measure_args={}):
super().__init__()
self.N = order
if measure == 'lagt':
self.transition = LagTCumsumAdaptiveTransition(self.N)
elif measure == 'glagt':
# TODO this is broken
alpha = measure_args.get('alpha', 0.0)
beta = measure_args.get('beta', 0.01)
self.transition = GLagTCumsumAdaptiveTransition(self.N, alpha, beta)
else:
assert False, f"ToeplitzMemoryProjection: measure {measure} not supported"
e = torch.zeros(self.N)
e[0] = 1
self.register_buffer('e', e) # the e_0 basis vector
def forward(self, inputs, dt):
"""
inputs : (L, B, M)
dt : (L, B, M)
output : (L, B, M, N) [length, batch, size, order]
# TODO this puts the unsqueeze inside here rather than outside, should make RNN versions the same
"""
L, B, M = inputs.shape
I = self.e.repeat((L, B, M, 1)) # (L, B, M, N)
# I = self.e.repeat(inputs.shape+(1,)) # (L, B, M, N)
As = self.transition.bilinear(dt, I, torch.zeros_like(dt)) # (L, B, M, N)
# Bs = self.transition.bilinear(dt, torch.zeros_like(I), torch.ones_like(dt)) # (L, B, M, N)
Bs = self.transition.bilinear(dt, torch.zeros_like(I), inputs) # (L, B, M, N)
output = unroll.variable_unroll_toeplitz(As, Bs, pad=False)
# print("HERE")
return output
class HiPPOQRNN(SequenceModule):
# TODO dropout?
def __init__(
self,
d_input, d_model=256, memory_size=1, memory_order=-1,
variable=False, dt=0.01,
measure='lagt', measure_args={},
dropout=0.0,
):
super().__init__()
if dropout > 0.0:
raise NotImplementedError("Dropout currently not supported for QRNN")
if memory_order < 0:
memory_order = d_model
self.d_input = d_input
self.d_model = d_model
self.memory_size = memory_size
self.memory_order = memory_order
self.variable = variable
self.dt = dt
# TODO deal with initializers
preact_ctor = LinearActivation
preact_args = [self.d_input + self.memory_size * self.memory_order, self.d_model, True]
self.W_hmx = preact_ctor(*preact_args)
if self.variable:
self.W_uh = nn.Linear(self.d_input, 2*self.memory_size)
if measure in ['lagt', 'tlagt']:
self.memory_proj = ToeplitzMemoryProjection(memory_order, measure, measure_args)
else:
self.memory_proj = VariableMemoryProjection(memory_order, measure)
else:
self.W_uh = nn.Linear(self.d_input, self.memory_size)
self.memory_proj = MemoryProjection(memory_order, measure, dt)
self.hidden_activation_fn = torch.tanh
self.memory_activation_fn = nn.Identity()
# @profile
def forward(self, inputs, return_output=False):
"""
inputs : [length, batch, dim]
"""
L, B, d_input = inputs.shape
assert d_input == self.d_input
u = self.memory_activation_fn(self.W_uh(inputs)) # (L, B, memory_size)
if self.variable:
# Automatic scaling dt
M = self.memory_size
# dt = torch.full((L, 1, 1), self.dt).to(inputs) # fixed dt to test
dt = torch.sigmoid(u[..., M:]) # variable dt
u = u[..., :M]
m = self.memory_proj(u, dt)
else:
m = self.memory_proj(u) # (L, B, M, N)
mx = torch.cat((m.view(L, B, self.memory_size*self.memory_order), inputs), dim=-1) # length, batch, d_input
h = self.hidden_activation_fn(self.W_hmx(mx)) # length, batch, d_model
if return_output:
return h, h[-1, ...]
else:
return None, h[-1, ...]
def default_state(self, x, batch_shape):
raise NotImplementedError("Needs to be implemented.")
def step(self, x, state):
raise NotImplementedError("Needs to be implemented.")
@property
def d_state(self):
return self.d_model
@property
def d_output(self):
return self.d_model
@property
def state_to_tensor(self):
return lambda state: state
| state-spaces-main | src/models/sequence/rnns/qrnn.py |
import torch
import torch.nn as nn
import src.utils as utils
from src.models.sequence.rnns.cells import CellBase
from src.models.sequence import SequenceModule
# [21-09-12 AG]: We previously set up a way to register RNNCell classes, which gives them a "local" name
# To convert this mapping from name to constructor, we use the fact that the str representation of a constructor is "<class '_target_'>"
# TODO should convert this to an explicit dictionary
cell_registry = {
name: str(target)[8:-2]
for name, target in CellBase.registry.items()
}
class RNN(SequenceModule):
def __init__(self, d_input, cell=None, return_output=True, transposed=False, dropout=0.0):
"""
return_output: if False, only returns the state
"""
super().__init__()
self.transposed = transposed
if dropout > 0.0:
raise NotImplementedError("Dropout currently not supported for custom RNNs")
self.return_output = return_output
self.cell = utils.instantiate(cell_registry, cell, d_input)
def forward(self, inputs, state=None, **kwargs):
"""
cell.forward : (input, state) -> (output, state)
inputs : [n_batch, l_seq, d]
"""
if self.transposed: inputs = inputs.transpose(-1, -2)
# Automatically detect PackedSequence
if isinstance(inputs, nn.utils.rnn.PackedSequence):
return PackedRNN.forward(self, inputs)
        # Construct initial state if none was provided
        if state is None:
            state = self.cell.default_state(*inputs.shape[:-2], device=inputs.device)
outputs = []
for input in torch.unbind(inputs, dim=-2):
output, new_state = self.step(input, state)
state = new_state
if self.return_output:
outputs.append(output)
outputs = torch.stack(outputs, dim=-2) if self.return_output else None
if self.transposed and outputs is not None: outputs = outputs.transpose(-1, -2)
return outputs, state
def step(self, x, state):
return self.cell.step(x, state)
def default_state(self, *args, **kwargs):
return self.cell.default_state(*args, **kwargs)
@property
def d_state(self):
"""Size after converting state to a single tensor."""
return self.cell.d_state
@property
def d_output(self):
"""Size of output."""
return self.cell.d_output
@property
def state_to_tensor(self):
"""Convert state into a single tensor output."""
return self.cell.state_to_tensor
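# A minimal sketch of what RNN.forward does, unrolled by hand with a concrete
# cell. In the real module the cell is built from a config via
# `utils.instantiate` (e.g. something like {'_name_': 'gru', ...}; the exact
# config format depends on the registry). A GatedRNNCell is constructed
# directly here just to show the loop; it assumes the repo's LinearActivation
# and Gate helpers. Sizes are arbitrary.
def _sketch_rnn_unroll():
    from src.models.sequence.rnns.cells.basic import GatedRNNCell
    cell = GatedRNNCell(d_input=16, d_model=32)
    x = torch.randn(4, 10, 16)                 # (batch, length, d_input)
    state = cell.default_state(4, device=x.device)
    outputs = []
    for x_t in torch.unbind(x, dim=-2):
        y_t, state = cell.step(x_t, state)
        outputs.append(y_t)
    return torch.stack(outputs, dim=-2)        # (batch, length, d_model)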
class PackedRNN(RNN):
"""Version of RNN that expected a nn.utils.rnn.PackedSequence."""
@staticmethod
def apply_tuple(tup, fn):
"""Apply a function to a Tensor or a tuple of Tensor"""
if isinstance(tup, tuple):
return tuple((fn(x) if isinstance(x, torch.Tensor) else x) for x in tup)
else:
return fn(tup)
@staticmethod
def concat_tuple(tups, dim=0):
"""Concat a list of Tensors or a list of tuples of Tensor."""
if isinstance(tups[0], tuple):
return tuple(
(torch.cat(xs, dim) if isinstance(xs[0], torch.Tensor) else xs[0])
for xs in zip(*tups)
)
else:
return torch.cat(tups, dim)
def forward(self, inputs, len_batch=None):
# assert len_batch is not None
# inputs = nn.utils.rnn.pack_padded_sequence(
# inputs, len_batch.cpu(), enforce_sorted=False
# )
assert isinstance(inputs, nn.utils.rnn.PackedSequence)
# Similar implementation to https://github.com/pytorch/pytorch/blob/9e94e464535e768ad3444525aecd78893504811f/torch/nn/modules/rnn.py#L202
inputs, batch_sizes, sorted_indices, unsorted_indices = inputs
max_batch_size = batch_sizes[0]
# Construct initial state
state = self.cell.default_state(max_batch_size, device=inputs.device)
outputs = []
# Following implementation at https://github.com/pytorch/pytorch/blob/9e94e464535e768ad3444525aecd78893504811f/aten/src/ATen/native/RNN.cpp#L621
# Batch sizes is a sequence of decreasing lengths, which are offsets
# into a 1D list of inputs. At every step we slice out batch_size elements,
# and possibly account for the decrease in the batch size since the last step,
# which requires us to slice the hidden state (since some sequences
# are completed now). The sliced parts are also saved, because we will need
# to return a tensor of final hidden state.
batch_sizes_og = batch_sizes
batch_sizes = batch_sizes.detach().cpu().numpy()
input_offset = 0
last_batch_size = batch_sizes[0]
saved_states = []
for batch_size in batch_sizes:
step_input = inputs[input_offset : input_offset + batch_size]
input_offset += batch_size
dec = last_batch_size - batch_size
if dec > 0:
saved_state = PackedRNN.apply_tuple(state, lambda x: x[batch_size:])
state = PackedRNN.apply_tuple(state, lambda x: x[:batch_size])
saved_states.append(saved_state)
last_batch_size = batch_size
output, new_state = self.cell.forward(step_input, state)
state = new_state
if self.return_output:
outputs.append(output)
saved_states.append(state)
saved_states.reverse()
state = PackedRNN.concat_tuple(saved_states)
state = PackedRNN.apply_tuple(
state,
lambda x: x[unsorted_indices] if unsorted_indices is not None else x,
)
if self.return_output:
outputs = nn.utils.rnn.PackedSequence(
torch.cat(outputs, dim=0),
batch_sizes_og,
sorted_indices,
unsorted_indices,
)
# outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
else:
outputs = None
return outputs, state
| state-spaces-main | src/models/sequence/rnns/rnn.py |
"""Implementation of the Simple Recurrent Unit.
https://arxiv.org/abs/1709.02755
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from src.models.sequence.rnns.cells import CellBase
from src.models.nn import LinearActivation
import src.models.nn.utils as U
from src.models.sequence.base import SequenceModule, TransposedModule
class SRUCell(CellBase):
"""Implementation of the pure SRU cell that works with the models.rnns.rnn.RNN class."""
name = 'sru'
valid_keys = ['fx', 'rx', 'bias']
@property
def default_initializers(self):
return {
'fx': 'xavier',
'rx': 'xavier',
}
@property
def default_architecture(self):
return {
'bias': True,
}
def __init__(
self, d_input, d_model,
residual='H', # Highway, Residual, None
offset=True, # whether to use previous or current cell to compute highway gate
**kwargs
):
self.offset = offset
self.residual = residual
assert self.residual in ['H', 'R', 'N']
super().__init__(d_input, d_model, **kwargs)
def reset_parameters(self):
self.W = LinearActivation(self.d_input, self.d_model, bias=self.architecture['bias'])
# gate
self.W_fx = LinearActivation(self.d_input, self.d_model, bias=True, initializer=self.initializers['fx'], activation='sigmoid')
self.W_fc = nn.Parameter(torch.randn(self.d_model))
# highway
if self.residual == 'H':
self.W_rx = LinearActivation(self.d_input, self.d_model, bias=True, initializer=self.initializers['rx'], activation='sigmoid')
self.W_rc = nn.Parameter(torch.randn(self.d_model))
# resize input
if self.d_input != self.d_model:
self.residual_transform = nn.Linear(self.d_input, self.d_model)
else:
self.residual_transform = nn.Identity()
def forward(self, x, c):
### Update hidden state
g = torch.sigmoid(self.W_fx(x) + self.W_fc * c)
c_ = (1.-g) * c + g * self.W(x)
if self.residual == 'H':
if self.offset:
r = torch.sigmoid(self.W_rx(x) + self.W_rc * c)
else:
r = torch.sigmoid(self.W_rx(x) + self.W_rc * c_)
h = (1-r) * self.residual_transform(x) + r * c_
elif self.residual == 'R':
h = c_ + self.residual_transform(x)
else:
h = c_
return h, c_
class SRURNNGate(nn.Module):
"""The gate/cell state computation of SRU."""
def __init__(self, d_model, feedback=True):
"""
        feedback: controls whether the cell state feeds back into itself. If False, this is essentially a QRNN reduce.
"""
super().__init__()
self.d_model = d_model
self.feedback = feedback
if self.feedback:
self.W_fc = nn.Parameter(torch.randn(self.d_model))
def forward(self, f, u, state=None):
"""
f, u: (batch, length, dim)
"""
# If no feedback, batch the sigmoid computation
if not self.feedback:
f = torch.sigmoid(f)
if state is None:
c = f.new_zeros((f.shape[0], f.shape[2]), requires_grad=False)
else:
assert state.shape == (f.shape[0], f.shape[2])
c = state
cs = []
for f_, u_ in zip(torch.unbind(f, dim=-2), torch.unbind(u, dim=-2)):
if self.feedback:
f_ = torch.sigmoid(f_ + self.W_fc * c)
c = (1.-f_) * c + f_ * u_
cs.append(c)
return torch.stack(cs, dim=1), c
@TransposedModule
class SRURNN(SequenceModule):
"""Full RNN layer implementing the SRU (not just a Cell)."""
def __init__(self, d_input, d_model=None, feedback=True, return_output=True, dropout=0.0):
super().__init__()
if d_model is None: d_model = d_input
self.d_input = d_input
self.d_model = d_model
self.return_output = return_output
self.W_fused = LinearActivation(d_input, 2*self.d_model, bias=True)
self.C = SRURNNGate(d_model, feedback=feedback)
if dropout > 0.0:
raise NotImplementedError("Dropout currently not supported for SRU")
def forward(self, x, state=None):
ufr = self.W_fused(x)
ufr = rearrange(ufr, 'b l (c d) -> b l c d', c=2)
u, fx = torch.unbind(ufr, dim=2) # (B, L, H)
y, c = self.C(fx, u, state=state) # (B, L, H)
if self.return_output:
return y, c
else:
return None, c
@property
def d_state(self):
return self.d_model
@property
def d_output(self):
return self.d_model
@property
def state_to_tensor(self):
return lambda state: state
# TODO default_state, step functions
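# A minimal shape-level sketch of the full SRURNN layer (not the cell),
# assuming the TransposedModule wrapper keeps the calling convention shown here
# when transposed is left at its default. The fused projection produces the
# update u and the forget pre-activation f per step, and the gate scans them
# over the length dimension. Sizes are arbitrary.
def _sketch_srurnn():
    layer = SRURNN(d_input=32, d_model=64, feedback=True)
    x = torch.randn(8, 50, 32)    # (batch, length, d_input)
    y, c = layer(x)               # y: (8, 50, 64), c: (8, 64) final gate state
    return y.shape, c.shape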
| state-spaces-main | src/models/sequence/rnns/sru.py |
"""The core RNN cell architecture of the HiPPO-RNN from the original HiPPO paper."""
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from scipy import signal
from scipy import linalg as la
from src.models.sequence.rnns.cells.basic import RNNCell
from src.models.nn import LinearActivation, Activation # , get_initializer
from src.models.nn.gate import Gate
forward_aliases = ['euler', 'forward_euler', 'forward', 'forward_diff']
backward_aliases = ['backward', 'backward_diff', 'backward_euler']
bilinear_aliases = ['bilinear', 'tustin', 'trapezoidal', 'trapezoid']
zoh_aliases = ['zoh']
class MemoryCell(RNNCell):
"""This class handles the general architectural wiring of the HiPPO-RNN, in particular the interaction between the hidden state and the linear memory state.
Specific variants can be instantiated by subclassing this with an appropriately defined update_memory() method.
"""
name = None
valid_keys = ['uxh', 'ux', 'uh', 'um', 'hxm', 'hx', 'hm', 'hh', 'bias', ]
@property
def default_initializers(self):
return {
'uxh': 'uniform',
'hxm': 'xavier',
'um': 'zero',
'hh': 'xavier',
}
@property
def default_architecture(self):
return {
'ux': True,
'hx': True,
'hm': True,
'hh': False,
'bias': True,
}
def __init__(
self, d_input, d_model, memory_size, memory_order,
memory_activation='id',
        gate='G', # 'N' | 'G' | 'UR'
**kwargs
):
self.memory_size = memory_size
self.memory_order = memory_order
self.memory_activation = memory_activation
self.gate = gate
super(MemoryCell, self).__init__(d_input, d_model, **kwargs)
self.input_to_d_model = self.d_input if self.architecture['hx'] else 0
self.input_to_memory_size = self.d_input if self.architecture['ux'] else 0
# Hidden to memory
self.W_uxh = LinearActivation(
self.input_to_memory_size + self.d_model,
self.memory_size,
bias=self.architecture['bias'],
initializer=self.initializers['uxh'],
activation=self.memory_activation,
activate=True,
)
self.memory_to_d_model = self.memory_size * self.memory_order if self.architecture['hm'] else 0
# Memory to hidden
self.W_hxm = LinearActivation(
self.input_to_d_model + self.memory_to_d_model,
self.d_model,
self.architecture['bias'],
initializer=self.initializers['hxm'],
activation=self.hidden_activation,
activate=False,
)
if self.architecture['hh']:
self.reset_hidden_to_hidden()
else:
self.W_hh = None
# Construct gate with options
if self.gate is not None:
preact_ctor = LinearActivation
preact_args = [
self.input_to_d_model + self.memory_to_d_model,
self.d_model,
self.architecture['bias'],
]
if self.architecture['hh']:
print("input to hidden size, memory to hidden size, hidden size:", self.input_to_d_model, self.memory_to_d_model, self.d_model)
preact_args[0] += self.d_model
self.W_gxm = Gate(self.d_model, preact_ctor, preact_args, mechanism=self.gate)
def reset_parameters(self):
# super().reset_parameters() # TODO find a way to refactor to call super()
self.activate = Activation(self.hidden_activation, self.d_model)
def forward(self, input, state):
h, m, time_step = state
# Update the memory
u = self.forward_memory(input, h, m)
m = self.update_memory(m, u, time_step) # (batch, memory_size, memory_order)
# Update hidden
h = self.forward_hidden(input, h, m)
next_state = (h, m, time_step + 1)
output = self.state_to_tensor(next_state)
return output, next_state
def forward_memory(self, input, h, m):
"""First part of forward pass to construct the memory state update."""
input_to_memory = input if self.architecture['ux'] else input.new_empty((0,))
xh = torch.cat((input_to_memory, h), dim=-1)
# Construct the update features
u = self.W_uxh(xh) # (batch, memory_size)
return u
def forward_hidden(self, input, h, m):
input_to_hidden = input if self.architecture['hx'] else input.new_empty((0,))
# Update hidden state from memory
memory_to_hidden = m.view(input.shape[0], self.memory_size*self.memory_order)
xm = torch.cat((input_to_hidden, memory_to_hidden), dim=-1)
hidden_preact = self.W_hxm(xm)
if self.architecture['hh']:
hidden_preact = hidden_preact + self.W_hh(h)
hidden = self.activate(hidden_preact)
# Construct gate if necessary
if self.gate is None:
h = hidden
else:
if self.architecture['hh']:
xm = torch.cat((xm, h), dim=-1)
g = self.W_gxm(xm)
h = (1.-g) * h + g * hidden
return h
def update_memory(self, m, u, time_step):
"""
m: (B, M, N) [batch size, memory size, memory order]
u: (B, M)
Output: (B, M, N)
"""
raise NotImplementedError
def default_state(self, *batch_shape, device=None):
return (
torch.zeros(*batch_shape, self.d_model, device=device, requires_grad=False),
torch.zeros(*batch_shape, self.memory_size, self.memory_order, device=device, requires_grad=False),
0,
)
@property
def state_to_tensor(self):
"""Converts a state into a single output (tensor)."""
def fn(state):
h, m, time_step = state
return h
return fn
@property
def d_state(self):
return self.d_model
@property
def d_output(self):
return self.d_model
class LTICell(MemoryCell):
"""A cell where the memory state follows Linear Time Invariant dynamics: c' = Ac + Bf."""
def __init__(
self, d_input, d_model, memory_size, memory_order,
A, B,
dt=0.01,
discretization='zoh',
**kwargs
):
super().__init__(d_input, d_model, memory_size, memory_order, **kwargs)
C = np.ones((1, memory_order))
D = np.zeros((1,))
dA, dB, _, _, _ = signal.cont2discrete((A, B, C, D), dt=dt, method=discretization)
dA = dA - np.eye(memory_order) # puts into form: x += Ax
self.register_buffer('A', torch.Tensor(dA))
self.register_buffer('B', torch.Tensor(dB))
def update_memory(self, m, u, time_step):
u = u.unsqueeze(-1) # (B, M, 1)
return m + F.linear(m, self.A) + F.linear(u, self.B)
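# A minimal check of the update used by LTICell above: because the identity is
# subtracted from the discretized A, `m + m A^T + u B^T` equals a plain
# discrete state-space step `m A_d^T + u B_d^T` for the original discretized
# matrices (A_d, B_d). Matrices and sizes are random/arbitrary.
def _sketch_lti_update_identity():
    N = 6
    A_d = np.random.randn(N, N)
    B_d = np.random.randn(N, 1)
    A = torch.tensor(A_d - np.eye(N), dtype=torch.float)
    B = torch.tensor(B_d, dtype=torch.float)
    m = torch.randn(3, 2, N)      # (batch, memory_size, memory_order)
    u = torch.randn(3, 2, 1)
    stepped = m + F.linear(m, A) + F.linear(u, B)
    reference = F.linear(m, torch.tensor(A_d, dtype=torch.float)) + F.linear(u, B)
    assert torch.allclose(stepped, reference, atol=1e-5)
    return stepped.shape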
class LSICell(MemoryCell):
"""A cell where the memory state Linear 'Scale' Invariant dynamics: c' = 1/t (Ac + Bf)."""
def __init__(
self, d_input, d_model, memory_size, memory_order,
A, B,
init_t = 0, # 0 for special case at t=0 (new code), else old code without special case
l_max=1024,
discretization='bilinear',
**kwargs
):
# TODO: make init_t start at arbitrary time (instead of 0 or 1)
# B should have shape (N, 1)
assert len(B.shape) == 2 and B.shape[1] == 1
super().__init__(d_input, d_model, memory_size, memory_order, **kwargs)
assert isinstance(init_t, int)
self.init_t = init_t
self.l_max = l_max
A_stacked = np.empty((l_max, memory_order, memory_order), dtype=A.dtype)
B_stacked = np.empty((l_max, memory_order), dtype=B.dtype)
B = B[:,0]
N = memory_order
for t in range(1, l_max + 1):
At = A / t
Bt = B / t
if discretization in forward_aliases:
A_stacked[t - 1] = np.eye(N) + At
B_stacked[t - 1] = Bt
elif discretization in backward_aliases:
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, np.eye(N), lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, Bt, lower=True)
elif discretization in bilinear_aliases:
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, np.eye(N) + At / 2, lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, Bt, lower=True)
elif discretization in zoh_aliases:
A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
B_stacked[t - 1] = la.solve_triangular(A, A_stacked[t - 1] @ B - B, lower=True)
B_stacked = B_stacked[:, :, None]
A_stacked -= np.eye(memory_order) # puts into form: x += Ax
self.register_buffer('A', torch.Tensor(A_stacked))
self.register_buffer('B', torch.Tensor(B_stacked))
def update_memory(self, m, u, time_step):
u = u.unsqueeze(-1) # (B, M, 1)
t = time_step - 1 + self.init_t
if t < 0:
return F.pad(u, (0, self.memory_order - 1))
else:
if t >= self.l_max: t = self.l_max - 1
return m + F.linear(m, self.A[t]) + F.linear(u, self.B[t])
| state-spaces-main | src/models/sequence/rnns/cells/memory.py |
"""Implementation of the 'MinimalRNN', which a reviewer from NeurIPS2020 asked us to compare against.
https://arxiv.org/abs/1711.06788
[21-10-22] I believe this has not been tested in awhile but should work with minimal modifications
"""
import torch

from src.models.sequence.rnns.cells.basic import CellBase
from src.models.nn import LinearActivation
from src.models.nn.gate import Gate
class MinimalRNNCell(CellBase):
name = 'mrnn'
valid_keys = ['hx', 'bias']
@property
def default_initializers(self):
return {
'hx': 'xavier',
}
@property
def default_architecture(self):
return {
'bias': True,
}
def __init__(
self, d_input, d_model,
hidden_activation='tanh',
zero_bias_init=False,
**kwargs
):
self.hidden_activation = hidden_activation
self.zero_bias_init=zero_bias_init
super().__init__(d_input, d_model, **kwargs,)
def reset_parameters(self):
self.W_hx = LinearActivation(
self.d_input, self.d_model,
bias=self.architecture['bias'], zero_bias_init=self.zero_bias_init,
initializer=self.initializers['hx'], activation=self.hidden_activation,
activate=True,
)
# get_initializer(self.initializers['hx'], self.hidden_activation)(self.W_hx.weight)
# self.hidden_activation_fn = Activate(self.hidden_activation, self.d_model)
preact_ctor = LinearActivation
preact_args = [self.d_input + self.d_model, self.d_model, self.architecture['bias']]
self.W_g = Gate(self.d_model, preact_ctor, preact_args, mechanism='G')
def forward(self, input, h):
# Update hidden state
# hidden_preact = self.W_hx(input)
# hidden = self.hidden_activation_fn(hidden_preact)
hidden = self.W_hx(input)
hx = torch.cat((input, h), dim=-1)
g = self.W_g(hx)
h = (1.-g) * h + g * hidden
return h, h
| state-spaces-main | src/models/sequence/rnns/cells/minimalrnn.py |
from .basic import CellBase
| state-spaces-main | src/models/sequence/rnns/cells/__init__.py |
"""Baseline simple RNN cells such as the vanilla RNN and GRU."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.nn import LinearActivation, Activation # , get_initializer
from src.models.nn.gate import Gate
from src.models.nn.orthogonal import OrthogonalLinear
from src.models.sequence.base import SequenceModule
class CellBase(SequenceModule):
"""Abstract class for our recurrent cell interface.
Passes input through.
"""
registry = {}
# https://www.python.org/dev/peps/pep-0487/#subclass-registration
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# Only register classes with @name attribute
if hasattr(cls, 'name') and cls.name is not None:
cls.registry[cls.name] = cls
name = 'id'
valid_keys = []
@property
def default_initializers(self):
return {}
@property
def default_architecture(self):
return {}
def __init__(self, d_input, d_model, initializers=None, architecture=None):
super().__init__()
self.d_input = d_input
self.d_model = d_model
self.architecture = self.default_architecture
self.initializers = self.default_initializers
if initializers is not None:
self.initializers.update(initializers)
print("Initializers:", initializers)
if architecture is not None:
self.architecture.update(architecture)
assert set(self.initializers.keys()).issubset(self.valid_keys)
assert set(self.architecture.keys()).issubset(self.valid_keys)
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, input, hidden):
"""Returns output, next_state."""
return input, input
def default_state(self, *batch_shape, device=None):
return torch.zeros(
*batch_shape, self.d_model,
device=device,
requires_grad=False,
)
def step(self, x, state):
return self.forward(x, state)
@property
def state_to_tensor(self):
return lambda state: state
@property
def d_state(self):
return self.d_model
@property
def d_output(self):
return self.d_model
class RNNCell(CellBase):
name = 'rnn'
valid_keys = ['hx', 'hh', 'bias']
default_initializers = {
'hx': 'xavier',
'hh': 'xavier',
}
default_architecture = {
'bias': True,
}
def __init__(
self, d_input, d_model,
hidden_activation='tanh',
orthogonal=False,
ortho_args=None,
zero_bias_init=False,
**kwargs
):
self.hidden_activation = hidden_activation
self.orthogonal = orthogonal
self.ortho_args = ortho_args
self.zero_bias_init=zero_bias_init
super().__init__(d_input, d_model, **kwargs)
def reset_parameters(self):
self.W_hx = LinearActivation(
self.d_input, self.d_model,
bias=self.architecture['bias'],
zero_bias_init=self.zero_bias_init,
initializer=self.initializers['hx'],
activation=self.hidden_activation,
# apply_activation=False,
activate=False,
)
self.activate = Activation(self.hidden_activation, self.d_model)
self.reset_hidden_to_hidden()
def reset_hidden_to_hidden(self):
if self.orthogonal:
if self.ortho_args is None:
self.ortho_args = {}
self.ortho_args['d_input'] = self.d_model
self.ortho_args['d_output'] = self.d_model
self.W_hh = OrthogonalLinear(**self.ortho_args)
else:
self.W_hh = LinearActivation(
self.d_model, self.d_model,
bias=self.architecture['bias'],
zero_bias_init=self.zero_bias_init,
initializer=self.initializers['hh'],
activation=self.hidden_activation,
# apply_activation=False,
activate=False,
)
# self.W_hh = nn.Linear(self.d_model, self.d_model, bias=self.architecture['bias'])
# get_initializer(self.initializers['hh'], self.hidden_activation)(self.W_hh.weight)
def forward(self, input, h):
# Update hidden state
hidden_preact = self.W_hx(input) + self.W_hh(h)
hidden = self.activate(hidden_preact)
return hidden, hidden
class GatedRNNCell(RNNCell):
name = 'gru'
def __init__(
self, d_input, d_model,
gate='G', # 'N' | 'G' | 'R' | 'UR'
reset='G',
**kwargs
):
self.gate = gate
self.reset = reset
super().__init__(d_input, d_model, **kwargs)
def reset_parameters(self):
super().reset_parameters()
# self.reset_gate()
# def reset_gate(self):
preact_ctor = LinearActivation
preact_args = [self.d_input + self.d_model, self.d_model, self.architecture['bias']]
self.W_g = Gate(self.d_model, preact_ctor, preact_args, mechanism=self.gate)
self.W_reset = Gate(self.d_model, preact_ctor, preact_args, mechanism=self.reset)
def forward(self, input, h):
hx = torch.cat((input, h), dim=-1)
reset = self.W_reset(hx)
_, update = super().forward(input, reset*h)
g = self.W_g(hx)
h = (1.-g) * h + g * update
return h, h
class ExpRNNCell(RNNCell):
"""Implementation of expRNN.
Note: there is a subtle distinction between this and the ExpRNN original cell
in the initialization of hx, but this shouldn't make a difference.
(Original ExpRNN cell is located in models.nn.exprnn.orthogonal.OrthogonalRNN.)
"""
name = 'exprnn'
def __init__(self, d_input, d_model, orthogonal=True, hidden_activation='modrelu', **kwargs):
super().__init__(d_input, d_model, orthogonal=orthogonal, hidden_activation=hidden_activation, **kwargs)
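# A minimal sketch exercising the cell interface defined by CellBase: build a
# vanilla RNNCell, get a default (zero) state, and take one step. It assumes
# the repo's LinearActivation/Activation helpers; sizes are arbitrary.
def _sketch_cell_interface():
    cell = RNNCell(d_input=16, d_model=32)
    x = torch.randn(4, 16)                         # (batch, d_input)
    h = cell.default_state(4, device=x.device)     # (batch, d_model) zeros
    y, h_next = cell.step(x, h)                    # output == new hidden state
    return y.shape, cell.state_to_tensor(h_next).shape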
| state-spaces-main | src/models/sequence/rnns/cells/basic.py |
"""Implementation of full HiPPO-RNN variants."""
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
from src.models.sequence.rnns.cells.memory import LTICell, LSICell
from src.models.hippo.hippo import transition
class HiPPOLTICell(LTICell):
measure = None
def __init__(
self, d_input, d_model, memory_size=1, memory_order=-1,
measure_args={},
**kwargs
):
if memory_order < 0:
memory_order = d_model
A, B = transition(type(self).measure, memory_order, **measure_args)
super().__init__(d_input, d_model, memory_size, memory_order, A, B, **kwargs)
class HiPPOLSICell(LSICell):
measure = None
def __init__(
self, d_input, d_model, memory_size=1, memory_order=-1,
measure_args={},
**kwargs
):
if memory_order < 0:
memory_order = d_model
A, B = transition(type(self).measure, memory_order, **measure_args)
super().__init__(d_input, d_model, memory_size, memory_order, A, B, **kwargs)
class LegTCell(HiPPOLTICell):
"""Translated Legendre."""
name = 'legt'
measure = 'legt'
class LegSCell(HiPPOLSICell):
"""Scaled Legendre."""
name = 'legs'
measure = 'legs'
class LagTCell(HiPPOLTICell):
"""Translated Laguerre."""
name = 'lagt'
measure = 'lagt'
def __init__(self, d_input, d_model, dt=1.0, **kwargs):
super().__init__(d_input, d_model, dt=dt, **kwargs)
class GLagTCell(HiPPOLTICell):
"""Translated Generalized Laguerre."""
name = 'glagt'
measure = 'glagt'
def __init__(self, d_input, d_model, dt=1.0, **kwargs):
super().__init__(d_input, d_model, dt=dt, **kwargs)
class LMUCell(HiPPOLTICell):
"""This cell differs from the HiPPO-LegT cell by a normalization in the recurrent matrix A, and different RNN connections and initialization.
https://papers.nips.cc/paper/2019/file/952285b9b7e7a1be5aa7849f32ffff05-Paper.pdf
"""
name = 'lmu'
measure = 'lmu'
@property
def default_initializers(self):
return {
'uxh': 'uniform',
'ux': 'one',
'uh': 'zero',
'um': 'zero',
'hxm': 'xavier',
'hx': 'zero',
'hh': 'zero',
'hm': 'xavier',
}
@property
def default_architecture(self):
return {
'ux': True,
'um': True,
'hx': True,
'hm': True,
'hh': True,
'bias': False,
}
def __init__(self, d_input, d_model, theta=100, dt=1., gate='N', **kwargs):
super().__init__(d_input, d_model, dt=dt/theta, gate=gate, **kwargs)
class RandomCell(LTICell):
""" Ablation: demonstrate that random A matrix is not effective. """
name = 'random'
def __init__(
self, d_input, d_model, memory_size=1, memory_order=-1,
**kwargs
):
if memory_order < 0:
memory_order = d_model
N = memory_order
A = np.random.normal(size=(N, N)) / N**.5
B = np.random.normal(size=(N, 1))
super().__init__(d_input, d_model, memory_size, memory_order, A, B, **kwargs)
# TODO remove the noise cell, rename all the OP stuff into HiPPO
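# A minimal usage sketch of a HiPPO cell: LegSCell keeps a hidden state h, a
# memory tensor m of shape (batch, memory_size, memory_order), and a step
# counter, and is driven one input at a time. It assumes the transition and
# Gate/LinearActivation helpers imported by this module; sizes are arbitrary.
def _sketch_legs_cell():
    cell = LegSCell(d_input=16, d_model=32, memory_size=1, memory_order=64)
    x = torch.randn(4, 16)                          # (batch, d_input)
    state = cell.default_state(4, device=x.device)  # (h, m, time_step=0)
    y, state = cell.forward(x, state)               # y: (4, 32)
    return y.shape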
| state-spaces-main | src/models/sequence/rnns/cells/hippo.py |
"""Variants of the HiPPO-RNN that accept timestamped inputs and evolve according to the elapsed time between inputs. Used in original HiPPO paper for irregularly-sampled CharacterTrajectories experiments."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from src.models.sequence.rnns.cells.memory import MemoryCell, forward_aliases, backward_aliases, bilinear_aliases, zoh_aliases
from src.models.hippo.transition import (
LegSAdaptiveTransitionManual,
LegTAdaptiveTransitionManual,
LagTAdaptiveTransitionManual,
LegSTriDInverseAdaptiveTransition,
LegTTriDInverseAdaptiveTransition,
LagTTriDInverseAdaptiveTransition,
)
class TimeMemoryCell(MemoryCell):
"""MemoryCell with timestamped data.
Assumes that first channel of inputs are timestamps.
"""
def __init__(
self,
d_input, d_model, memory_size, memory_order,
measure='legs',
method='trid',
discretization='bilinear',
**kwargs
):
if memory_order < 0:
memory_order = d_model
super().__init__(d_input-1, d_model, memory_size, memory_order, **kwargs)
assert measure in ['legs', 'lagt', 'legt']
assert method in ['dense', 'trid']
transitions = {
'dense': {
'legs': LegSAdaptiveTransitionManual,
'legt': LegTAdaptiveTransitionManual,
'lagt': LagTAdaptiveTransitionManual,
},
'trid': {
'legs': LegSTriDInverseAdaptiveTransition,
'legt': LegTTriDInverseAdaptiveTransition,
'lagt': LagTTriDInverseAdaptiveTransition,
},
}
self.transition = transitions[method][measure](self.memory_order)
if discretization in forward_aliases:
self.transition_fn = partial(self.transition.forward_diff, **kwargs)
elif discretization in backward_aliases:
self.transition_fn = partial(self.transition.backward_diff, **kwargs)
elif discretization in bilinear_aliases:
self.transition_fn = partial(self.transition.bilinear, **kwargs)
else: assert False
def update_memory(self, m, u, t0, t1):
"""This class is intended to be subclassed to the LTI or LSI cases."""
raise NotImplementedError
def forward(self, input, state):
h, m, prev_timestamp = state
timestamp, input = input[:, 0], input[:, 1:]
# Update the memory
u = self.forward_memory(input, h, m)
m = self.update_memory(m, u, prev_timestamp, timestamp) # (batch, memory_size, memory_order)
# Update hidden
h = self.forward_hidden(input, h, m)
next_state = (h, m, timestamp)
output = self.state_to_tensor(next_state)
return output, next_state
class TimeLSICell(TimeMemoryCell):
"""A cell implementing "Linear Scale Invariant" dynamics: c' = Ac + Bf with timestamped inputs.
This class can handle the setting where there is timescale shift, even if the model does not know about it.
"""
name = 'tlsi'
def update_memory(self, m, u, t0, t1):
"""
m: (B, M, N) [batch, memory_size, memory_order]
u: (B, M)
t0: (B,) previous time
t1: (B,) current time
"""
if torch.eq(t1, 0.).any():
return F.pad(u.unsqueeze(-1), (0, self.memory_order - 1))
else:
dt = ((t1-t0)/t1).unsqueeze(-1)
m = self.transition_fn(dt, m, u)
return m
class TimeLTICell(TimeMemoryCell):
"""A cell implementing Linear Time Invariant dynamics: c' = Ac + Bf with timestamped inputs.
Unlike HiPPO-LegS with timestamps, this class will not work if there is timescale shift that it does not know about.
However, unlike generic RNNs, it does work if it knows the sampling rate change.
"""
name = 'tlti'
def __init__(
self,
d_input, d_model, memory_size=1, memory_order=-1,
measure='legt',
dt=1.0,
**kwargs
):
if memory_order < 0:
memory_order = d_model
self.dt = dt
super().__init__(d_input, d_model, memory_size, memory_order, measure=measure, **kwargs)
def update_memory(self, m, u, t0, t1):
"""
m: (B, M, N) [batch, memory_size, memory_order]
u: (B, M)
t0: (B,) previous time
t1: (B,) current time
"""
dt = self.dt*(t1-t0).unsqueeze(-1)
m = self.transition_fn(dt, m, u)
return m
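# A minimal sketch of the expected input layout for the Time* cells: the first
# input channel carries the timestamp, so d_input counts the timestamp plus the
# actual features. This assumes the tridiagonal transition helpers imported
# above; sizes and times are arbitrary.
def _sketch_time_lsi_cell():
    cell = TimeLSICell(d_input=1 + 8, d_model=32, memory_size=1, memory_order=64)
    t = torch.tensor([0.0, 0.3, 1.1, 2.0]).unsqueeze(-1)  # (batch, 1) timestamps
    x = torch.randn(4, 8)                                 # (batch, features)
    state = cell.default_state(4, device=x.device)
    y, state = cell.forward(torch.cat([t, x], dim=-1), state)
    return y.shape                                        # (4, 32)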
| state-spaces-main | src/models/sequence/rnns/cells/timestamp.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.sequence.base import SequenceModule
from src.models.sequence.modules.pool import DownPool, UpPool
from src.models.sequence.backbones.block import SequenceResidualBlock
class Sashimi(SequenceModule):
def __init__(
self,
d_model,
n_layers,
pool=[],
expand=1,
ff=2,
prenorm=False,
dropout=0.0,
dropres=0.0,
layer=None,
center_layer=None,
residual=None,
norm=None,
initializer=None,
transposed=True,
interp=0,
act_pool=None,
):
super().__init__()
self.d_model = d_model
H = d_model
self.interp = interp
self.transposed = transposed
# Layer arguments
layer_cfg = layer.copy()
layer_cfg['dropout'] = dropout
layer_cfg['transposed'] = self.transposed
layer_cfg['initializer'] = initializer
center_layer_cfg = center_layer if center_layer is not None else layer_cfg.copy()
center_layer_cfg['dropout'] = dropout
center_layer_cfg['transposed'] = self.transposed
ff_cfg = {
'_name_': 'ffn',
'expand': ff,
'transposed': self.transposed,
'activation': 'gelu',
'initializer': initializer,
'dropout': dropout,
}
def _residual(d, i, layer):
return SequenceResidualBlock(
d,
i,
prenorm=prenorm,
dropout=dropres,
transposed=self.transposed,
layer=layer,
residual=residual if residual is not None else 'R',
norm=norm,
pool=None,
)
# Down blocks
d_layers = []
for p in pool:
# Add sequence downsampling and feature expanding
d_layers.append(DownPool(H, H*expand, stride=p, transposed=self.transposed, activation=act_pool))
H *= expand
self.d_layers = nn.ModuleList(d_layers)
# Center block
c_layers = [ ]
for i in range(n_layers):
c_layers.append(_residual(H, i+1, center_layer_cfg))
if ff > 0: c_layers.append(_residual(H, i+1, ff_cfg))
self.c_layers = nn.ModuleList(c_layers)
# Up blocks
u_layers = []
for p in pool[::-1]:
block = []
H //= expand
block.append(UpPool(H*expand, H, stride=p, transposed=self.transposed, activation=act_pool))
for i in range(n_layers):
block.append(_residual(H, i+1, layer_cfg))
if ff > 0: block.append(_residual(H, i+1, ff_cfg))
u_layers.append(nn.ModuleList(block))
self.u_layers = nn.ModuleList(u_layers)
assert H == d_model
self.norm = nn.LayerNorm(H)
if interp > 0:
interp_layers = []
assert interp % 2 == 0
for i in range(int(math.log2(interp))):
block = []
for j in range(2):
block.append(_residual(H, i+1, layer_cfg))
if ff > 0: block.append(_residual(H, i+1, ff_cfg))
interp_layers.append(nn.ModuleList(block))
self.interp_layers = nn.ModuleList(interp_layers)
@property
def d_output(self):
return self.d_model
def forward(self, x, state=None, **kwargs):
"""
input: (batch, length, d_input)
output: (batch, length, d_output)
"""
if self.interp > 0:
# Interpolation will be used to reconstruct "missing" frames
# Subsample the input sequence and run the SNet on that
x_all = x
x = x[:, ::self.interp, :]
y = torch.zeros_like(x_all)
# Run the interpolating layers
interp_level = self.interp
for block in self.interp_layers:
# Pad to the right and discard the output of the first input
# (creates dependence on the next time step for interpolation)
z = x_all[:, ::interp_level, :]
if self.transposed: z = z.transpose(1, 2)
for layer in block:
z, _ = layer(z)
z = F.pad(z[:, :, 1:], (0, 1), mode='replicate')
if self.transposed: z = z.transpose(1, 2)
y[:, interp_level//2 - 1::interp_level, :] += z
interp_level = int(interp_level // 2)
if self.transposed: x = x.transpose(1, 2)
# Down blocks
outputs = []
outputs.append(x)
for layer in self.d_layers:
x, _ = layer(x)
outputs.append(x)
# Center block
for layer in self.c_layers:
x, _ = layer(x)
x = x + outputs.pop() # add a skip connection to the last output of the down block
for block in self.u_layers:
for layer in block:
x, _ = layer(x)
if isinstance(layer, UpPool):
# Before modeling layer in the block
x = x + outputs.pop()
outputs.append(x)
x = x + outputs.pop() # add a skip connection from the input of the modeling part of this up block
# feature projection
if self.transposed: x = x.transpose(1, 2) # (batch, length, expand)
x = self.norm(x)
if self.interp > 0:
y[:, self.interp - 1::self.interp, :] = x
x = y
return x, None # required to return a state
def default_state(self, *args, **kwargs):
""" x: (batch) """
layers = list(self.d_layers) + list(self.c_layers) + [layer for block in self.u_layers for layer in block]
return [layer.default_state(*args, **kwargs) for layer in layers]
def step(self, x, state, **kwargs):
"""
input: (batch, d_input)
output: (batch, d_output)
"""
# States will be popped in reverse order for convenience
state = state[::-1]
# Down blocks
outputs = [] # Store all layers for SaShiMi
next_state = []
for layer in self.d_layers:
outputs.append(x)
x, _next_state = layer.step(x, state=state.pop(), **kwargs)
next_state.append(_next_state)
if x is None: break
# Center block
if x is None:
# Skip computations since we've downsized
skipped = len(self.d_layers) - len(outputs)
for _ in range(skipped + len(self.c_layers)):
next_state.append(state.pop())
for i in range(skipped):
for _ in range(len(self.u_layers[i])):
next_state.append(state.pop())
u_layers = list(self.u_layers)[skipped:]
else:
outputs.append(x)
for layer in self.c_layers:
x, _next_state = layer.step(x, state=state.pop(), **kwargs)
next_state.append(_next_state)
x = x + outputs.pop()
u_layers = self.u_layers
for block in u_layers:
for layer in block:
x, _next_state = layer.step(x, state=state.pop(), **kwargs)
next_state.append(_next_state)
if isinstance(layer, UpPool):
# Before modeling layer in the block
x = x + outputs.pop()
outputs.append(x)
x = x + outputs.pop()
# feature projection
x = self.norm(x)
return x, next_state
| state-spaces-main | src/models/sequence/backbones/sashimi.py |
"""Different deep backbone that is essentially a 1-D UNet instead of ResNet/Transformer backbone.
Sequence length gets downsampled through the depth of the network while number of feature increases.
Then sequence length gets upsampled again (causally) and blocks are connected through skip connections.
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import DictConfig
from einops import rearrange, repeat, reduce
import src.utils as utils
from src.models.sequence.base import SequenceModule
from src.models.sequence.modules.pool import DownPool, UpPool, up_registry, registry as down_registry
from src.models.sequence.backbones.block import SequenceResidualBlock
contract = torch.einsum
class SequenceUNet(SequenceModule):
"""UNet backbone for 1-D sequence models.
    `layer` is a config (Namespace) that specifies '_name_', referring to a constructor, and the arguments to that layer constructor.
    The layer must conform to the following interface: (i) it takes a hidden dimension H and sequence length L, and (ii) its forward pass transforms an input sequence of shape (B, H, L) to an output of shape (B, H, L).
"""
def __init__(
self,
d_model,
n_layers,
pool=[],
pool_mode='linear',
expand=1,
ff=2,
cff=0,
prenorm=False,
dropout=0.0,
dropres=0.0,
layer=None,
center_layer=None,
residual=None,
norm=None,
initializer=None,
# l_max=-1,
transposed=True,
# act_pool=None,
):
super().__init__()
self.d_model = d_model
H = d_model
self.transposed = transposed
# Layer arguments
layer_cfg = layer.copy()
layer_cfg['dropout'] = dropout
layer_cfg['transposed'] = self.transposed
layer_cfg['initializer'] = initializer
print("layer config", layer_cfg)
center_layer_cfg = center_layer if center_layer is not None else layer_cfg.copy()
center_layer_cfg['dropout'] = dropout
center_layer_cfg['transposed'] = self.transposed
ff_cfg = {
'_name_': 'ffn',
'expand': ff,
'transposed': self.transposed,
'activation': 'gelu',
'initializer': initializer,
'dropout': dropout,
}
def _residual(d, i, layer):
return SequenceResidualBlock(
d,
i,
prenorm=prenorm,
dropout=dropres,
transposed=self.transposed,
layer=layer,
residual=residual if residual is not None else 'R',
norm=norm,
pool=None,
)
# Down blocks
d_layers = []
for p in pool:
for i in range(n_layers):
d_layers.append(_residual(H, i+1, layer_cfg))
if ff > 0: d_layers.append(_residual(H, i+1, ff_cfg))
# Add sequence downsampling and feature expanding
d_pool = utils.instantiate(down_registry, pool_mode, H, stride=p, expand=expand, transposed=self.transposed)
d_layers.append(d_pool)
H *= expand
self.d_layers = nn.ModuleList(d_layers)
# Center block
c_layers = [ ]
for i in range(n_layers):
c_layers.append(_residual(H, i+1, center_layer_cfg))
if cff > 0: c_layers.append(_residual(H, i+1, ff_cfg))
self.c_layers = nn.ModuleList(c_layers)
# Up blocks
u_layers = []
for p in pool[::-1]:
H //= expand
u_pool = utils.instantiate(up_registry, pool_mode, H*expand, stride=p, expand=expand, causal=True, transposed=self.transposed)
u_layers.append(u_pool)
for i in range(n_layers):
u_layers.append(_residual(H, i+1, layer_cfg))
if ff > 0: u_layers.append(_residual(H, i+1, ff_cfg))
self.u_layers = nn.ModuleList(u_layers)
assert H == d_model
self.norm = nn.LayerNorm(H)
@property
def d_output(self):
return self.d_model
def forward(self, x, state=None, **kwargs):
"""
input: (batch, length, d_input)
output: (batch, length, d_output)
"""
if self.transposed: x = x.transpose(1, 2)
# Down blocks
outputs = [] # Store all layers for SequenceUNet structure
for layer in self.d_layers:
outputs.append(x)
x, _ = layer(x)
# Center block
outputs.append(x)
for layer in self.c_layers:
x, _ = layer(x)
x = x + outputs.pop()
for layer in self.u_layers:
x, _ = layer(x)
x = x + outputs.pop()
# feature projection
if self.transposed: x = x.transpose(1, 2) # (batch, length, expand)
x = self.norm(x)
return x, None # required to return a state
def default_state(self, *args, **kwargs):
""" x: (batch) """
layers = list(self.d_layers) + list(self.c_layers) + list(self.u_layers)
return [layer.default_state(*args, **kwargs) for layer in layers]
def step(self, x, state, **kwargs):
"""
input: (batch, d_input)
output: (batch, d_output)
"""
# States will be popped in reverse order for convenience
state = state[::-1]
# Down blocks
outputs = [] # Store all layers for SequenceUNet structure
next_state = []
for layer in self.d_layers:
outputs.append(x)
x, _next_state = layer.step(x, state=state.pop(), **kwargs)
next_state.append(_next_state)
if x is None: break
# Center block
if x is None:
# Skip computations since we've downsized
skipped = len(self.d_layers) - len(outputs)
for _ in range(skipped+len(self.c_layers)+skipped):
next_state.append(state.pop())
u_layers = list(self.u_layers)[skipped:]
else:
outputs.append(x)
for layer in self.c_layers:
x, _next_state = layer.step(x, state=state.pop(), **kwargs)
next_state.append(_next_state)
x = x + outputs.pop()
u_layers = self.u_layers
for layer in u_layers:
x, _next_state = layer.step(x, state=state.pop(), **kwargs)
next_state.append(_next_state)
x = x + outputs.pop()
# feature projection
x = self.norm(x)
return x, next_state
| state-spaces-main | src/models/sequence/backbones/unet.py |
"""Isotropic deep sequence model backbone, in the style of ResNets / Transformers.
The SequenceModel class implements a generic (batch, length, d_input) -> (batch, length, d_output) transformation.
"""
from functools import partial
from typing import Mapping, Optional
import torch
import torch.nn as nn
from einops import rearrange
from src.utils.config import to_list, to_dict
from src.models.sequence.backbones.block import SequenceResidualBlock
from src.models.sequence.base import SequenceModule
from src.models.nn import Normalization, DropoutNd
class SequenceModel(SequenceModule):
"""Flexible isotropic deep neural network backbone.
Options:
- d_model: Model dimension. Inputs generally have shape (batch, length, d_model).
- n_layers: Number of repeating blocks.
- transposed: Transpose inputs so each layer receives (batch, d_model, length).
- dropout: Dropout parameter applied on every residual and every layer.
- tie_dropout: Tie dropout mask across sequence like nn.Dropout1d/nn.Dropout2d.
- prenorm: Pre-norm vs. post-norm placement of the norm layer.
- bidirectional: Concatenate two copies of each layer like a bi-LSTM.
- n_repeat: Each layer is repeated n times per stage before applying (optional) pooling.
- Layer config, must be specified.
- residual: Residual config, or None for no residual.
- norm: Normalization config (e.g. layer vs batch), or None for no norm.
- pool: Config for pooling layer per stage, or None for no pooling.
- track_norms: Log norms of each layer output.
- dropinp: Input dropout.
"""
def __init__(
self,
d_model: int,
n_layers: int = 1,
transposed: bool = False,
        dropout: float = 0.0,
tie_dropout: bool = False,
prenorm: bool = True,
bidirectional: bool = False,
n_repeat: int = 1,
layer: Optional[Mapping] = None,
residual: Optional[Mapping] = None,
norm: Optional[Mapping] = None,
pool: Optional[Mapping] = None,
track_norms: bool = True,
        dropinp: float = 0.0,
):
super().__init__()
# Save arguments needed for forward pass
self.d_model = d_model
self.transposed = transposed
self.track_norms = track_norms
# Input dropout (not really used)
dropout_fn = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
self.drop = dropout_fn(dropinp) if dropinp > 0.0 else nn.Identity()
layer = to_list(layer, recursive=False)
# Some special arguments are passed into each layer
for _layer in layer:
# If layers don't specify dropout, add it
if _layer.get('dropout', None) is None:
_layer['dropout'] = dropout
# Ensure all layers are shaped the same way
_layer['transposed'] = transposed
# Duplicate layers
layers = layer * n_layers * n_repeat
# Instantiate layers
_layers = []
d = d_model
for l, layer in enumerate(layers):
# Pool at the end of every n_repeat blocks
pool_cfg = pool if (l+1) % n_repeat == 0 else None
block = SequenceResidualBlock(
d,
l+1,
prenorm=prenorm,
bidirectional=bidirectional,
dropout=dropout,
tie_dropout=tie_dropout,
transposed=transposed,
layer=layer,
residual=residual,
norm=norm,
pool=pool_cfg,
)
_layers.append(block)
d = block.d_output
self.d_output = d
self.layers = nn.ModuleList(_layers)
if prenorm:
if norm is None:
self.norm = None
elif isinstance(norm, str):
self.norm = Normalization(self.d_output, transposed=self.transposed, _name_=norm)
else:
self.norm = Normalization(self.d_output, transposed=self.transposed, **norm)
else:
self.norm = nn.Identity()
def forward(self, inputs, *args, state=None, **kwargs):
""" Inputs assumed to be (batch, sequence, dim) """
if self.transposed: inputs = rearrange(inputs, 'b ... d -> b d ...')
inputs = self.drop(inputs)
# Track norms
if self.track_norms: output_norms = [torch.mean(inputs.detach() ** 2)]
# Apply layers
outputs = inputs
prev_states = [None] * len(self.layers) if state is None else state
next_states = []
for layer, prev_state in zip(self.layers, prev_states):
outputs, state = layer(outputs, *args, state=prev_state, **kwargs)
next_states.append(state)
if self.track_norms: output_norms.append(torch.mean(outputs.detach() ** 2))
if self.norm is not None: outputs = self.norm(outputs)
if self.transposed: outputs = rearrange(outputs, 'b d ... -> b ... d')
if self.track_norms:
metrics = to_dict(output_norms, recursive=False)
self.metrics = {f'norm/{i}': v for i, v in metrics.items()}
return outputs, next_states
@property
def d_state(self):
d_states = [layer.d_state for layer in self.layers]
return sum([d for d in d_states if d is not None])
@property
def state_to_tensor(self):
# Slightly hacky way to implement this in a curried manner (so that the function can be extracted from an instance)
# Somewhat more sound may be to turn this into a @staticmethod and grab subclasses using hydra.utils.get_class
def fn(state):
x = [_layer.state_to_tensor(_state) for (_layer, _state) in zip(self.layers, state)]
x = [_x for _x in x if _x is not None]
return torch.cat( x, dim=-1)
return fn
def default_state(self, *batch_shape, device=None):
return [layer.default_state(*batch_shape, device=device) for layer in self.layers]
def step(self, x, state, **kwargs):
# Apply layers
prev_states = [None] * len(self.layers) if state is None else state
next_states = []
for layer, prev_state in zip(self.layers, prev_states):
x, state = layer.step(x, state=prev_state, **kwargs)
next_states.append(state)
        if self.norm is not None: x = self.norm(x)
return x, next_states
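# A minimal usage sketch (not part of the original module). The registry keys used here
# ('ffn' layer, 'R' residual, 'layer' norm) are assumptions borrowed from how MegaBlock
# configures its NFFN block elsewhere in this package; swap in your own configs as needed.
if __name__ == "__main__":
    model = SequenceModel(
        d_model=16,
        n_layers=2,
        transposed=False,
        dropout=0.1,
        layer=[{'_name_': 'ffn', 'expand': 2}],
        residual='R',
        norm='layer',
    )
    x = torch.randn(4, 100, 16)      # (batch, length, d_model)
    y, state = model(x)              # state is a list with one entry per block
    print(y.shape, model.d_output)   # expected output shape (4, 100, 16) with d_output == 16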
| state-spaces-main | src/models/sequence/backbones/model.py |
"""Implements a full residual block around a black box layer.
Configurable options include:
normalization position: prenorm or postnorm
normalization type: batchnorm, layernorm etc.
subsampling/pooling
residual options: feedforward, residual, affine scalars, depth-dependent scaling, etc.
"""
from functools import partial
import torch
from torch import nn
from src.models.nn import Normalization, StochasticDepth, DropoutNd
from src.models.sequence import SequenceModule
from src.models.sequence.modules.pool import registry as pool_registry
from src.models.nn.residual import registry as residual_registry
import src.utils as utils
import src.utils.registry as registry
class SequenceResidualBlock(SequenceModule):
"""Flexible residual block design. See model.py for meaning of options."""
def __init__(
self,
d_input,
i_layer=None, # Only needs to be passed into certain residuals like Decay
prenorm=True,
bidirectional=False,
dropout=0.0,
tie_dropout=False,
transposed=False,
layer=None, # Config for black box module
residual=None, # Config for residual function
norm=None, # Config for normalization layer
pool=None,
drop_path=0.,
):
super().__init__()
self.i_layer = i_layer
self.d_input = d_input
self.prenorm = prenorm
self.bidirectional = bidirectional
self.transposed = transposed
self.layer = utils.instantiate(registry.layer, layer, d_input)
if self.bidirectional:
self.reverse_layer = utils.instantiate(registry.layer, layer, d_input)
self.bidirectional_linear = nn.Linear(2*self.layer.d_output, self.layer.d_output)
# Residual
# d_residual is the output dimension after residual
if residual is None:
self.residual = None
self.d_residual = self.layer.d_output
else:
self.residual = utils.instantiate(residual_registry, residual, i_layer, d_input, self.layer.d_output)
self.d_residual = self.residual.d_output
# Normalization
d_norm = d_input if self.prenorm else self.d_residual
# We don't use config to directly instantiate since Normalization has some special cases
if norm is None:
self.norm = None
elif isinstance(norm, str):
self.norm = Normalization(d_norm, transposed=self.transposed, _name_=norm)
else:
self.norm = Normalization(d_norm, transposed=self.transposed, **norm)
# Pool
self.pool = utils.instantiate(pool_registry, pool, self.d_residual, transposed=self.transposed)
# Dropout
dropout_cls = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
self.drop = dropout_cls(dropout) if dropout > 0.0 else nn.Identity()
# Stochastic depth
self.drop_path = StochasticDepth(drop_path, mode='row') if drop_path > 0.0 else nn.Identity()
@property
def d_output(self):
return self.pool.d_output if self.pool is not None else self.d_residual
@property
def d_state(self):
return self.layer.d_state
@property
def state_to_tensor(self):
return self.layer.state_to_tensor
def default_state(self, *args, **kwargs):
return self.layer.default_state(*args, **kwargs)
def forward(self, x, state=None, **kwargs):
y = x
# Pre-norm
if self.norm is not None and self.prenorm: y = self.norm(y)
# Black box layer
y_for, new_state = self.layer(y, state=state, **kwargs)
if self.bidirectional:
assert state is None
y_rev, _ = self.reverse_layer(y, state=state, **kwargs)
if self.transposed: y = torch.cat([y_for, y_rev], dim=1)
else: y = torch.cat([y_for, y_rev], dim=-1)
y = self.bidirectional_linear(y)
else:
y = y_for
# Residual
if self.residual is not None: y = self.residual(x, self.drop_path(self.drop(y)), self.transposed)
# Post-norm
if self.norm is not None and not self.prenorm: y = self.norm(y)
# Pool
if self.pool is not None: y, _ = self.pool(y)
return y, state
def step(self, x, state, **kwargs):
assert not self.bidirectional
y = x
# Pre-norm
if self.norm is not None and self.prenorm:
y = self.norm.step(y)
# Black box layer
y, state = self.layer.step(y, state, **kwargs)
# Residual
if self.residual is not None: y = self.residual(x, y, transposed=False) # NOTE this would not work with concat residual function (catformer)
# Post-norm
if self.norm is not None and not self.prenorm:
y = self.norm.step(y)
# Pool
if self.pool is not None: y, _ = self.pool(y)
return y, state
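# A minimal usage sketch (not part of the original module). The 'ffn' layer name, 'R' residual,
# and 'layer' norm are the same registry keys MegaBlock uses to build its NFFN block, so they
# are assumed to resolve here as well.
if __name__ == "__main__":
    block = SequenceResidualBlock(
        d_input=16,
        i_layer=1,
        prenorm=True,
        dropout=0.1,
        layer={'_name_': 'ffn', 'expand': 2},
        residual='R',
        norm='layer',
    )
    x = torch.randn(4, 100, 16)      # (batch, length, d_input) since transposed=False
    y, state = block(x)
    print(y.shape, block.d_output)   # expected output shape (4, 100, 16) with d_output == 16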
| state-spaces-main | src/models/sequence/backbones/block.py |
"""Implementation of S4ND module (https://arxiv.org/abs/2210.06583)."""
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
from src.models.sequence import SequenceModule
from src.models.sequence.kernels import registry as kernel_registry
from src.models.nn import LinearActivation, Activation, DropoutNd
import src.utils.train
import src.utils as utils
log = src.utils.train.get_logger(__name__)
contract = torch.einsum
def multiple_axis_slice(x, L):
"""
x: (..., L1, L2, .., Lk)
L: list of length k [l1, l2, .., lk]
returns: x[..., :l1, :l2, .., :lk]
"""
# TODO I don't see a way to do this programmatically in Pytorch without sacrificing speed so...
assert len(L) > 0
if len(L) == 1:
return x[..., :L[0]]
elif len(L) == 2:
return x[..., :L[0], :L[1]]
elif len(L) == 3:
return x[..., :L[0], :L[1], :L[2]]
elif len(L) == 4:
return x[..., :L[0], :L[1], :L[2], :L[3]]
    else: raise NotImplementedError("multiple_axis_slice currently supports at most 4 trailing length dimensions")
class S4ND(SequenceModule):
requires_length = True
def __init__(
self,
d_model,
d_state=64,
l_max=None, # Maximum length of sequence (list or tuple). None for unbounded
dim=2, # Dimension of data, e.g. 2 for images and 3 for video
out_channels=None, # Do depthwise-separable or not
channels=1, # maps 1-dim to C-dim
bidirectional=True,
# Arguments for FF
activation='gelu', # activation in between SS and FF
ln=False, # Extra normalization
final_act=None, # activation after FF
initializer=None, # initializer on FF
weight_norm=False, # weight normalization on FF
hyper_act=None, # Use a "hypernetwork" multiplication
dropout=0.0, tie_dropout=False,
transposed=True, # axis ordering (B, L, D) or (B, D, L)
verbose=False,
trank=1, # tensor rank of C projection tensor
linear=True,
return_state=True,
contract_version=0,
# SSM Kernel arguments
kernel=None, # New option
mode='dplr', # Old option
**kernel_args,
):
"""
d_state: the dimension of the state, also denoted by N
l_max: the maximum sequence length, also denoted by L
if this is not known at model creation, or inconvenient to pass in,
set l_max=None and length_correction=True
dropout: standard dropout argument
transposed: choose backbone axis ordering of (B, L, D) or (B, D, L) [B=batch size, L=sequence length, D=feature dimension]
Other options are all experimental and should not need to be configured
"""
super().__init__()
if verbose:
import src.utils.train
log = src.utils.train.get_logger(__name__)
log.info(f"Constructing S4ND (H, N, L) = ({d_model}, {d_state}, {l_max})")
self.h = d_model
self.n = d_state
self.bidirectional = bidirectional
self.ln = ln
self.channels = channels
self.transposed = transposed
self.linear = linear
self.return_state = return_state
self.contract_version = contract_version
self.out_channels = out_channels
self.verbose = verbose
self.kernel_args = kernel_args
self.D = nn.Parameter(torch.randn(self.channels, self.h)) # TODO if self.out_channels
self.trank = trank
if self.out_channels is not None:
channels *= self.out_channels
# # Swap channels and out_channels
# # self.out_channels = channels
# self.h = self.out_channels
# # channels *= out_channels
# self.in_channels = d_model
# channels *= d_model
assert self.linear # TODO change name of linear_output
channels *= self.trank
if self.bidirectional:
channels *= 2
# Check dimensions and kernel sizes
if dim is None:
assert utils.is_list(l_max)
# assert l_max is not None # TODO implement auto-sizing functionality for the kernel
if l_max is None:
self.l_max = [None] * dim
elif isinstance(l_max, int):
self.l_max = [l_max] * dim
else:
assert l_max is None or utils.is_list(l_max)
self.l_max = l_max
# SSM Kernel
if kernel is None and mode is not None: kernel = mode
self._kernel_channels = channels
self.kernel = nn.ModuleList([
# SSKernel(self.h, N=self.n, L=L, channels=channels, verbose=verbose, **kernel_args)
kernel_registry[kernel](d_model=self.h, d_state=self.n, l_max=L, channels=channels, verbose=verbose, **kernel_args)
for L in self.l_max
])
if not self.linear:
self.activation = Activation(activation)
dropout_fn = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
self.dropout = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
# position-wise output transform to mix features
self.output_linear = LinearActivation(
self.h*self.channels,
self.h,
transposed=self.transposed,
initializer=initializer,
activation=final_act,
activate=True,
weight_norm=weight_norm,
)
## To handle some operations with unspecified number of dims, we're going to define the einsum/einops contractions programmatically
# Outer product function for the convolution kernel taking arbitary number of dims
contract_str = ', '.join([f'... {chr(i+97)}' for i in range(len(self.l_max))]) \
+ ' -> ... ' \
+ ' '.join([f'{chr(i+97)}' for i in range(len(self.l_max))])
# self.nd_outer = oe.contract_expression(
# contract_str,
# *[(channels*self.trank, self.h, 2*l) for l in l_max]
# )
# Slice first half of each length dimension after the iFFT
# e.g. in 2D the forward pass wants to call:
# y = rearrange(y, '... (f0 l1) (f1 l1) -> (f0 f1) ... (l0 l1)', f0=2, f1=2)
# y = y[0]
# self.nd_slice = '... ' + ' '.join([f"(f{i} l{i})" for i in range(len(l_max))]) + ' -> (' + ' '.join([f"f{i}" for i in range(len(l_max))]) + ') ... (' + ' '.join([f"l{i}" for i in range(len(l_max))]) + ')'
# unflattened L dim by removing last '()'
# self.nd_slice = '... ' + ' '.join([f"(f{i} l{i})" for i in range(len(l_max))]) + ' -> (' + ' '.join([f"f{i}" for i in range(len(l_max))]) + ') ... ' + ' '.join([f"l{i}" for i in range(len(l_max))])
# self.nd_slice_args = { f"f{i}": 2 for i in range(len(l_max)) }
def _reinit(self, dt_min=None, dt_max=None, normalize=False, **kwargs):
""" Sets time kernel to custom value """
assert len(self.l_max) == 3
L = self.l_max[-3]
# init = init or 'fourier'
dt_min = dt_min or 2./L
dt_max = dt_max or 2./L
print(f"S4ND reinit args: {dt_min=} {dt_max=}", kwargs)
kernel_args = {
**self.kernel_args, **{
'H': self.h,
'N': self.n,
'L': L,
# 'init': init,
'dt_min': dt_min,
'dt_max': dt_max,
# 'deterministic': True,
'channels': self._kernel_channels,
**kwargs,
}
}
time_kernel = SSKernel(**kernel_args)
if normalize:
with torch.no_grad():
time_kernel.kernel.C /= (0.5 * time_kernel.kernel.log_dt.exp()[:, None, None])
self.kernel[-3] = time_kernel
def forward(self, u, rate=1.0, state=None, **kwargs): # absorbs return_output and transformer src mask
"""
u: (B H L) if self.transposed else (B L H)
state: (H N) never needed unless you know what you're doing
Returns: same shape as u
"""
half_precision = False
# fft can only handle float32
if u.dtype == torch.float16:
half_precision = True
u = u.to(torch.float32)
assert state is None, f"state not currently supported in S4ND"
# ensure shape is B, C, L (L can be multi-axis)
if not self.transposed:
u = rearrange(u, "b ... h -> b h ...")
L_input = u.shape[2:]
L_kernel = [
l_i if l_k is None else min(l_i, round(l_k / rate)) for l_i, l_k in zip(L_input, self.l_max)
]
# Compute SS Kernel
# 1 kernel for each axis in L
k = [kernel(L=l, rate=rate)[0] for kernel, l in zip(self.kernel, L_kernel)]
if self.bidirectional: # halves channels
k = [torch.chunk(_k, 2, dim=-3) for _k in k] # (C H L)
k = [
F.pad(k0, (0, l)) + F.pad(k1.flip(-1), (l, 0))
# for l, (k0, k1) in zip(L_kernel, k) # TODO bug??
for l, (k0, k1) in zip(L_input, k)
]
        # fft can only handle float32; u was already upcast above, so upcast any half-precision kernels to match
        if any(_k.dtype == torch.float16 for _k in k):
            k = [_k.to(torch.float32) for _k in k]
L_padded = [l_input + l_kernel for l_input, l_kernel in zip(L_input, L_kernel)]
u_f = torch.fft.rfftn(u, s=tuple([l for l in L_padded])) # (B H L)
k_f = [torch.fft.fft(_k, n=l) for _k, l in zip(k[:-1], L_padded[:-1])] + [torch.fft.rfft(k[-1], n=L_padded[-1])] # (C H L)
# Take outer products
if self.contract_version == 0: # TODO set this automatically if l_max is provided
k_f = contract('... c h m, ... c h n -> ... c h m n', k_f[0], k_f[1]) # (H L1 L2) # 2D case of next line
# k_f = self.nd_outer(*k_f)
# sum over tensor rank
k_f = reduce(k_f, '(r c) h ... -> c h ...', 'sum', r=self.trank) / self.trank # reduce_mean not available for complex... # TODO does it matter if (r c) or (c r)?
y_f = contract('bh...,ch...->bch...', u_f, k_f) # k_f.unsqueeze(-4) * u_f.unsqueeze(-3) # (B C H L)
else:
contract_str_l = [f'{chr(i+100)}' for i in range(len(L_input))]
contract_str = 'b ... ' + ' '.join(contract_str_l) + ', ' \
+ ', '.join(['... ' + l for l in contract_str_l]) \
+ ' -> b ... ' \
+ ' '.join(contract_str_l)
y_f = contract(contract_str, u_f, *k_f)
            y_f = reduce(y_f, 'b (r c) h ... -> b c h ...', 'sum', r=self.trank) / self.trank # reduce_mean not available for complex... # TODO does it matter if (r c) or (c r)?
# Contract over channels if not depthwise separable
if self.out_channels is not None:
y_f = reduce(y_f, 'b (i c) h ... -> b c i ...', 'sum', i=self.out_channels) # TODO normalization might not be right
y = torch.fft.irfftn(y_f, s=tuple([l for l in L_padded]))
# need to cast back to half if used
if half_precision:
y = y.to(torch.float16)
# y = y[..., :self.l_max[0], :self.l_max[1]] # 2D case of next line
# y = rearrange(y, self.nd_slice, **self.nd_slice_args) # programmatically using einops
# y = y[0]
y = multiple_axis_slice(y, L_input)
# Compute D term in state space equation - essentially a skip connection
# B, C, H, L (not flat)
if not self.out_channels:
y = y + contract('bh...,ch->bch...', u, self.D) # u.unsqueeze(-3) * self.D.unsqueeze(-1)
# Reshape to flatten channels
# B, H, L (not flat)
y = rearrange(y, 'b c h ... -> b (c h) ...')
if not self.linear:
y = self.dropout(self.activation(y))
# ensure output and input shape are the same
if not self.transposed:
# B, H, L -> B, H, C
y = rearrange(y, "b h ... -> b ... h")
# y = self.norm(y)
if not self.linear:
y = self.output_linear(y)
if self.return_state:
return y, None
else: return y
def default_state(self, *batch_shape, device=None):
return self._initial_state.repeat(*batch_shape, 1, 1)
@property
def d_output(self):
return self.h
# return self.h if self.out_channels is None else self.out_channels
@property
def d_state(self):
raise NotImplementedError
@property
def state_to_tensor(self):
raise NotImplementedError
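# A small sanity-check sketch (not part of the original module) for multiple_axis_slice:
# for two trailing axes it should agree with direct slicing x[..., :l1, :l2].
if __name__ == "__main__":
    x = torch.randn(3, 8, 8)
    assert torch.equal(multiple_axis_slice(x, [4, 5]), x[..., :4, :5])
    print(multiple_axis_slice(x, [4, 5]).shape)  # torch.Size([3, 4, 5])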
| state-spaces-main | src/models/sequence/modules/s4nd.py |
"""Implementation of modular block design used in S4. Compatible with other kernels."""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils as U
from functools import partial
from einops import rearrange, repeat
from src.models.nn import LinearActivation, Activation, DropoutNd
from src.models.sequence.base import SequenceModule
from src.models.sequence.kernels.fftconv import FFTConv
import src.utils as utils
import src.utils.registry as registry
import src.utils.train
log = src.utils.train.get_logger(__name__)
contract = torch.einsum
class S4Block(SequenceModule):
"""General block design wrapping an inner layer. Currently only layer=FFTConv is supported, but easy to incorporate others.
Arguments:
- bottleneck: Reduce dimension of inner layer (e.g. used in GSS).
- gate: Add multiplicative gating (e.g. used in GSS), which is essentially a multiplicative instead of additive residual branch.
- gate_act: Activation function to apply on the gate residual branch.
- mult_act: Activation function to apply after gate multiplication (e.g. GELU in GSS).
- final_act: Activation function to apply after final linear layer. 'id' for no activation, None for no linear layer at all.
- initializer: Initializer on final linear layer.
- weight_norm: Weight normalization on final linear layer.
- dropout: standard dropout argument. tie_dropout=True ties the dropout mask across the sequence length, emulating nn.Dropout1d
- transposed: Choose backbone axis ordering of (B, L, H) (if False) or (B, H, L) (if True) [B=batch size, L=sequence length, H=model dimension]
Other options are all experimental and should not need to be configured.
"""
def __init__(
self,
d_model,
bottleneck=None,
activation='gelu',
gate=None,
gate_act=None,
mult_act=None,
final_act='glu',
postact=None,
initializer=None,
weight_norm=False,
dropout=0.0,
tie_dropout=False,
transposed=True,
layer='fftconv',
**layer_args, # Arguments into inner layer (e.g. FFTConv)
):
super().__init__()
self.d_model = d_model
self.transposed = transposed
self.gate = gate
self.bottleneck = bottleneck
if bottleneck is not None:
self.d_model = self.d_model // bottleneck
self.input_linear = LinearActivation(
self.d_model,
self.d_model,
transposed=False,
initializer=initializer,
activation=None,
activate=False,
weight_norm=weight_norm,
)
        # Currently this module only uses FFTConv for its inner module
        # But the options here are all agnostic to the inner block
        # If other types of inner layers are desired, it is easy
        # to add an option to swap a different module in
        # self.layer = FFTConv(d_model, transposed=False, dropout=dropout, tie_dropout=tie_dropout, **layer_args)
        layer_cfg = layer_args.copy()
        layer_cfg['_name_'] = layer
        layer_cfg['transposed'] = False
        layer_cfg['dropout'] = dropout
        self.layer = utils.instantiate(registry.layer, layer_cfg, d_model)
        # The inner layer is constructed before the gate projections so that
        # self.layer.d_output can be referenced when sizing the output gate
        if gate is not None:
            self.input_gate = LinearActivation(
                self.d_model,
                self.d_model * gate,
                transposed=False,
                initializer=initializer,
                activation=gate_act,
                activate=True,
                weight_norm=weight_norm,
            )
            if self.layer.d_output != self.d_model * gate:
                self.output_gate = LinearActivation(
                    self.layer.d_output,
                    self.d_model * gate,
                    transposed=False,
                    initializer=initializer,
                    activation=None,
                    activate=False,
                    weight_norm=weight_norm,
                )
# Pointwise operations
# Activation after layer
self.activation = Activation(activation)
# Activation after (optional) multiplication by gate branch
self.mult_activation = Activation(mult_act)
# dropout_fn = nn.Dropout2d if self.transposed else nn.Dropout # Broken in torch==1.11
dropout_fn = partial(DropoutNd, transposed=False) if tie_dropout else nn.Dropout
self.drop = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
# position-wise output transform to mix features
if postact is not None:
assert final_act is None
log.warning("Warning: 'postact' option changed to 'final_act' and will be removed in a future version.")
final_act, postact = postact, final_act
if final_act is None:
self.output_linear = nn.Identity()
else:
self.output_linear = LinearActivation(
self.d_model*gate if gate is not None else self.layer.d_output,
self.d_model,
transposed=False,
initializer=initializer,
activation=final_act,
activate=True,
weight_norm=weight_norm,
)
def forward(self, x, lengths=None, **kwargs): # absorbs return_output and transformer src mask
"""
x: (B H L) if self.transposed else (B L H)
state: (H N) never needed unless you know what you're doing
Returns: same shape as x
"""
if self.transposed: x = rearrange(x, 'b d ... -> b ... d')
L = x.size(1)
# Mask out padding tokens
# TODO handle option for mask - instead of lengths, which assumes suffix padding
if isinstance(lengths, int):
if lengths != L:
lengths = torch.tensor(lengths, dtype=torch.long, device=x.device)
else:
lengths = None
if lengths is not None:
assert isinstance(lengths, torch.Tensor) and lengths.ndim == 1 and lengths.size(0) in [1, x.size(0)]
mask = torch.where(torch.arange(L, device=lengths.device)[:, None] < lengths[:, None, None], 1., 0.)
x = x * mask
if self.gate is not None:
v = self.input_gate(x)
if self.bottleneck is not None:
x = self.input_linear(x)
y, state = self.layer(x, **kwargs)
y = self.activation(y)
if self.gate is not None:
y = self.output_gate(y)
y = y * v
y = self.mult_activation(y)
y = self.drop(y)
y = self.output_linear(y)
if self.transposed: y = rearrange(y, 'b d ... -> b ... d')
return y, state
def setup_step(self, **kwargs):
self.layer.setup_step(**kwargs)
def step(self, x, state):
"""Step one time step as a recurrent model. Intended to be used during validation.
x: (B H)
state: (B H N)
Returns: output (B H), state (B H N)
"""
if self.gate is not None:
v = self.input_gate(x)
if self.bottleneck is not None:
x = self.input_linear(x)
y, next_state = self.layer.step(x, state) # (B C H)
if self.gate is not None:
y = self.output_gate(y)
y = y * v
y = self.mult_activation(y)
y = self.drop(y)
y = self.output_linear(y)
return y, next_state
def default_state(self, *batch_shape, device=None):
# kernel is not a SequenceModule so it doesn't need to adhere to same interface
# the kernel will know the device of its own parameters
return self.layer.default_state(*batch_shape)
@property
def d_state(self):
return self.layer.d_state
@property
def d_output(self):
return self.d_model
@property
def state_to_tensor(self):
return self.layer.state_to_tensor
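# A minimal usage sketch (not part of the original module), assuming the default 'fftconv'
# layer name resolves in the layer registry and that FFTConv's own defaults suffice.
if __name__ == "__main__":
    block = S4Block(d_model=16)      # transposed=True by default, so inputs are (B, H, L)
    x = torch.randn(2, 16, 128)
    y, state = block(x)
    print(y.shape)                   # expected to match the input: torch.Size([2, 16, 128])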
| state-spaces-main | src/models/sequence/modules/s4block.py |
"""Implementation of FFN block in the style of Transformers."""
from functools import partial
from torch import nn
from src.models.sequence.base import SequenceModule
from src.models.nn import LinearActivation, DropoutNd
class FFN(SequenceModule):
def __init__(
self,
d_input,
expand=2,
d_output=None,
transposed=False,
activation='gelu',
initializer=None,
dropout=0.0,
tie_dropout=False,
):
super().__init__()
self.d_output = d_input if d_output is None else d_output
self.transposed = transposed
d_inner = int(expand * d_input)
linear1 = LinearActivation(
d_input, d_inner,
transposed=transposed,
activation=activation,
initializer=initializer,
activate=True,
)
dropout_cls = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
# dropout_cls = nn.Dropout2d if self.transposed else nn.Dropout
drop = dropout_cls(dropout) if dropout > 0.0 else nn.Identity()
linear2 = LinearActivation(
d_inner, self.d_output,
transposed=transposed,
activation=None,
initializer=initializer,
activate=False,
)
self.ff = nn.Sequential(
linear1,
drop,
linear2,
)
def forward(self, x, *args, **kwargs):
return self.ff(x), None
def step(self, x, state, **kwargs):
# x: [batch, d_input]
if self.transposed:
# expects: [batch, d_input, seq_len]
return self.ff(x.unsqueeze(-1)).squeeze(-1), state
else:
return self.ff(x), state
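# A minimal usage sketch (not part of the original module): FFN is position-wise, so the
# sequence dimension passes through unchanged and the second return value is a dummy state.
if __name__ == "__main__":
    import torch
    ffn = FFN(d_input=16, expand=2, dropout=0.1)
    x = torch.randn(4, 100, 16)   # (batch, length, d_input) since transposed=False
    y, _ = ffn(x)
    print(y.shape)                # torch.Size([4, 100, 16])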
| state-spaces-main | src/models/sequence/modules/ffn.py |
# Adapted from https://github.com/facebookresearch/mega/blob/ea355255149d38ffe16bf2c176d47c3864e8b05a/fairseq/modules/moving_average_gated_attention.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Mega block design (Gating + Attention + EMA/SSM)."""
import math
from functools import partial
from typing import Dict, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from src.models.nn import Activation, DropoutNd, Normalization
from src.models.sequence.backbones.block import SequenceResidualBlock
from src.models.sequence.kernels.fftconv import FFTConv
class MegaBlock(nn.Module):
"""Block design from "Mega: Exponential Moving Average Gated Attention" paper.
This class is a consolidated version of the MovingAveragedGatedAttention and MegaEncoderLayer classes
from the official Mega code. They have been consolidated into one class, combining the EMA+Attention
module together with the feed-forward network (FFN) module, by composing primitives from this codebase.
This is meant to be a faithful adaptation of the original code, with the following changes:
- Several variable names have been changed to be consistent with this codebase.
- Some annotations have been changed and added, referencing the original paper and code where possible.
- The recurrent state implementation has been removed, which had a design pattern that departs
too much from this codebase. An adaptation of this functionality may be added in the future.
An additional feature is supported where aside from the original MultiHeadEMA,
other forms of global convolution from models.sequence.kernels can be used interchangeably.
"""
def __init__(
# Options are annotated with the original argument names
# from MovingAverageGatedAttention and MegaEncoderLayer
self,
d_model, # Mega: embed_dim
d_attin, # Mega: zdim
d_attout, # Mega: hdim
d_state, # Mega: ndim
dropout=0.0,
drop_attin=None, # Mega: attention_dropout
drop_attout=None, # Mega: hidden_dropout
activation='silu',
attention_activation='softmax',
bidirectional=False,
chunk=-1, # Mega: chunk_size
l_max=None, # Mega: truncation
norm='layer', # Mega: norm_type
prenorm=True,
tie_dropout=False, # Mega: feature_dropout
rel_pos_bias='simple',
max_positions=1024,
ff_expand=2, # Mega: encoder_ffn_embed_dim
drop_ffn=None, # Mega: activation_dropout
transposed=False, # Inputs shape (B L D)
mode='mega',
# If mode='mega', use the official Mega MultiHeadEMA class verbatim
# Otherwise, construct a convolution kernel from kernel.py and use a general SSM wrapper
# mode='ema' uses the same core kernel code from MultiHeadEMA, and should be exactly the same as mode='mega'
# mode='nplr' uses the S4 kernel
# mode='diag' uses the S4D kernel, etc.
**ssm_args, # pass other keyword arguments to the SSM kernels
):
super().__init__()
self.transposed = transposed
self.d_model = d_model
self.d_output = d_model
self.d_attout = d_attout
self.d_attin = d_attin
self.d_state = d_state
self.activation = Activation(activation)
        self.attention_activation = attention_activation
        self.attention_activation_fn = None if attention_activation == 'softmax' else Activation(attention_activation)
self.scaling = self.d_attin ** -0.5 if attention_activation == 'softmax' else None
# Configure dropout
if drop_attin is None: drop_attin = dropout
if drop_attout is None: drop_attout = dropout
if drop_ffn is None: drop_ffn = dropout
dropout_fn = partial(DropoutNd, transposed=False) if tie_dropout else nn.Dropout
self.dropout = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
self.drop_attout = dropout_fn(drop_attout) if drop_attout > 0.0 else nn.Identity()
self.drop_attin = nn.Dropout(drop_attin)
self.l_chunk = chunk
self.prenorm = prenorm
self.norm = Normalization(d_model, _name_=norm, transposed=False)
# Construct a *linear* SSM
if mode == 'mega':
self.ssm = MultiHeadEMA(
d_model,
d_state=d_state,
bidirectional=bidirectional,
l_max=l_max,
)
else:
self.ssm = FFTConv(
d_model,
d_state=d_state,
bidirectional=bidirectional,
l_max=l_max,
activation=None,
mode=mode,
transposed=False,
**ssm_args,
)
self.v_proj = nn.Linear(d_model, d_attout) # U_v (eq. 10)
self.mx_proj = nn.Linear(d_model, d_attin + d_attout + 2 * d_model)
self.h_proj = nn.Linear(d_attout, d_model) # U_h (eq. 14)
self.gamma = nn.Parameter(torch.Tensor(2, d_attin))
self.beta = nn.Parameter(torch.Tensor(2, d_attin))
self.max_positions = max_positions
max_positions = max_positions if self.l_chunk < 0 else self.l_chunk
if rel_pos_bias == 'simple':
self.rel_pos_bias = SimpleRelativePositionalBias(max_positions)
elif rel_pos_bias == 'rotary':
self.rel_pos_bias = RotaryRelativePositionalBias(d_attin, max_positions)
else:
raise ValueError('unknown relative position bias: {}'.format(rel_pos_bias))
# NFFN (normalized feed-forward network)
if ff_expand is not None and ff_expand > 0:
ffn_cfg = {
'_name_': 'ffn',
'expand': ff_expand,
'activation': activation,
'dropout': drop_ffn,
'tie_dropout': tie_dropout,
'transposed': transposed,
}
self.nffn = SequenceResidualBlock(
d_model,
prenorm=prenorm,
dropout=dropout,
tie_dropout=tie_dropout,
residual='R',
norm=norm,
layer=ffn_cfg,
transposed=transposed,
)
else:
self.nffn = None
self.reset_parameters()
def reset_parameters(self):
std = 0.02
nn.init.normal_(self.v_proj.weight, mean=0.0, std=std)
nn.init.constant_(self.v_proj.bias, 0.0)
nn.init.normal_(self.mx_proj.weight, mean=0.0, std=std)
nn.init.constant_(self.mx_proj.bias, 0.0)
nn.init.normal_(self.h_proj.weight, mean=0.0, std=std)
nn.init.constant_(self.h_proj.bias, 0.0)
nn.init.normal_(self.gamma, mean=0.0, std=std)
nn.init.constant_(self.beta, 0.0)
def element_attention(self, q, k, padding_mask, attn_mask, before_attn_fn):
slen = k.size(2)
if padding_mask is not None:
inverse_mask = 1.0 - padding_mask.type_as(q) # (B K C)
lengths = inverse_mask.sum(dim=-1, keepdim=True) # (B K 1)
lengths = lengths.clamp(min=1.0).unsqueeze(-1) # (B K 1 1) TODO finish transcribing
else:
lengths = slen
inverse_mask = None
if attn_mask is not None:
# C x 1
lengths = attn_mask.sum(dim=-1, keepdim=True)
# C x C
bias = self.rel_pos_bias(slen)
if slen != q.size(2):
assert q.size(2) == 1
# 1 x C
bias = bias[-1:]
# B x K x C x C
qk = torch.matmul(q, k.transpose(2, 3)) / lengths + bias
if before_attn_fn:
return qk
attn_weights = self.attention_activation_fn(qk)
if inverse_mask is not None:
attn_weights = attn_weights * inverse_mask.unsqueeze(2)
if attn_mask is not None:
attn_weights = attn_weights * attn_mask
return attn_weights
def softmax_attention(self, q, k, padding_mask, attn_mask, before_attn_fn):
slen = k.size(2)
# C x C
bias = self.rel_pos_bias(slen)
if slen != q.size(2):
assert q.size(2) == 1
# 1 x C
bias = bias[-1:]
# scaled attention
q = q * self.scaling
# B x K x C x C
qk = torch.matmul(q, k.transpose(2, 3)) + bias
if attn_mask is not None:
qk = qk + attn_mask
if padding_mask is not None:
padding_mask_all = padding_mask.all(dim=-1, keepdim=True)
padding_mask = torch.logical_and(padding_mask, ~padding_mask_all)
qk = qk.masked_fill(padding_mask.unsqueeze(2).to(torch.bool), float('-inf'))
if before_attn_fn:
return qk
attn_weights = F.softmax(qk, dim=-1)
return attn_weights
def forward(
self,
x,
state=None, # TODO consolidate with incremental_state
padding_mask: Optional[torch.Tensor] = None, # Mega: encoder_padding_mask
need_weights: bool = False,
attn_mask: Optional[torch.Tensor] = None,
before_attn_fn: bool = False,
**kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: (B L D) = batch, length, dimension
Dimensions:
B: Batch size
L: Sequence length (l_seq)
C: Chunk size
K: Number of chunks (L / C)
D: Model dimension (d_model) ($d$ in paper)
V: Dim. attention output (Mega: paper $v$, code `d_attout`, annotation E)
Z: Dim. attention input (Mega: paper $z$, code `d_attin`, annotation S)
"""
if self.transposed: x = x.transpose(-1, -2) # (B L D)
B, L, D = x.size()
assert D == self.d_model
residual = x
if self.prenorm:
x = self.norm(x)
v = self.activation(self.v_proj(x)) # (B L V)
mx, _ = self.ssm(x, state=state, padding_mask=padding_mask) # (B L D)
# Original Mega code bakes a SiLU activation at the end of the MultiHeadEMA module
# It has been moved here, which makes more sense to keep the SSM module linear
# and so the activation is configurable and consistent with the rest of the block
mx = self.activation(mx)
mx = self.dropout(mx)
base = self.mx_proj(mx) # (B L D) -> (B L 2*D+Z+V)
u, zr, hx = torch.split(base, [D, self.d_attin + self.d_attout, D], dim=-1)
u = torch.sigmoid(u) # (B L D) \phi (eq. 13)
# Mega specifies to hard-code SiLU here, but the self.activation is always silu
# in their configs anyways so this seems more sensible in case it's changed
z, r = torch.split(self.activation(zr), [
self.d_attin, # z = (B L Z) Z (eq. 7)
self.d_attout, # r = (B L V) \gamma (eq. 12)
], dim=-1)
z = z.unsqueeze(2) * self.gamma + self.beta
q, k = torch.unbind(z, dim=2) # (B L Z) Q and K (eq. 8 and 9)
q = q.unsqueeze(1) # (B 1 L Z)
k = k.unsqueeze(1) # (B 1 L Z)
        v = v.unsqueeze(1) # (B 1 L V)
if self.l_chunk < 0:
if padding_mask is not None:
padding_mask = padding_mask.unsqueeze(1) # (B 1 L)
else:
if L < self.l_chunk:
pass
else:
q = rearrange(q, 'b 1 (k c) z -> b k c z', c=self.l_chunk)
l_ctx = k.size(2) # Transcribed from orig, why is this not the same as L?
if l_ctx < self.l_chunk:
if padding_mask is not None:
padding_mask = padding_mask.unsqueeze(1) # (B 1 C)?
else:
k = rearrange(k, 'b 1 (k c) z -> b k c z', c=self.l_chunk)
v = rearrange(v, 'b 1 (k c) z -> b k c z', c=self.l_chunk)
if padding_mask is not None:
padding_mask = rearrange(padding_mask, 'b (k c) -> b k c', c=self.l_chunk)
# This is part of a workaround to get around fork/join parallelism not supporting Optional types.
if padding_mask is not None and padding_mask.dim() == 0:
padding_mask = None
if self.attention_activation_fn is None: # Softmax case
attn_weights = self.softmax_attention(q, k, padding_mask, attn_mask, before_attn_fn)
else:
attn_weights = self.element_attention(q, k, padding_mask, attn_mask, before_attn_fn)
if before_attn_fn:
if self.transposed: v = v.transpose(-1, -2)
# return attn_weights, v # TODO looks like bug in orig code
return v, attn_weights
v = self.drop_attout(v) # (B K C V)
kernel = self.drop_attin(attn_weights) # (B K C C)
h = rearrange(torch.matmul(kernel, v), 'b k c v -> b (k c) v') # (B L V)
h = self.activation(hx + self.h_proj(h * r)) # (B L D)
h = self.dropout(h)
# Output (y) from update gate u (\phi): u * h + (1-u) * x, eq. (15)
out = torch.addcmul(residual, u, h - residual) # (B L D)
if not self.prenorm:
out = self.norm(out)
if self.transposed: out = out.transpose(-1, -2)
        # FFN (skipped if ff_expand disabled the NFFN block)
        if self.nffn is not None:
            out, _ = self.nffn(out, state=None)
if not need_weights: attn_weights = None
# Because this class expects to return a state, it's a little inconvenient to return attention weights.
# The official Mega code doesn't return it either.
return out, _ # , attn_weights
def extra_repr(self) -> str:
return 'd_model={}, d_attin={}, d_attout={}, d_state={}, chunk={}, attn_act={}, prenorm={}'.format(self.d_model, self.d_attin,
self.d_attout, self.d_state, self.l_chunk,
self.attention_activation, self.prenorm)
"""
EMA (Exponential Moving Average) module.
Adapted from https://github.com/facebookresearch/mega/blob/ea355255149d38ffe16bf2c176d47c3864e8b05a/fairseq/modules/exponential_moving_average.py
"""
class MultiHeadEMA(nn.Module):
"""Exponential Moving Average Layer.
This class is a verbatim translation of the original code with minor differences that
do not change the code execution path.
- A few variable names have been changed to be more consistent with this codebase.
- State passing is not supported ("incremental_state" in the Mega code),
as the original module uses a different fairseq interface than this codebase.
- The only semantic change is removing the final SiLU activation,
which is handled by the caller module (e.g. src.models.sequence.mega.MegaBlock).
It is possible to recreate the functionality of MultiHeadEMA by using other modular blocks,
in particular the src.models.sequence.kernels.fftconv.FFTConv block
together with src.models.sequence.kernels.kernel.EMAKernel.
"""
def __init__(
self,
d_model,
d_state=2,
bidirectional=False,
l_max=None,
):
super().__init__()
self.H = d_model
self.N = d_state
self.bidirectional = bidirectional
self.l_max = l_max
self.scale = math.sqrt(1.0 / self.N)
H = 2 * self.H if self.bidirectional else self.H
# This is a state-space model variant of S4(D) where
# delta, alpha, beta, gamma, omega directly correspond to
# the \Delta, A, B, C, D parameters of SSMs
self.delta = nn.Parameter(torch.Tensor(H, self.N, 1))
self.alpha = nn.Parameter(torch.Tensor(H, self.N, 1))
self.beta = nn.Parameter(torch.Tensor(H, self.N))
self.gamma = nn.Parameter(torch.Tensor(H, self.N))
self.omega = nn.Parameter(torch.Tensor(self.H))
self._kernel = None
self._coeffs = None
self.reset_parameters()
def reset_parameters(self):
with torch.no_grad():
# delta & alpha (dt and A parameters of SSM)
nn.init.normal_(self.delta, mean=0.0, std=0.2)
nn.init.normal_(self.alpha, mean=0.0, std=0.2)
# Mega: beta [1, -1, 1, -1, ...] seems more stable.
val = torch.ones(self.N)
if self.N > 1:
idx = torch.tensor(list(range(1, self.N, 2)))
val.index_fill_(0, idx, -1.0)
self.beta.normal_(mean=0.0, std=0.02).add_(val)
# gamma & omega (C and D parameters of SSM)
# should be unit variance, as specified in HTTYH
nn.init.normal_(self.gamma, mean=0.0, std=1.0)
nn.init.normal_(self.omega, mean=0.0, std=1.0)
def _calc_coeffs(self):
self._coeffs = None
p = torch.sigmoid(self.delta) # (H N 1)
alpha = torch.sigmoid(self.alpha)
q = 1.0 - p * alpha
return p, q
def _compute_kernel(self, L: int):
self._kernel = None
# Materialize parameters - analog of SSM discretization
p, q = self._calc_coeffs() # (H N 1)
vander = torch.log(q) * torch.arange(L).to(p).view(1, 1, L) # (H N L)
kernel = p[..., 0] * self.beta * self.gamma * self.scale
return torch.einsum('dn,dnl->dl', kernel, torch.exp(vander)) # (H L)
def coeffs(self):
if self.training:
return self._calc_coeffs()
else:
if self._coeffs is None:
self._coeffs = self._calc_coeffs()
return self._coeffs
def kernel(self, L: int):
L = L if self.l_max is None else min(self.l_max, L)
if self.training:
return self._compute_kernel(L)
else:
if self._kernel is None or self._kernel.size(-1) < L:
self._kernel = self._compute_kernel(L)
return self._kernel[..., :L]
def forward(
self,
u,
state: Optional[Dict[str, Dict[str, Optional[torch.Tensor]]]] = None,
padding_mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, None]:
"""Input shape: Time x Batch x Channel
Args:
padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
"""
B, L, H = u.size()
assert H == self.H
u = u.transpose(-1, -2) # (B H L)
if padding_mask is not None:
u = u * (1.0 - padding_mask.unsqueeze(1).type_as(u))
# assert not self.bidirectional or state is None, 'Bidirectional EMA does not support incremental state'
if state is not None:
raise NotImplementedError(
"MultiHeadEMA module does not support state passing in this repository."
"Use S4D for more functionality such as state passing."
)
else:
k = self.kernel(L) # (H L)
l_fft = L
s = 0
l_kernel = k.size(1)
assert l_kernel == L
u_ = u
if self.bidirectional:
# This is twice as inefficient as it could be
# See S4 FFT conv bidirectional implementation for improvement
k1, k2 = torch.split(k, [self.H, self.H], dim=0)
k = F.pad(k1, (l_kernel - 1, 0)) + F.pad(k2.flip(-1), (0, l_kernel - 1)) # (H 2*L-1)
u_ = F.pad(u, (l_kernel - 1, 0))
l_fft = l_fft + l_kernel - 1
s = 2 * l_kernel - 2
k_f = torch.fft.rfft(k.float(), n=2 * l_fft)
u_f = torch.fft.rfft(u_.float(), n=2 * l_fft)
y = torch.fft.irfft(u_f * k_f, n=2 * l_fft)[..., s:s + L] # (B H L)
y = y.type_as(u)
y = y + u * self.omega.unsqueeze(-1) # (B H L)
y = y.transpose(-1, -2)
return y, None # empty state
def extra_repr(self) -> str:
        return 'edim={}, N={}, bidirectional={}, truncation={}'.format(self.H, self.N, self.bidirectional, self.l_max)
"""
Relative positional bias modules.
From https://github.com/facebookresearch/mega/blob/ea355255149d38ffe16bf2c176d47c3864e8b05a/fairseq/modules/relative_positional_bias.py
"""
class SimpleRelativePositionalBias(nn.Module):
def __init__(self, max_positions):
super().__init__()
self.max_positions = max_positions
self.rel_pos_bias = nn.Parameter(torch.Tensor(2 * max_positions - 1))
self.reset_parameters()
def reset_parameters(self):
std = 0.02
nn.init.normal_(self.rel_pos_bias, mean=0.0, std=std)
def forward(self, L):
if L > self.max_positions:
raise ValueError('Sequence length {} going beyond max length {}'.format(L, self.max_positions))
# L * 2 -1
b = self.rel_pos_bias[(self.max_positions - L):(self.max_positions + L - 1)]
# L * 3 - 1
t = F.pad(b, (0, L))
# (L * 3 - 1) * L
t = torch.tile(t, (L,))
t = t[:-L]
# L x (3 * L - 2)
t = t.view(L, 3 * L - 2)
r = (2 * L - 1) // 2
start = r
end = t.size(1) - r
t = t[:, start:end]
return t
def extra_repr(self) -> str:
return 'max positions={}'.format(self.max_positions)
class RotaryRelativePositionalBias(nn.Module):
def __init__(self, d_model, max_positions):
super().__init__()
assert d_model % 2 == 0
self.d_model = d_model
self.max_positions = max_positions
self.sine, self.cosine = RotaryRelativePositionalBias.get_sinusoid_embeddings(max_positions, d_model)
self.alpha = nn.Parameter(torch.Tensor(1, d_model))
self.beta = nn.Parameter(torch.Tensor(1, d_model))
self.register_buffer("_float_tensor", torch.FloatTensor(1))
self.reset_parameters()
def reset_parameters(self):
std = 0.02
nn.init.normal_(self.alpha, mean=0.0, std=std)
nn.init.normal_(self.beta, mean=0.0, std=std)
@staticmethod
def get_sinusoid_embeddings(max_positions: int, embedding_dim: int):
half_dim = embedding_dim // 2
emb = math.log(10000) / half_dim
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(max_positions, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
return torch.sin(emb), torch.cos(emb)
def rotary(self, x):
n, d = x.size()
x1, x2 = torch.chunk(x, 2, dim=-1)
if self.sine is None or n > self.sine.size(0):
self.sine, self.cosine = RotaryRelativePositionalBias.get_sinusoid_embeddings(n, d)
self.max_positions = n
self.sine = self.sine.to(self._float_tensor)
self.cosine = self.cosine.to(self._float_tensor)
sin = self.sine[:n]
cos = self.cosine[:n]
return torch.cat([x1 * cos - x2 * sin, x2 * cos + x1 * sin], dim=1)
def forward(self, L):
a = self.rotary(self.alpha.expand(L, self.d_model))
b = self.rotary(self.beta.expand(L, self.d_model))
t = torch.einsum('mk,nk->mn', a, b)
return t
def extra_repr(self) -> str:
return 'dim={}, max positions={}'.format(self.d_model, self.max_positions)
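# A minimal usage sketch (not part of the original module) with hypothetical sizes.
# The default mode='mega' uses the MultiHeadEMA defined above, softmax attention,
# the simple relative position bias, and no chunking (chunk=-1).
if __name__ == "__main__":
    block = MegaBlock(d_model=32, d_attin=16, d_attout=64, d_state=2, max_positions=256)
    x = torch.randn(4, 128, 32)     # (B L D); transposed=False by default
    y, _ = block(x)
    print(y.shape)                  # expected to match the input: torch.Size([4, 128, 32])
    bias = SimpleRelativePositionalBias(max_positions=256)
    print(bias(128).shape)          # the relative position bias is an (L, L) matrix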
| state-spaces-main | src/models/sequence/modules/megablock.py |
"""Implementation of LSSL module. Succeeded by S4."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from omegaconf import DictConfig
from src.models.nn import Activation
from src.models.functional.krylov import krylov
from src.models.hippo import transition, hippo
from src.models.functional.toeplitz import causal_convolution
from src.models.sequence.base import SequenceModule, TransposedModule
import src.models.nn.utils as U
contract = torch.einsum
def linear_system_from_krylov(u, C, D, k):
"""
Computes the state-space system y = Cx + Du from Krylov matrix K(A, B)
u: (L, B, ...) ... = H
C: (..., M, N) ... = H
D: (..., M)
k: (..., N, L) Krylov matrix representing b, Ab, A^2b...
y: (L, B, ..., M)
"""
# Equivalent ways to perform C @ k, slight speed differences
k = C @ k # (..., M, L)
# k = torch.einsum('... m n, ... n l -> ... m l', C, k) # C @ k
# k = torch.sum(k.unsqueeze(-3) * C.unsqueeze(-1), dim=-2) # (..., M, L) C @ k
k = rearrange(k, '... m l -> m ... l')
k = k.to(u) # if training in half precision, need to go back to float32 for the fft
k = k.unsqueeze(1) # (M, 1, ..., L)
v = u.unsqueeze(-1).transpose(0, -1) # (1, B, ..., L)
y = causal_convolution(k, v, fast=True) # (M, B, ..., L)
y = y.transpose(0, -1) # (L, B, ..., M)
y = y + u.unsqueeze(-1) * D # (L, B, ..., M)
return y
class Platypus(SequenceModule):
""" Implementation of LSSL module.
# TODO this expects (length, batch) but this codebase is now (batch, length)
"""
requires_length = True
def __init__(
self,
d,
d_model=-1, # overloading this term, same as memory_order or N
measure='legs', # 'legs', 'legt' main ones; can also try 'lagt'
measure_args={},
learn=0, # 0 means no learn, 1 means same A matrix for each hidden feature H, 2 means different A matrix per feature. 1 does not change parameter count. 2 adds parameters but does not slow down
lr=0.0001, # controls learning rate of transition parameters
noise=0.0, # injects input noise to the state space system
init='normal', # for debugging, but might be useful?
dt=None,
channels=1, # denoted by M below
bias=False,
activation='gelu',
ff=True,
weight_norm=False,
dropout=0.0,
l_max=-1,
):
"""
N: the order of the HiPPO projection
dt: discretization step size - should be roughly inverse to the length of the sequence
"""
super().__init__()
self.d = d
self.N = d_model if d_model > 0 else d
self.dt = DictConfig({
'min' : 0.001,
'max' : 0.1,
'learn' : False,
'lr': 0.001,
'init' : 'random',
})
if dt is not None: self.dt.update(dt)
self.ff = ff
self.bias = bias
# Construct transition
self.learn = learn
if self.learn == 0:
if measure == 'identity': # for testing
A, B = torch.eye(self.N), torch.ones(self.N)
self.transition = transition.ManualAdaptiveTransition(self.N, A, B)
elif measure == 'random':
A = torch.randn(self.N, self.N) / self.N # E[AA^T] = (1/N)I -- empirically I nans out
B = torch.ones(self.N) # based on HiPPO matrices; worth trying random, haven't tried
self.transition = transition.ManualAdaptiveTransition(self.N, A, B)
elif measure == 'legt':
# self.transition = transition.LegTAdaptiveTransition(self.N)
self.transition = transition.LegTTriDInverseAdaptiveTransition(self.N, **measure_args)
elif measure == 'cheb':
self.transition = transition.ChebITriDInverseAdaptiveTransition(self.N, **measure_args)
elif measure == 'chebii':
self.transition = transition.ChebIITriDInverseAdaptiveTransition(self.N, **measure_args)
elif measure == 'lagt':
self.transition = transition.LagTCumsumAdaptiveTransition(self.N, **measure_args)
elif measure == 'glagt':
self.transition = transition.GLagTToeplitzAdaptiveTransition(self.N, **measure_args)
elif measure == 'legs':
self.transition = transition.LegSTriDInverseAdaptiveTransition(self.N, **measure_args)
elif measure == 'jac':
self.transition = transition.JacTriDInverseAdaptiveTransition(self.N, **measure_args)
else:
raise NotImplementedError
elif self.learn == 1 or self.learn == 2:
kwargs = {'trainable': True, 'lr': lr}
kwargs.update(measure_args)
if self.learn == 2:
kwargs['batch'] = (self.d,)
if measure == 'random':
A = torch.randn(self.N, self.N) / self.N # E[AA^T] = (1/N)I . empirically I doesn't work, dunno why
B = torch.ones(self.N) # based on HiPPO matrices; worth trying random, haven't tried
self.transition = transition.ManualAdaptiveTransition(self.N, A, B, **kwargs)
elif measure == 'legt':
self.transition = transition.LegTTriDInverseAdaptiveTransition(self.N, **kwargs)
elif measure == 'lagt':
self.transition = transition.LagTTriDInverseAdaptiveTransition(self.N, **kwargs)
elif measure == 'legs':
self.transition = transition.LegSTriDInverseAdaptiveTransition(self.N, **kwargs)
elif measure == 'cheb':
self.transition = transition.ChebITriDInverseAdaptiveTransition(self.N, **kwargs)
elif measure == 'chebii':
self.transition = transition.ChebIITriDInverseAdaptiveTransition(self.N, **kwargs)
elif measure == 'toep':
self.transition = transition.LagTToeplitzAdaptiveTransition(self.N, **kwargs)
else: raise NotImplementedError
elif self.learn == 3: # for debugging
A, B = hippo.transition(measure, self.N)
B = B[:, 0]
self.transition = transition.ManualAdaptiveTransition(self.N, A, B, trainable=True, lr=lr)
else:
raise NotImplementedError
self.m = channels
if init == 'normal':
self.C = nn.Parameter(torch.randn(self.d, self.m, self.N))
self.D = nn.Parameter(torch.randn(self.d, self.m))
elif init == 'constant':
self.C = nn.Parameter(torch.ones(self.d, self.m, self.N))
self.D = nn.Parameter(torch.ones(self.d, self.m))
elif init == 'uniform':
self.C = nn.Parameter(1.732 * torch.rand(self.d, self.m, self.N))
self.D = nn.Parameter(torch.randn(self.d, self.m))
else: raise NotImplementedError
if self.bias:
self.E = nn.Parameter(torch.zeros(self.d, self.m))
if self.dt.init == 'uniform':
log_dt = torch.linspace(math.log(self.dt.min), math.log(self.dt.max), self.d)
elif self.dt.init == 'random':
log_dt = torch.rand(self.d) * (math.log(self.dt.max)-math.log(self.dt.min)) + math.log(self.dt.min)
else: raise NotImplementedError
if self.dt.learn:
self.log_dt = nn.Parameter(log_dt) # (H)
self.log_dt._lr = self.dt.lr # register the parameter for the optimizer to reduce lr
else:
self.register_buffer('log_dt', log_dt)
self.k = None
self.noise = noise
self.activate = Activation(activation)
self.drop = nn.Dropout(dropout)
if self.ff:
self.output_linear = nn.Linear(self.m * self.d, self.d)
if weight_norm:
self.output_linear = nn.utils.weight_norm(self.output_linear)
# For test time shift
self.l_max = l_max
self.last_len = -1
def forward(self, u, *args, state=None, **kwargs):
"""
        u: (L, B, H) [21-09-29] Our backbone now passes inputs as (B, L, H). This class originally expected (L, B, H) so we transpose accordingly
state: (B, H, N) previous hidden state of the recurrence
"""
next_state = None
u = u.transpose(0, 1)
# Construct dt (H)
dt = torch.exp(self.log_dt) # Note: if dt is not learnable this slightly wastes computation, but it isn't a bottleneck
## # Calculate test-time shift
# changed sampling rate; uncache Krylov
if self.last_len != u.shape[0]:
self.k = None
self.last_len = u.shape[0]
# Calculate change from train sampling rate
if self.l_max > 0:
rate = self.l_max / u.shape[0]
# if rate != 1.0: dt = dt * rate
if rate != 1.0: rate = round(rate)
else: rate = None
else:
rate = None
# We need to compute the "recurrence" if
# (*) there is noise or an initial state
# (*) we're learning the system A, B
# (*) first pass
kb = [] # will store the B vectors for Krylov computation
_learn = (self.dt.learn or self.learn) and self.training # need to learn and it's training time # TODO this ignores the last training minibatch if no test time shift (prev batch's K gets cached)... should recalculate A in the last_len check ideally
_conv = _learn or self.k is None or u.shape[0] > self.k.shape[-1] # or rate
_noise = self.noise > 0.0 and self.training
if _conv:
B = self.transition.gbt_B(dt) # (..., N) depending if learn=2
kb.append(B)
if _noise:
noise = self.noise * torch.randn(self.d, self.N, dtype=u.dtype, device=u.device) # (H, N)
kb.append(noise)
A = None
if len(kb) > 0:
if rate is not None:
dt = dt * rate
A = self.transition.gbt_A(dt) # (..., N, N) (..., N)
# Adjust by rate
# if _conv and rate is not None:
# while rate > 1:
# B = B + torch.sum(A * B.unsqueeze(-2), dim=-1) # (I + A) @ B
# A = A @ A
# rate //= 2
kb = [b.broadcast_to(dt.shape+(self.N,)) for b in kb]
kb = torch.stack(torch.broadcast_tensors(*kb), dim=0) # each (..., N)
krylovs = krylov(u.shape[0], A, kb) # (H, N, L) each
k_noise, k_conv = torch.split(
krylovs,
split_size_or_sections=[int(_noise), int(_conv)],
dim=0
)
if _conv: # Cache the Krylov matrix K(A, B)
self.k = k_conv.squeeze(0) # (H, N, L)
if _noise:
k_noise = k_noise.squeeze(0) # (H, N, L)
# Convolution
y = linear_system_from_krylov(u, self.C, self.D, self.k[..., :u.shape[0]]) # (L, B, H, M)
if _noise:
k_noise = torch.cumsum(k_noise, dim=-1) # (H, N, L) w + Aw + A^2w + ...
k_noise = contract('h m n, h n l -> l h m', self.C, k_noise) # C @ k
y = y + k_noise.unsqueeze(1) # (L, B, H, M)
y = y + self.noise * torch.randn(y.shape, dtype=u.dtype, device=u.device)
# State needs a special case because it has a batch dimension
if state is not None: # (B, H, N)
if A is None: A = self.transition.gbt_A(dt) # (..., N, N) (..., N)
ATC, ATL = krylov(u.shape[0], A.transpose(-1,-2), self.C.transpose(0, 1), return_power=True) # (M, H, N, L), (H, N, N) represents A^T C and (A^T)^L
y = y + contract('mhnl, bhn -> lbhm', ATC, state)
# Compute next state
with torch.no_grad():
next_state = contract('hnp, bhn -> bhp', ATL, state)
if _noise:
next_state = next_state + k_noise[..., -1]
                next_state = next_state + contract('lbh, hnl -> bhn', u.flip(0), self.k[..., :u.shape[0]]) # (B, H, N)
next_state = contract('hnp, bhp -> bhn', A, next_state)
next_state = next_state.detach() # TODO necessary?
# Debugging code useful for checking if state computation is correct
# from models.functional.unroll import variable_unroll_sequential, variable_unroll
# B = self.transition.gbt_B(dt)
# inps = B*u.unsqueeze(-1) # (L, B, H, N)
# inps[0] = inps[0] + state
# xx = variable_unroll(A, inps, variable=False)
# yy = torch.sum(self.C * xx.unsqueeze(-2), dim=-1)
# yy = yy + u.unsqueeze(-1) * self.D # true output y; should equal y
# xx_ = variable_unroll(A, B*u.unsqueeze(-1), variable=False)
# yy_ = torch.sum(self.C * xx_.unsqueeze(-2), dim=-1)
# yy_ = yy_ + u.unsqueeze(-1) * self.D # output without state; should equal y before the C A^T S term was added
# ss = (A @ xx[-1].unsqueeze(-1)).squeeze(-1) # should equal next_state
# breakpoint()
# y = z
# bias term
if self.bias:
y = y + self.E
y = self.drop(self.activate(y))
y = rearrange(y, 'l b h m -> l b (h m)') # (L, B, H*M)
if self.ff:
y = self.output_linear(y) # (L, B, H)
y = y.transpose(0, 1) # Back to (B, L, H) as expected
return y, next_state
def is_initialized(self):
return self.k is not None
def initialize(self, shared_params):
if 'k' in shared_params:
self.k = shared_params['k']
else:
dt = torch.exp(self.log_dt)
A = self.transition.gbt_A(dt) # (..., N, N)
B = self.transition.gbt_B(dt) # (..., N)
            self.k = krylov(1024, A, B) # (H, N, L)
shared_params['k'] = self.k
def default_state(self, *batch_shape, device=None):
return torch.zeros(*batch_shape, self.N, device=device)
def step(self, x, state):
raise NotImplementedError("Needs to be implemented.")
@property
def d_state(self):
return self.d
@property
def d_output(self):
return self.d
@property
def state_to_tensor(self):
return lambda state: state
LSSL = TransposedModule(Platypus)
| state-spaces-main | src/models/sequence/modules/lssl.py |
"""Implements downsampling and upsampling on sequences."""
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
from src.models.sequence import SequenceModule
from src.models.nn import LinearActivation
"""The following pooling modules all subscribe to the same interface.
stride: Subsample on the layer dimension.
expand: Expansion factor on the feature dimension.
"""
class DownSample(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
if x is None: return None
if self.stride > 1:
assert x.ndim == 3, "Downsampling with higher-dimensional inputs is currently not supported. It is recommended to use average or spectral pooling instead."
if self.transposed:
x = x[..., 0::self.stride]
else:
x = x[..., 0::self.stride, :]
if self.expand > 1:
if self.transposed:
x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
else:
x = repeat(x, 'b ... d -> b ... (d e)', e=self.expand)
return x, None
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
@property
def d_output(self):
return self.d_input * self.expand
class DownAvgPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=None, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
if self.expand is not None:
self.linear = LinearActivation(
d_input,
d_input * expand,
transposed=transposed,
)
def forward(self, x):
if not self.transposed:
x = rearrange(x, 'b ... d -> b d ...')
if self.stride > 1:
# einops appears slower than F
if x.ndim == 3:
x = F.avg_pool1d(x, self.stride, self.stride)
elif x.ndim == 4:
x = F.avg_pool2d(x, self.stride, self.stride)
else:
# Reduction string e.g. "b d (l1 2) (l2 2) -> b d l1 l2"
reduce_str = "b d " + " ".join([f"(l{i} {self.stride})" for i in range(x.ndim-2)]) \
+ " -> b d " + " ".join([f"l{i}" for i in range(x.ndim-2)])
x = reduce(x, reduce_str, 'mean')
# if self.expand > 1:
# x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
if not self.transposed:
x = rearrange(x, 'b d ... -> b ... d')
if self.expand is not None:
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
        if self.stride > 1 or (self.expand is not None and self.expand > 1):
raise NotImplementedError
return x, state
@property
def d_output(self):
if self.expand is None:
return self.d_input
else:
return self.d_input * self.expand
class DownSpectralPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
"""
x: (B, L..., D)
"""
if not self.transposed:
x = rearrange(x, 'b ... d -> b d ...')
shape = x.shape[2:]
x_f = torch.fft.ifftn(x, s=shape)
for axis, l in enumerate(shape):
assert l % self.stride == 0, 'input length must be divisible by stride'
new_l = l // self.stride
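            # Keep only the lowest `new_l` frequency bins along this axis: the first
            # ceil(new_l/2) non-negative frequencies plus the last floor(new_l/2)
            # negative frequencies (stored at the end of the FFT output).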
idx = torch.cat([torch.arange(0, new_l-new_l//2), l+torch.arange(-new_l//2, 0)]).to(x_f.device)
x_f = torch.index_select(x_f, 2+axis, idx)
x = torch.fft.ifftn(x_f, s=[l//self.stride for l in shape])
x = x.real
if self.expand > 1:
x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
if not self.transposed:
x = rearrange(x, 'b d ... -> b ... d')
return x, None
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
@property
def d_output(self):
return self.d_input * self.expand
class UpSample(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
if x is None: return None
if self.expand > 1:
if self.transposed:
x = reduce(x, '... (d e) l -> ... d l', 'mean', e=self.expand)
else:
x = reduce(x, '... (d e) -> ... d', 'mean', e=self.expand)
if self.stride > 1:
if self.transposed:
x = repeat(x, '... l -> ... (l e)', e=self.stride)
else:
x = repeat(x, '... l d -> ... (l e) d', e=self.stride)
return x, None
@property
def d_output(self):
return self.d_input // self.expand
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
class UpAvgPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
assert d_input % expand == 0
self.d_input = d_input
self.stride = stride
self.expand = expand
self.causal = causal
self.transposed = transposed
self.linear = LinearActivation(
d_input,
d_input // expand,
transposed=transposed,
)
def forward(self, x):
# TODO only works for 1D right now
if x is None: return None
x = self.linear(x)
if self.stride > 1:
if self.transposed:
if self.causal:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = repeat(x, '... l -> ... (l e)', e=self.stride)
else:
if self.causal:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = repeat(x, '... l d -> ... (l e) d', e=self.stride)
return x, None
@property
def d_output(self):
return self.d_input // self.expand
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
class DownLinearPool(SequenceModule):
def __init__(self, d_model, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
self.d_model = d_model
self.stride = stride
self.expand = expand
self.transposed = transposed
self.linear = LinearActivation(
d_model * stride,
d_model * expand,
transposed=transposed,
)
def forward(self, x):
if self.transposed:
x = rearrange(x, '... h (l s) -> ... (h s) l', s=self.stride)
else:
x = rearrange(x, '... (l s) h -> ... l (h s)', s=self.stride)
x = self.linear(x)
return x, None
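    # step() buffers `stride` consecutive inputs in `state` before applying the linear
    # map once; until the buffer is full it returns None (no output emitted yet).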
def step(self, x, state, **kwargs):
if x is None: return None, state
state.append(x)
if len(state) == self.stride:
x = rearrange(torch.stack(state, dim=-1), '... h s -> ... (h s)')
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
return x, []
else:
return None, state
def default_state(self, *batch_shape, device=None):
return []
@property
def d_output(self):
        return self.d_model * self.expand
class UpLinearPool(SequenceModule):
def __init__(self, d, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
assert d % expand == 0
self.d_model = d
self.d_output = d // expand
# self._d_output = d_output
self.stride = stride
self.causal = causal
self.transposed = transposed
self.linear = LinearActivation(
self.d_model,
self.d_output * stride,
transposed=transposed,
)
def forward(self, x, skip=None):
x = self.linear(x)
if self.transposed:
if self.causal:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = rearrange(x, '... (h s) l -> ... h (l s)', s=self.stride)
else:
if self.causal:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = rearrange(x, '... l (h s) -> ... (l s) h', s=self.stride)
if skip is not None:
x = x + skip
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
assert len(state) > 0
y, state = state[0], state[1:]
if len(state) == 0:
assert x is not None
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
x = rearrange(x, '... (h s) -> ... h s', s=self.stride)
state = list(torch.unbind(x, dim=-1))
else: assert x is None
return y, state
def default_state(self, *batch_shape, device=None):
state = torch.zeros(batch_shape + (self.d_output, self.stride), device=device) # (batch, h, s)
state = list(torch.unbind(state, dim=-1)) # List of (..., H)
return state
"""Pooling functions with trainable parameters."""
class DownPool2d(SequenceModule):
def __init__(self, d_input, d_output, stride=1, transposed=True, weight_norm=True):
# TODO make d_output expand instead
super().__init__()
self.linear = LinearActivation(
d_input,
d_output,
transposed=transposed,
weight_norm=weight_norm,
)
        self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride)
        self.transposed = transposed
    def forward(self, x):
        if self.transposed:
            x = self.pool(x)
        return x, None
# TODO DownPool/UpPool are currently used by unet/sashimi backbones
# DownLinearPool is used by the registry (for isotropic backbone)
# DownPool is essentially the same as DownLinearPool. These should be consolidated
class DownPool(SequenceModule):
def __init__(self, d_input, d_output=None, expand=None, stride=1, transposed=True, weight_norm=True, initializer=None, activation=None):
super().__init__()
assert (d_output is None) + (expand is None) == 1
if d_output is None: d_output = d_input * expand
self.d_output = d_output
self.stride = stride
self.transposed = transposed
self.linear = LinearActivation(
d_input * stride,
d_output,
transposed=transposed,
initializer=initializer,
weight_norm = weight_norm,
activation=activation,
activate=True if activation is not None else False,
)
def forward(self, x):
if self.transposed:
x = rearrange(x, '... h (l s) -> ... (h s) l', s=self.stride)
else:
x = rearrange(x, '... (l s) h -> ... l (h s)', s=self.stride)
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
if x is None: return None, state
state.append(x)
if len(state) == self.stride:
x = rearrange(torch.stack(state, dim=-1), '... h s -> ... (h s)')
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
return x, []
else:
return None, state
def default_state(self, *batch_shape, device=None):
return []
class UpPool(SequenceModule):
def __init__(self, d_input, d_output, stride, transposed=True, weight_norm=True, initializer=None, activation=None):
super().__init__()
self.d_input = d_input
self._d_output = d_output
self.stride = stride
self.transposed = transposed
self.linear = LinearActivation(
d_input,
d_output * stride,
transposed=transposed,
initializer=initializer,
weight_norm = weight_norm,
activation=activation,
activate=True if activation is not None else False,
)
def forward(self, x, skip=None):
x = self.linear(x)
if self.transposed:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = rearrange(x, '... (h s) l -> ... h (l s)', s=self.stride)
else:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = rearrange(x, '... l (h s) -> ... (l s) h', s=self.stride)
if skip is not None:
x = x + skip
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
assert len(state) > 0
y, state = state[0], state[1:]
if len(state) == 0:
assert x is not None
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
x = rearrange(x, '... (h s) -> ... h s', s=self.stride)
state = list(torch.unbind(x, dim=-1))
else: assert x is None
return y, state
def default_state(self, *batch_shape, device=None):
state = torch.zeros(batch_shape + (self.d_output, self.stride), device=device) # (batch, h, s)
state = list(torch.unbind(state, dim=-1)) # List of (..., H)
return state
@property
def d_output(self): return self._d_output
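# A minimal shape sketch for the DownPool/UpPool pair (illustrative values, not taken
# from a repo config), using the non-transposed (B, L, D) layout:
#   down = DownPool(d_input=64, d_output=128, stride=4, transposed=False)
#   up = UpPool(d_input=128, d_output=64, stride=4, transposed=False)
#   x = torch.randn(8, 1024, 64)
#   z, _ = down(x)        # (8, 256, 128): length / stride, features projected to d_output
#   y, _ = up(z, skip=x)  # (8, 1024, 64): length * stride, shifted one step for causality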
registry = {
'sample': DownSample,
'pool': DownAvgPool,
'avg': DownAvgPool,
'linear': DownLinearPool,
'spectral': DownSpectralPool,
}
up_registry = {
# 'sample': UpSample,
'pool': UpAvgPool,
'avg': UpAvgPool,
'linear': UpLinearPool,
# 'spectral': UpSpectralPool, # Not implemented and no way to make this causal
}
| state-spaces-main | src/models/sequence/modules/pool.py |
"""PyTorch ResNet implementations.
This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with
additional dropout and dynamic global avg/max pool.
ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman
Copyright 2019, Ross Wightman
"""
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg # checkpoint_seq
from timm.models.layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, GroupNorm, create_attn, get_attn, create_classifier
from timm.models.registry import register_model
__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'fc',
**kwargs
}
default_cfgs = {
# ResNet and Wide ResNet
'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'),
'resnet18d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth',
interpolation='bicubic', first_conv='conv1.0'),
'resnet34': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'),
'resnet34d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth',
interpolation='bicubic', first_conv='conv1.0'),
'resnet26': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth',
interpolation='bicubic'),
'resnet26d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth',
interpolation='bicubic', first_conv='conv1.0'),
'resnet26t': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94),
'resnet50': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1_0-14fe96d1.pth',
interpolation='bicubic', crop_pct=0.95),
'resnet50d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth',
interpolation='bicubic', first_conv='conv1.0'),
'resnet50t': _cfg(
url='',
interpolation='bicubic', first_conv='conv1.0'),
'resnet101': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1h-36d3f2aa.pth',
interpolation='bicubic', crop_pct=0.95),
'resnet101d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=1.0, test_input_size=(3, 320, 320)),
'resnet152': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1h-dc400468.pth',
interpolation='bicubic', crop_pct=0.95),
'resnet152d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=1.0, test_input_size=(3, 320, 320)),
'resnet200': _cfg(url='', interpolation='bicubic'),
'resnet200d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=1.0, test_input_size=(3, 320, 320)),
'tv_resnet34': _cfg(url='https://download.pytorch.org/models/resnet34-333f7ec4.pth'),
'tv_resnet50': _cfg(url='https://download.pytorch.org/models/resnet50-19c8e357.pth'),
'tv_resnet101': _cfg(url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'),
'tv_resnet152': _cfg(url='https://download.pytorch.org/models/resnet152-b121ed2d.pth'),
'wide_resnet50_2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth',
interpolation='bicubic'),
'wide_resnet101_2': _cfg(url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth'),
# ResNets w/ alternative norm layers
'resnet50_gn': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_gn_a1h2-8fe6c4d0.pth',
crop_pct=0.94, interpolation='bicubic'),
# ResNeXt
'resnext50_32x4d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1h-0146ab0a.pth',
interpolation='bicubic', crop_pct=0.95),
'resnext50d_32x4d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth',
interpolation='bicubic',
first_conv='conv1.0'),
'resnext101_32x4d': _cfg(url=''),
'resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth'),
'resnext101_64x4d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnext101_64x4d_c-0d0e0cc0.pth',
interpolation='bicubic', crop_pct=1.0, test_input_size=(3, 288, 288)),
'tv_resnext50_32x4d': _cfg(url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth'),
# ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags
# from https://github.com/facebookresearch/WSL-Images
    # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
'ig_resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth'),
'ig_resnext101_32x16d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth'),
'ig_resnext101_32x32d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth'),
'ig_resnext101_32x48d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth'),
# Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models
    # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
'ssl_resnet18': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth'),
'ssl_resnet50': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth'),
'ssl_resnext50_32x4d': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth'),
'ssl_resnext101_32x4d': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth'),
'ssl_resnext101_32x8d': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth'),
'ssl_resnext101_32x16d': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth'),
# Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models
    # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
'swsl_resnet18': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth'),
'swsl_resnet50': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth'),
'swsl_resnext50_32x4d': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth'),
'swsl_resnext101_32x4d': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth'),
'swsl_resnext101_32x8d': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth'),
'swsl_resnext101_32x16d': _cfg(
url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth'),
# Squeeze-Excitation ResNets, to eventually replace the models in senet.py
'seresnet18': _cfg(
url='',
interpolation='bicubic'),
'seresnet34': _cfg(
url='',
interpolation='bicubic'),
'seresnet50': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth',
interpolation='bicubic'),
'seresnet50t': _cfg(
url='',
interpolation='bicubic',
first_conv='conv1.0'),
'seresnet101': _cfg(
url='',
interpolation='bicubic'),
'seresnet152': _cfg(
url='',
interpolation='bicubic'),
'seresnet152d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=1.0, test_input_size=(3, 320, 320)
),
'seresnet200d': _cfg(
url='',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)),
'seresnet269d': _cfg(
url='',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)),
# Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py
'seresnext26d_32x4d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth',
interpolation='bicubic',
first_conv='conv1.0'),
'seresnext26t_32x4d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth',
interpolation='bicubic',
first_conv='conv1.0'),
'seresnext50_32x4d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth',
interpolation='bicubic'),
'seresnext101_32x4d': _cfg(
url='',
interpolation='bicubic'),
'seresnext101_32x8d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101_32x8d_ah-e6bc4c0a.pth',
interpolation='bicubic', test_input_size=(3, 288, 288), crop_pct=1.0),
'senet154': _cfg(
url='',
interpolation='bicubic',
first_conv='conv1.0'),
# Efficient Channel Attention ResNets
'ecaresnet26t': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320)),
'ecaresnetlight': _cfg(
url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNetLight_4f34b35b.pth',
interpolation='bicubic'),
'ecaresnet50d': _cfg(
url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet50D_833caf58.pth',
interpolation='bicubic',
first_conv='conv1.0'),
'ecaresnet50d_pruned': _cfg(
url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45899/outputs/ECAResNet50D_P_9c67f710.pth',
interpolation='bicubic',
first_conv='conv1.0'),
'ecaresnet50t': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320)),
'ecaresnet101d': _cfg(
url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet101D_281c5844.pth',
interpolation='bicubic', first_conv='conv1.0'),
'ecaresnet101d_pruned': _cfg(
url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45610/outputs/ECAResNet101D_P_75a3370e.pth',
interpolation='bicubic',
first_conv='conv1.0'),
'ecaresnet200d': _cfg(
url='',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)),
'ecaresnet269d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10),
crop_pct=1.0, test_input_size=(3, 352, 352)),
# Efficient Channel Attention ResNeXts
'ecaresnext26t_32x4d': _cfg(
url='',
interpolation='bicubic', first_conv='conv1.0'),
'ecaresnext50t_32x4d': _cfg(
url='',
interpolation='bicubic', first_conv='conv1.0'),
# ResNets with anti-aliasing blur pool
'resnetblur18': _cfg(
interpolation='bicubic'),
'resnetblur50': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth',
interpolation='bicubic'),
'resnetblur50d': _cfg(
url='',
interpolation='bicubic', first_conv='conv1.0'),
'resnetblur101d': _cfg(
url='',
interpolation='bicubic', first_conv='conv1.0'),
'resnetaa50d': _cfg(
url='',
interpolation='bicubic', first_conv='conv1.0'),
'resnetaa101d': _cfg(
url='',
interpolation='bicubic', first_conv='conv1.0'),
'seresnetaa50d': _cfg(
url='',
interpolation='bicubic', first_conv='conv1.0'),
# ResNet-RS models
'resnetrs50': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth',
input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs101': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth',
input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs152': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs200': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnetrs200_c-6b698b88.pth',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs270': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs350': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth',
input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384),
interpolation='bicubic', first_conv='conv1.0'),
'resnetrs420': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth',
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416),
interpolation='bicubic', first_conv='conv1.0'),
}
def get_padding(kernel_size, stride, dilation=1):
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding
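# e.g. get_padding(3, 1) == 1 and get_padding(7, 2) == 3, i.e. 'same'-style padding for
# the 7x7 stem and 3x3 residual convs below.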
def create_aa(aa_layer, channels, stride=2, enable=True):
if not aa_layer or not enable:
return nn.Identity()
return aa_layer(stride) if issubclass(aa_layer, nn.AvgPool2d) else aa_layer(channels=channels, stride=stride)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
super(BasicBlock, self).__init__()
assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
assert base_width == 64, 'BasicBlock does not support changing base width'
first_planes = planes // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)
self.conv1 = nn.Conv2d(
inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation,
dilation=first_dilation, bias=False)
self.bn1 = norm_layer(first_planes)
self.drop_block = drop_block() if drop_block is not None else nn.Identity()
self.act1 = act_layer(inplace=True)
self.aa = create_aa(aa_layer, channels=first_planes, stride=stride, enable=use_aa)
self.conv2 = nn.Conv2d(
first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False)
self.bn2 = norm_layer(outplanes)
self.se = create_attn(attn_layer, outplanes)
self.act2 = act_layer(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.drop_path = drop_path
def zero_init_last(self):
nn.init.zeros_(self.bn2.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.bn1(x)
x = self.drop_block(x)
x = self.act1(x)
x = self.aa(x)
x = self.conv2(x)
x = self.bn2(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act2(x)
return x
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
super(Bottleneck, self).__init__()
width = int(math.floor(planes * (base_width / 64)) * cardinality)
first_planes = width // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)
self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False)
self.bn1 = norm_layer(first_planes)
self.act1 = act_layer(inplace=True)
self.conv2 = nn.Conv2d(
first_planes, width, kernel_size=3, stride=1 if use_aa else stride,
padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False)
self.bn2 = norm_layer(width)
self.drop_block = drop_block() if drop_block is not None else nn.Identity()
self.act2 = act_layer(inplace=True)
self.aa = create_aa(aa_layer, channels=width, stride=stride, enable=use_aa)
self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False)
self.bn3 = norm_layer(outplanes)
self.se = create_attn(attn_layer, outplanes)
self.act3 = act_layer(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.drop_path = drop_path
def zero_init_last(self):
nn.init.zeros_(self.bn3.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.drop_block(x)
x = self.act2(x)
x = self.aa(x)
x = self.conv3(x)
x = self.bn3(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act3(x)
return x
def downsample_conv(
in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):
norm_layer = norm_layer or nn.BatchNorm2d
kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size
first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1
p = get_padding(kernel_size, stride, first_dilation)
return nn.Sequential(*[
nn.Conv2d(
in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False),
norm_layer(out_channels)
])
def downsample_avg(
in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):
norm_layer = norm_layer or nn.BatchNorm2d
avg_stride = stride if dilation == 1 else 1
if stride == 1 and dilation == 1:
pool = nn.Identity()
else:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
return nn.Sequential(*[
pool,
nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False),
norm_layer(out_channels)
])
def drop_blocks(drop_prob=0.):
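    # Per-stage DropBlock schedule: the first two stages get no DropBlock; stage 3 uses a
    # 5x5 block at a reduced gamma_scale (0.25) and stage 4 a 3x3 block at full scale.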
return [
None, None,
partial(DropBlock2d, drop_prob=drop_prob, block_size=5, gamma_scale=0.25) if drop_prob else None,
partial(DropBlock2d, drop_prob=drop_prob, block_size=3, gamma_scale=1.00) if drop_prob else None]
def make_blocks(
block_fn, channels, block_repeats, inplanes, reduce_first=1, output_stride=32,
down_kernel_size=1, avg_down=False, drop_block_rate=0., drop_path_rate=0., **kwargs):
stages = []
feature_info = []
net_num_blocks = sum(block_repeats)
net_block_idx = 0
net_stride = 4
dilation = prev_dilation = 1
for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))):
stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it
stride = 1 if stage_idx == 0 else 2
if net_stride >= output_stride:
dilation *= stride
stride = 1
else:
net_stride *= stride
downsample = None
if stride != 1 or inplanes != planes * block_fn.expansion:
down_kwargs = dict(
in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size,
stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer'))
downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs)
block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs)
blocks = []
for block_idx in range(num_blocks):
downsample = downsample if block_idx == 0 else None
stride = stride if block_idx == 0 else 1
block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule
blocks.append(block_fn(
inplanes, planes, stride, downsample, first_dilation=prev_dilation,
drop_path=DropPath(block_dpr) if block_dpr > 0. else None, **block_kwargs))
prev_dilation = dilation
inplanes = planes * block_fn.expansion
net_block_idx += 1
stages.append((stage_name, nn.Sequential(*blocks)))
feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name))
return stages, feature_info
class ResNet(nn.Module):
"""ResNet / ResNeXt / SE-ResNeXt / SE-Net
This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that
* have > 1 stride in the 3x3 conv layer of bottleneck
* have conv-bn-act ordering
This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s
variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the
'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default.
ResNet variants (the same modifications can be used in SE/ResNeXt models as well):
* normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b
* c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64)
* d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample
* e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample
* s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128)
* t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample
* tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample
ResNeXt
* normal - 7x7 stem, stem_width = 64, standard cardinality and base widths
* same c,d, e, s variants as ResNet can be enabled
SE-ResNeXt
* normal - 7x7 stem, stem_width = 64
* same c, d, e, s variants as ResNet can be enabled
SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64,
reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block
Parameters
----------
    block : Block, class for the residual block. Options are BasicBlock, Bottleneck.
layers : list of int, number of layers in each block
num_classes : int, default 1000, number of classification classes.
in_chans : int, default 3, number of input (color) channels.
output_stride : int, default 32, output stride of the network, 32, 16, or 8.
global_pool : str, Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax'
cardinality : int, default 1, number of convolution groups for 3x3 conv in Bottleneck.
base_width : int, default 64, factor determining bottleneck channels. `planes * base_width / 64 * cardinality`
stem_width : int, default 64, number of channels in stem convolutions
stem_type : str, default ''
The type of stem:
* '', default - a single 7x7 conv with a width of stem_width
* 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2
* 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2
block_reduce_first : int, default 1
Reduction factor for first convolution output width of residual blocks, 1 for all archs except senets, where 2
down_kernel_size : int, default 1, kernel size of residual block downsample path, 1x1 for most, 3x3 for senets
avg_down : bool, default False, use average pooling for projection skip connection between stages/downsample.
act_layer : nn.Module, activation layer
norm_layer : nn.Module, normalization layer
aa_layer : nn.Module, anti-aliasing layer
drop_rate : float, default 0. Dropout probability before classifier, for training
"""
def __init__(
self, block, layers, num_classes=1000, in_chans=3, output_stride=32, global_pool='avg',
cardinality=1, base_width=64, stem_width=64, stem_type='', replace_stem_pool=False, block_reduce_first=1,
down_kernel_size=1, avg_down=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None,
drop_rate=0.0, drop_path_rate=0., drop_block_rate=0., zero_init_last=True, block_args=None):
super(ResNet, self).__init__()
block_args = block_args or dict()
assert output_stride in (8, 16, 32)
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
deep_stem = 'deep' in stem_type
inplanes = stem_width * 2 if deep_stem else 64
if deep_stem:
stem_chs = (stem_width, stem_width)
if 'tiered' in stem_type:
stem_chs = (3 * (stem_width // 4), stem_width)
self.conv1 = nn.Sequential(*[
nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False),
norm_layer(stem_chs[0]),
act_layer(inplace=True),
nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False),
norm_layer(stem_chs[1]),
act_layer(inplace=True),
nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)])
else:
self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(inplanes)
self.act1 = act_layer(inplace=True)
self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')]
# Stem pooling. The name 'maxpool' remains for weight compatibility.
if replace_stem_pool:
self.maxpool = nn.Sequential(*filter(None, [
nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False),
create_aa(aa_layer, channels=inplanes, stride=2) if aa_layer is not None else None,
norm_layer(inplanes),
act_layer(inplace=True)
]))
else:
if aa_layer is not None:
if issubclass(aa_layer, nn.AvgPool2d):
self.maxpool = aa_layer(2)
else:
self.maxpool = nn.Sequential(*[
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
aa_layer(channels=inplanes, stride=2)])
else:
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# Feature Blocks
channels = [64, 128, 256, 512]
stage_modules, stage_feature_info = make_blocks(
block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width,
output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down,
down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer,
drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args)
for stage in stage_modules:
self.add_module(*stage) # layer1, layer2, etc
self.feature_info.extend(stage_feature_info)
# Head (Pooling and Classifier)
self.num_features = 512 * block.expansion
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
self.init_weights(zero_init_last=zero_init_last)
@torch.jit.ignore
def init_weights(self, zero_init_last=True):
for n, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
if zero_init_last:
for m in self.modules():
if hasattr(m, 'zero_init_last'):
m.zero_init_last()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(stem=r'^conv1|bn1|maxpool', blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)')
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self, name_only=False):
return 'fc' if name_only else self.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.maxpool(x)
# not available on timm 0.5.4 (stable)
# if self.grad_checkpointing and not torch.jit.is_scripting():
# x = checkpoint_seq([self.layer1, self.layer2, self.layer3, self.layer4], x, flatten=True)
# else:
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
if self.drop_rate:
x = F.dropout(x, p=float(self.drop_rate), training=self.training)
return x if pre_logits else self.fc(x)
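    # Unlike upstream timm, forward() below accepts an (ignored) state argument and
    # returns (output, None), matching the (y, state) convention of the sequence
    # modules elsewhere in this repo.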
def forward(self, x, state=None):
x = self.forward_features(x)
x = self.forward_head(x)
return x, None
def _create_resnet(variant, pretrained=False, **kwargs):
# need to add this for TIMM 0.5.4 (stable) --> default_cfg=default_cfgs[variant],
return build_model_with_cfg(ResNet, variant, pretrained, default_cfg=default_cfgs[variant], **kwargs)
@register_model
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
"""
model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
return _create_resnet('resnet18', pretrained, **model_args)
@register_model
def resnet18d(pretrained=False, **kwargs):
"""Constructs a ResNet-18-D model.
"""
model_args = dict(
block=BasicBlock, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnet18d', pretrained, **model_args)
@register_model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
"""
model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)
return _create_resnet('resnet34', pretrained, **model_args)
@register_model
def resnet34d(pretrained=False, **kwargs):
"""Constructs a ResNet-34-D model.
"""
model_args = dict(
block=BasicBlock, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnet34d', pretrained, **model_args)
@register_model
def resnet26(pretrained=False, **kwargs):
"""Constructs a ResNet-26 model.
"""
model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], **kwargs)
return _create_resnet('resnet26', pretrained, **model_args)
@register_model
def resnet26t(pretrained=False, **kwargs):
"""Constructs a ResNet-26-T model.
"""
model_args = dict(
block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs)
return _create_resnet('resnet26t', pretrained, **model_args)
@register_model
def resnet26d(pretrained=False, **kwargs):
"""Constructs a ResNet-26-D model.
"""
model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnet26d', pretrained, **model_args)
@register_model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
return _create_resnet('resnet50', pretrained, **model_args)
@register_model
def resnet50d(pretrained=False, **kwargs):
"""Constructs a ResNet-50-D model.
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnet50d', pretrained, **model_args)
@register_model
def resnet50t(pretrained=False, **kwargs):
"""Constructs a ResNet-50-T model.
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs)
return _create_resnet('resnet50t', pretrained, **model_args)
@register_model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)
return _create_resnet('resnet101', pretrained, **model_args)
@register_model
def resnet101d(pretrained=False, **kwargs):
"""Constructs a ResNet-101-D model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnet101d', pretrained, **model_args)
@register_model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
"""
model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)
return _create_resnet('resnet152', pretrained, **model_args)
@register_model
def resnet152d(pretrained=False, **kwargs):
"""Constructs a ResNet-152-D model.
"""
model_args = dict(
block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnet152d', pretrained, **model_args)
@register_model
def resnet200(pretrained=False, **kwargs):
"""Constructs a ResNet-200 model.
"""
model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], **kwargs)
return _create_resnet('resnet200', pretrained, **model_args)
@register_model
def resnet200d(pretrained=False, **kwargs):
"""Constructs a ResNet-200-D model.
"""
model_args = dict(
block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnet200d', pretrained, **model_args)
@register_model
def tv_resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model with original Torchvision weights.
"""
model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)
return _create_resnet('tv_resnet34', pretrained, **model_args)
@register_model
def tv_resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model with original Torchvision weights.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
return _create_resnet('tv_resnet50', pretrained, **model_args)
@register_model
def tv_resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model w/ Torchvision pretrained weights.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)
return _create_resnet('tv_resnet101', pretrained, **model_args)
@register_model
def tv_resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model w/ Torchvision pretrained weights.
"""
model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)
return _create_resnet('tv_resnet152', pretrained, **model_args)
@register_model
def wide_resnet50_2(pretrained=False, **kwargs):
"""Constructs a Wide ResNet-50-2 model.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], base_width=128, **kwargs)
return _create_resnet('wide_resnet50_2', pretrained, **model_args)
@register_model
def wide_resnet101_2(pretrained=False, **kwargs):
"""Constructs a Wide ResNet-101-2 model.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], base_width=128, **kwargs)
return _create_resnet('wide_resnet101_2', pretrained, **model_args)
@register_model
def resnet50_gn(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model w/ GroupNorm
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
return _create_resnet('resnet50_gn', pretrained, norm_layer=GroupNorm, **model_args)
@register_model
def resnext50_32x4d(pretrained=False, **kwargs):
"""Constructs a ResNeXt50-32x4d model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
return _create_resnet('resnext50_32x4d', pretrained, **model_args)
@register_model
def resnext50d_32x4d(pretrained=False, **kwargs):
"""Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnext50d_32x4d', pretrained, **model_args)
@register_model
def resnext101_32x4d(pretrained=False, **kwargs):
"""Constructs a ResNeXt-101 32x4d model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs)
return _create_resnet('resnext101_32x4d', pretrained, **model_args)
@register_model
def resnext101_32x8d(pretrained=False, **kwargs):
"""Constructs a ResNeXt-101 32x8d model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
return _create_resnet('resnext101_32x8d', pretrained, **model_args)
@register_model
def resnext101_64x4d(pretrained=False, **kwargs):
"""Constructs a ResNeXt101-64x4d model.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs)
return _create_resnet('resnext101_64x4d', pretrained, **model_args)
@register_model
def tv_resnext50_32x4d(pretrained=False, **kwargs):
"""Constructs a ResNeXt50-32x4d model with original Torchvision weights.
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
return _create_resnet('tv_resnext50_32x4d', pretrained, **model_args)
@register_model
def ig_resnext101_32x8d(pretrained=True, **kwargs):
"""Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
return _create_resnet('ig_resnext101_32x8d', pretrained, **model_args)
@register_model
def ig_resnext101_32x16d(pretrained=True, **kwargs):
"""Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
return _create_resnet('ig_resnext101_32x16d', pretrained, **model_args)
@register_model
def ig_resnext101_32x32d(pretrained=True, **kwargs):
"""Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=32, **kwargs)
return _create_resnet('ig_resnext101_32x32d', pretrained, **model_args)
@register_model
def ig_resnext101_32x48d(pretrained=True, **kwargs):
"""Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=48, **kwargs)
return _create_resnet('ig_resnext101_32x48d', pretrained, **model_args)
@register_model
def ssl_resnet18(pretrained=True, **kwargs):
"""Constructs a semi-supervised ResNet-18 model pre-trained on YFCC100M dataset and finetuned on ImageNet
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
return _create_resnet('ssl_resnet18', pretrained, **model_args)
@register_model
def ssl_resnet50(pretrained=True, **kwargs):
"""Constructs a semi-supervised ResNet-50 model pre-trained on YFCC100M dataset and finetuned on ImageNet
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
return _create_resnet('ssl_resnet50', pretrained, **model_args)
@register_model
def ssl_resnext50_32x4d(pretrained=True, **kwargs):
"""Constructs a semi-supervised ResNeXt-50 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
return _create_resnet('ssl_resnext50_32x4d', pretrained, **model_args)
@register_model
def ssl_resnext101_32x4d(pretrained=True, **kwargs):
"""Constructs a semi-supervised ResNeXt-101 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs)
return _create_resnet('ssl_resnext101_32x4d', pretrained, **model_args)
@register_model
def ssl_resnext101_32x8d(pretrained=True, **kwargs):
"""Constructs a semi-supervised ResNeXt-101 32x8 model pre-trained on YFCC100M dataset and finetuned on ImageNet
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
return _create_resnet('ssl_resnext101_32x8d', pretrained, **model_args)
@register_model
def ssl_resnext101_32x16d(pretrained=True, **kwargs):
"""Constructs a semi-supervised ResNeXt-101 32x16 model pre-trained on YFCC100M dataset and finetuned on ImageNet
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
return _create_resnet('ssl_resnext101_32x16d', pretrained, **model_args)
@register_model
def swsl_resnet18(pretrained=True, **kwargs):
"""Constructs a semi-weakly supervised Resnet-18 model pre-trained on 1B weakly supervised
image dataset and finetuned on ImageNet.
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
return _create_resnet('swsl_resnet18', pretrained, **model_args)
@register_model
def swsl_resnet50(pretrained=True, **kwargs):
"""Constructs a semi-weakly supervised ResNet-50 model pre-trained on 1B weakly supervised
image dataset and finetuned on ImageNet.
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
return _create_resnet('swsl_resnet50', pretrained, **model_args)
@register_model
def swsl_resnext50_32x4d(pretrained=True, **kwargs):
"""Constructs a semi-weakly supervised ResNeXt-50 32x4 model pre-trained on 1B weakly supervised
image dataset and finetuned on ImageNet.
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
return _create_resnet('swsl_resnext50_32x4d', pretrained, **model_args)
@register_model
def swsl_resnext101_32x4d(pretrained=True, **kwargs):
"""Constructs a semi-weakly supervised ResNeXt-101 32x4 model pre-trained on 1B weakly supervised
image dataset and finetuned on ImageNet.
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs)
return _create_resnet('swsl_resnext101_32x4d', pretrained, **model_args)
@register_model
def swsl_resnext101_32x8d(pretrained=True, **kwargs):
"""Constructs a semi-weakly supervised ResNeXt-101 32x8 model pre-trained on 1B weakly supervised
image dataset and finetuned on ImageNet.
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
return _create_resnet('swsl_resnext101_32x8d', pretrained, **model_args)
@register_model
def swsl_resnext101_32x16d(pretrained=True, **kwargs):
"""Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised
image dataset and finetuned on ImageNet.
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
return _create_resnet('swsl_resnext101_32x16d', pretrained, **model_args)
@register_model
def ecaresnet26t(pretrained=False, **kwargs):
"""Constructs an ECA-ResNeXt-26-T model.
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
in the deep stem and ECA attn.
"""
model_args = dict(
block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32,
stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnet26t', pretrained, **model_args)
@register_model
def ecaresnet50d(pretrained=False, **kwargs):
"""Constructs a ResNet-50-D model with eca.
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnet50d', pretrained, **model_args)
@register_model
def resnetrs50(pretrained=False, **kwargs):
"""Constructs a ResNet-RS-50 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
return _create_resnet('resnetrs50', pretrained, **model_args)
@register_model
def resnetrs101(pretrained=False, **kwargs):
"""Constructs a ResNet-RS-101 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
return _create_resnet('resnetrs101', pretrained, **model_args)
@register_model
def resnetrs152(pretrained=False, **kwargs):
"""Constructs a ResNet-RS-152 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
return _create_resnet('resnetrs152', pretrained, **model_args)
@register_model
def resnetrs200(pretrained=False, **kwargs):
"""Constructs a ResNet-RS-200 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
return _create_resnet('resnetrs200', pretrained, **model_args)
@register_model
def resnetrs270(pretrained=False, **kwargs):
"""Constructs a ResNet-RS-270 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[4, 29, 53, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
return _create_resnet('resnetrs270', pretrained, **model_args)
@register_model
def resnetrs350(pretrained=False, **kwargs):
"""Constructs a ResNet-RS-350 model.
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[4, 36, 72, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
return _create_resnet('resnetrs350', pretrained, **model_args)
@register_model
def resnetrs420(pretrained=False, **kwargs):
"""Constructs a ResNet-RS-420 model
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
"""
attn_layer = partial(get_attn('se'), rd_ratio=0.25)
model_args = dict(
block=Bottleneck, layers=[4, 44, 87, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
return _create_resnet('resnetrs420', pretrained, **model_args)
@register_model
def ecaresnet50d_pruned(pretrained=False, **kwargs):
"""Constructs a ResNet-50-D model pruned with eca.
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **model_args)
@register_model
def ecaresnet50t(pretrained=False, **kwargs):
"""Constructs an ECA-ResNet-50-T model.
Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn.
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32,
stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnet50t', pretrained, **model_args)
@register_model
def ecaresnetlight(pretrained=False, **kwargs):
"""Constructs a ResNet-50-D light model with eca.
"""
model_args = dict(
block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True,
block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnetlight', pretrained, **model_args)
@register_model
def ecaresnet101d(pretrained=False, **kwargs):
"""Constructs a ResNet-101-D model with eca.
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnet101d', pretrained, **model_args)
@register_model
def ecaresnet101d_pruned(pretrained=False, **kwargs):
"""Constructs a ResNet-101-D model pruned with eca.
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **model_args)
@register_model
def ecaresnet200d(pretrained=False, **kwargs):
"""Constructs a ResNet-200-D model with ECA.
"""
model_args = dict(
block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnet200d', pretrained, **model_args)
@register_model
def ecaresnet269d(pretrained=False, **kwargs):
"""Constructs a ResNet-269-D model with ECA.
"""
model_args = dict(
block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnet269d', pretrained, **model_args)
@register_model
def ecaresnext26t_32x4d(pretrained=False, **kwargs):
"""Constructs an ECA-ResNeXt-26-T model.
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
in the deep stem. This model replaces SE module with the ECA module
"""
model_args = dict(
block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnext26t_32x4d', pretrained, **model_args)
@register_model
def ecaresnext50t_32x4d(pretrained=False, **kwargs):
"""Constructs an ECA-ResNeXt-50-T model.
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
in the deep stem. This model replaces SE module with the ECA module
"""
model_args = dict(
block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnext50t_32x4d', pretrained, **model_args)
@register_model
def resnetblur18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model with blur anti-aliasing
"""
model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], aa_layer=BlurPool2d, **kwargs)
return _create_resnet('resnetblur18', pretrained, **model_args)
@register_model
def resnetblur50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model with blur anti-aliasing
"""
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, **kwargs)
return _create_resnet('resnetblur50', pretrained, **model_args)
@register_model
def resnetblur50d(pretrained=False, **kwargs):
"""Constructs a ResNet-50-D model with blur anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d,
stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnetblur50d', pretrained, **model_args)
@register_model
def resnetblur101d(pretrained=False, **kwargs):
"""Constructs a ResNet-101-D model with blur anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=BlurPool2d,
stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnetblur101d', pretrained, **model_args)
@register_model
def resnetaa50d(pretrained=False, **kwargs):
"""Constructs a ResNet-50-D model with avgpool anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d,
stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnetaa50d', pretrained, **model_args)
@register_model
def resnetaa101d(pretrained=False, **kwargs):
"""Constructs a ResNet-101-D model with avgpool anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=nn.AvgPool2d,
stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnetaa101d', pretrained, **model_args)
@register_model
def seresnetaa50d(pretrained=False, **kwargs):
"""Constructs a SE=ResNet-50-D model with avgpool anti-aliasing
"""
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d,
stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnetaa50d', pretrained, **model_args)
@register_model
def seresnet18(pretrained=False, **kwargs):
model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnet18', pretrained, **model_args)
@register_model
def seresnet34(pretrained=False, **kwargs):
model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnet34', pretrained, **model_args)
@register_model
def seresnet50(pretrained=False, **kwargs):
model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnet50', pretrained, **model_args)
@register_model
def seresnet50t(pretrained=False, **kwargs):
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True,
block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnet50t', pretrained, **model_args)
@register_model
def seresnet101(pretrained=False, **kwargs):
model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnet101', pretrained, **model_args)
@register_model
def seresnet152(pretrained=False, **kwargs):
model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnet152', pretrained, **model_args)
@register_model
def seresnet152d(pretrained=False, **kwargs):
model_args = dict(
block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnet152d', pretrained, **model_args)
@register_model
def seresnet200d(pretrained=False, **kwargs):
"""Constructs a ResNet-200-D model with SE attn.
"""
model_args = dict(
block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnet200d', pretrained, **model_args)
@register_model
def seresnet269d(pretrained=False, **kwargs):
"""Constructs a ResNet-269-D model with SE attn.
"""
model_args = dict(
block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnet269d', pretrained, **model_args)
@register_model
def seresnext26d_32x4d(pretrained=False, **kwargs):
"""Constructs a SE-ResNeXt-26-D model.`
This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for
combination of deep stem and avg_pool in downsample.
"""
model_args = dict(
block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnext26d_32x4d', pretrained, **model_args)
@register_model
def seresnext26t_32x4d(pretrained=False, **kwargs):
"""Constructs a SE-ResNet-26-T model.
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
in the deep stem.
"""
model_args = dict(
block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnext26t_32x4d', pretrained, **model_args)
@register_model
def seresnext26tn_32x4d(pretrained=False, **kwargs):
"""Constructs a SE-ResNeXt-26-T model.
NOTE I deprecated previous 't' model defs and replaced 't' with 'tn', this was the only tn model of note
so keeping this def for backwards compat with any uses out there. Old 't' model is lost.
"""
return seresnext26t_32x4d(pretrained=pretrained, **kwargs)
@register_model
def seresnext50_32x4d(pretrained=False, **kwargs):
model_args = dict(
block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnext50_32x4d', pretrained, **model_args)
@register_model
def seresnext101_32x4d(pretrained=False, **kwargs):
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4,
block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnext101_32x4d', pretrained, **model_args)
@register_model
def seresnext101_32x8d(pretrained=False, **kwargs):
model_args = dict(
block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8,
block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('seresnext101_32x8d', pretrained, **model_args)
@register_model
def senet154(pretrained=False, **kwargs):
model_args = dict(
block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep',
down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se'), **kwargs)
return _create_resnet('senet154', pretrained, **model_args)
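# Usage sketch (an assumption, not part of the original file): once this module has been imported,
# the @register_model decorators above make each variant available through timm's factory, e.g.
#   import timm, torch
#   model = timm.create_model('seresnext50_32x4d', pretrained=False, num_classes=10)
#   logits = model(torch.randn(2, 3, 224, 224))  # -> (2, 10)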
| state-spaces-main | src/models/baselines/resnet_timm.py |
# Copyright 2021 The ODE-LSTM Authors. All Rights Reserved.
"""Adapted from ODE-LSTM https://github.com/mlech26l/ode-lstms/."""
import torch
import torch.nn as nn
from torchdyn.models import NeuralDE
import pytorch_lightning as pl
from torchmetrics.functional import accuracy
class ODELSTMCell(nn.Module):
def __init__(self, d_model, d_hidden, solver_type="dopri5"):
super(ODELSTMCell, self).__init__()
self.solver_type = solver_type
self.fixed_step_solver = solver_type.startswith("fixed_")
self.lstm = nn.LSTMCell(d_model, d_hidden)
# 1 hidden layer NODE
self.f_node = nn.Sequential(
nn.Linear(d_hidden, d_hidden),
nn.Tanh(),
nn.Linear(d_hidden, d_hidden),
)
self.d_model = d_model
self.d_hidden = d_hidden
if not self.fixed_step_solver:
self.node = NeuralDE(self.f_node, solver=solver_type)
else:
options = {
"fixed_euler": self.euler,
"fixed_heun": self.heun,
"fixed_rk4": self.rk4,
}
            if solver_type not in options:
raise ValueError("Unknown solver type '{:}'".format(solver_type))
self.node = options[self.solver_type]
def forward(self, input, hx, ts):
new_h, new_c = self.lstm(input, hx)
if self.fixed_step_solver:
new_h = self.solve_fixed(new_h, ts)
else:
indices = torch.argsort(ts)
batch_size = ts.size(0)
device = input.device
s_sort = ts[indices]
s_sort = s_sort + torch.linspace(0, 1e-4, batch_size, device=device)
# HACK: Make sure no two points are equal
trajectory = self.node.trajectory(new_h, s_sort)
new_h = trajectory[indices, torch.arange(batch_size, device=device)]
return (new_h, new_c)
def solve_fixed(self, x, ts):
ts = ts.view(-1, 1)
for i in range(3): # 3 unfolds
x = self.node(x, ts * (1.0 / 3))
return x
def euler(self, y, delta_t):
dy = self.f_node(y)
return y + delta_t * dy
def heun(self, y, delta_t):
k1 = self.f_node(y)
k2 = self.f_node(y + delta_t * k1)
return y + delta_t * 0.5 * (k1 + k2)
def rk4(self, y, delta_t):
k1 = self.f_node(y)
k2 = self.f_node(y + k1 * delta_t * 0.5)
k3 = self.f_node(y + k2 * delta_t * 0.5)
k4 = self.f_node(y + k3 * delta_t)
return y + delta_t * (k1 + 2 * k2 + 2 * k3 + k4) / 6.0
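# Reference for the fixed-step updates above (one step of size dt for dy/dt = f(y)):
#   euler: y_{n+1} = y_n + dt * f(y_n)
#   heun:  y_{n+1} = y_n + (dt/2) * (k1 + k2),               k1 = f(y_n), k2 = f(y_n + dt*k1)
#   rk4:   y_{n+1} = y_n + (dt/6) * (k1 + 2*k2 + 2*k3 + k4)  (classic four-stage Runge-Kutta)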
class ODELSTM(nn.Module):
def __init__(
self,
d_model,
d_output=None,
d_hidden=None,
return_sequences=True,
solver_type="dopri5",
):
super(ODELSTM, self).__init__()
d_output = d_output or d_model
d_hidden = d_hidden or d_model
self.d_model = d_model
self.d_hidden = d_hidden
self.d_output = d_output
self.return_sequences = return_sequences
self.rnn_cell = ODELSTMCell(d_model, d_hidden, solver_type=solver_type)
self.fc = nn.Linear(self.d_hidden, self.d_output)
def forward(self, x, state=None, timespans=None, mask=None):
device = x.device
batch_size = x.size(0)
seq_len = x.size(1)
hidden_state = [
torch.zeros((batch_size, self.d_hidden), device=device),
torch.zeros((batch_size, self.d_hidden), device=device),
]
outputs = []
last_output = torch.zeros((batch_size, self.d_output), device=device)
if timespans is None:
timespans = x.new_ones(x.shape[:-1]+(1,)) / x.shape[1]
for t in range(seq_len):
inputs = x[:, t]
ts = timespans[:, t].squeeze()
hidden_state = self.rnn_cell.forward(inputs, hidden_state, ts)
current_output = self.fc(hidden_state[0])
outputs.append(current_output)
if mask is not None:
cur_mask = mask[:, t].view(batch_size, 1)
last_output = cur_mask * current_output + (1.0 - cur_mask) * last_output
else:
last_output = current_output
if self.return_sequences:
outputs = torch.stack(outputs, dim=1) # return entire sequence
else:
outputs = last_output # only last item
return outputs, hidden_state
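# Shape sketch for ODELSTM.forward (batch-first; inferred from the code above):
#   x:         (batch, seq_len, d_model)
#   timespans: (batch, seq_len, 1)  -- elapsed time per step; defaults to 1/seq_len if None
#   mask:      (batch, seq_len)     -- optional, 1 for observed steps
# Returns outputs of shape (batch, seq_len, d_output) if return_sequences else (batch, d_output),
# plus the final (h, c) hidden state.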
class IrregularSequenceLearner(pl.LightningModule):
def __init__(self, model, lr=0.005):
super().__init__()
self.model = model
self.lr = lr
def training_step(self, batch, batch_idx):
if len(batch) == 4:
x, t, y, mask = batch
else:
x, t, y = batch
mask = None
        # ODELSTM.forward returns (outputs, hidden_state); pass timespans and mask by keyword
        y_hat, _ = self.model(x, timespans=t, mask=mask)
y_hat = y_hat.view(-1, y_hat.size(-1))
y = y.view(-1)
loss = nn.CrossEntropyLoss()(y_hat, y)
preds = torch.argmax(y_hat.detach(), dim=-1)
acc = accuracy(preds, y)
self.log("train_acc", acc, prog_bar=True)
self.log("train_loss", loss, prog_bar=True)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
if len(batch) == 4:
x, t, y, mask = batch
else:
x, t, y = batch
mask = None
        # ODELSTM.forward returns (outputs, hidden_state); pass timespans and mask by keyword
        y_hat, _ = self.model(x, timespans=t, mask=mask)
y_hat = y_hat.view(-1, y_hat.size(-1))
y = y.view(-1)
loss = nn.CrossEntropyLoss()(y_hat, y)
preds = torch.argmax(y_hat, dim=1)
acc = accuracy(preds, y)
self.log("val_loss", loss, prog_bar=True)
self.log("val_acc", acc, prog_bar=True)
return loss
def test_step(self, batch, batch_idx):
# Here we just reuse the validation_step for testing
return self.validation_step(batch, batch_idx)
def configure_optimizers(self):
return torch.optim.Adam(self.model.parameters(), lr=self.lr)
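# Batch format sketch for IrregularSequenceLearner (an assumption from the steps above): each batch
# is either (x, t, y) or (x, t, y, mask), where x holds the observations, t the per-step time gaps,
# y the integer class labels, and mask flags which steps are valid.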
| state-spaces-main | src/models/baselines/odelstm.py |
"""The original Vision Transformer (ViT) from timm.
Copyright 2020 Ross Wightman.
"""
import math
import logging
from functools import partial
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.helpers import build_model_with_cfg, overlay_external_default_cfg
from timm.models.layers import PatchEmbed, Mlp, trunc_normal_, lecun_normal_
from src.models.sequence.base import SequenceModule
from src.models.nn import Normalization
from src.models.sequence.backbones.block import SequenceResidualBlock
from src.utils.config import to_list, to_dict
_logger = logging.getLogger(__name__)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'input_size': (3, 224, 224),
'pool_size': None,
# 'crop_pct': .9,
# 'interpolation': 'bicubic',
# 'fixed_input_size': True,
# 'mean': IMAGENET_DEFAULT_MEAN,
# 'std': IMAGENET_DEFAULT_STD,
# 'first_conv': 'patch_embed.proj',
'classifier': 'head',
**kwargs,
}
default_cfgs = {
# patch models (my experiments)
'vit_small_patch16_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth',
),
# patch models (weights ported from official Google JAX impl)
'vit_base_patch16_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
}
# class Block(nn.Module):
# def __init__(
# self,
# dim,
# num_heads,
# mlp_ratio=4.,
# qkv_bias=False,
# qk_scale=None,
# drop=0.,
# attn_drop=0.,
# drop_path=0.,
# act_layer=nn.GELU,
# norm_layer=nn.LayerNorm,
# attnlinear_cfg=None,
# mlp_cfg=None
# ):
# super().__init__()
# self.norm1 = norm_layer(dim)
# self.attn = AttentionSimple(
# dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop,
# linear_cfg=attnlinear_cfg)
# self.drop_path = StochasticDepth(drop_path, mode='row')
# self.norm2 = norm_layer(dim)
# mlp_hidden_dim = int(dim * mlp_ratio)
# if mlp_cfg is None:
# self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
# else:
# self.mlp = hydra.utils.instantiate(mlp_cfg, in_features=dim, hidden_features=mlp_hidden_dim,
# act_layer=act_layer, drop=drop, _recursive_=False)
# def forward(self, x):
# x = x + self.drop_path(self.attn(self.norm1(x)))
# x = x + self.drop_path(self.mlp(self.norm2(x)))
# return x
class VisionTransformer(SequenceModule):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
- https://arxiv.org/abs/2010.11929
Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
- https://arxiv.org/abs/2012.12877
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
d_model=768,
depth=12,
# num_heads=12,
expand=4,
# qkv_bias=True,
# qk_scale=None,
representation_size=None,
distilled=False,
dropout=0.,
# attn_drop_rate=0.,
drop_path_rate=0.,
embed_layer=PatchEmbed,
norm='layer',
# norm_layer=None,
# act_layer=None,
weight_init='',
# attnlinear_cfg=None,
# mlp_cfg=None,
layer=None,
# ff_cfg=None,
transposed=False,
layer_reps=1,
use_pos_embed=False,
use_cls_token=False,
track_norms=False,
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
d_model (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
distilled (bool): model includes a distillation token and head as in DeiT models
dropout (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
embed_layer (nn.Module): patch embedding layer
norm_layer: (nn.Module): normalization layer
weight_init: (str): weight init scheme
"""
super().__init__()
self.num_classes = num_classes
self.num_features = self.d_model = d_model # num_features for consistency with other models
self.num_tokens = 2 if distilled else 1
self.use_pos_embed = use_pos_embed
self.use_cls_token = use_cls_token
# norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
# act_layer = act_layer or nn.GELU
self.track_norms = track_norms
self.patch_embed = embed_layer(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=d_model,
)
num_patches = self.patch_embed.num_patches
self.cls_token = None
self.dist_token = None
if use_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, d_model))
self.dist_token = nn.Parameter(torch.zeros(1, 1, d_model)) if distilled else None
else:
assert not distilled, 'Distillation token not supported without class token'
self.pos_embed = None
if use_pos_embed:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, d_model))
self.pos_drop = nn.Dropout(p=dropout)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
# self.blocks = nn.Sequential(*[
# Block(
# dim=d_model, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
# drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer,
# attnlinear_cfg=attnlinear_cfg, mlp_cfg=mlp_cfg)
# for i in range(depth)
# ])
self.transposed = transposed
layer = to_list(layer, recursive=False) * layer_reps
# Some special arguments are passed into each layer
for _layer in layer:
# If layers don't specify dropout, add it
if _layer.get('dropout', None) is None:
_layer['dropout'] = dropout
# Ensure all layers are shaped the same way
_layer['transposed'] = transposed
# # Layer arguments
# layer_cfg = layer.copy()
# layer_cfg['dropout'] = dropout
# layer_cfg['transposed'] = self.transposed
# layer_cfg['initializer'] = None
# # layer_cfg['l_max'] = L
# print("layer config", layer_cfg)
# Config for the inverted bottleneck
ff_cfg = {
'_name_': 'ffn',
'expand': int(expand),
'transposed': self.transposed,
'activation': 'gelu',
'initializer': None,
'dropout': dropout,
}
blocks = []
for i in range(depth):
for _layer in layer:
blocks.append(
SequenceResidualBlock(
d_input=d_model,
i_layer=i,
prenorm=True,
dropout=dropout,
layer=_layer,
residual='R',
norm=norm,
pool=None,
drop_path=dpr[i],
)
)
if expand > 0:
blocks.append(
SequenceResidualBlock(
d_input=d_model,
i_layer=i,
prenorm=True,
dropout=dropout,
layer=ff_cfg,
residual='R',
norm=norm,
pool=None,
drop_path=dpr[i],
)
)
self.blocks = nn.Sequential(*blocks)
# self.norm = norm_layer(d_model)
if norm is None:
self.norm = None
elif isinstance(norm, str):
self.norm = Normalization(d_model, transposed=self.transposed, _name_=norm)
else:
self.norm = Normalization(d_model, transposed=self.transposed, **norm)
# Representation layer: generally defaults to nn.Identity()
if representation_size and not distilled:
self.num_features = representation_size
self.pre_logits = nn.Sequential(OrderedDict([
('fc', nn.Linear(d_model, representation_size)),
('act', nn.Tanh())
]))
else:
self.pre_logits = nn.Identity()
# Classifier head(s): TODO: move to decoder
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.d_model, self.num_classes) if num_classes > 0 else nn.Identity()
# Weight init
assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
if self.dist_token is not None:
trunc_normal_(self.dist_token, std=.02)
if weight_init.startswith('jax'):
# leave cls token as zeros to match jax impl
for n, m in self.named_modules():
_init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)
else:
if self.cls_token is not None:
trunc_normal_(self.cls_token, std=.02)
self.apply(_init_vit_weights)
def _init_weights(self, m):
# this fn left here for compat with downstream users
_init_vit_weights(m)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token', 'dist_token'}
# def get_classifier(self):
# if self.dist_token is None:
# return self.head
# else:
# return self.head, self.head_dist
# def reset_classifier(self, num_classes, global_pool=''):
# self.num_classes = num_classes
# self.head = nn.Linear(self.d_model, num_classes) if num_classes > 0 else nn.Identity()
# if self.num_tokens == 2:
# self.head_dist = nn.Linear(self.d_model, self.num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
# TODO: move to encoder
x = self.patch_embed(x)
if self.use_cls_token:
cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks
if self.dist_token is None:
x = torch.cat((cls_token, x), dim=1)
else:
x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
if self.use_pos_embed:
x = self.pos_drop(x + self.pos_embed)
if self.track_norms: output_norms = [torch.mean(x.detach() ** 2)]
for block in self.blocks:
x, _ = block(x)
if self.track_norms: output_norms.append(torch.mean(x.detach() ** 2))
        if self.norm is not None:
            x = self.norm(x)
if self.track_norms:
metrics = to_dict(output_norms, recursive=False)
self.metrics = {f'norm/{i}': v for i, v in metrics.items()}
if self.dist_token is None:
if self.use_cls_token:
return self.pre_logits(x[:, 0])
else:
# pooling: TODO move to decoder
return self.pre_logits(x.mean(1))
else:
return x[:, 0], x[:, 1]
def forward(self, x, rate=1.0, resolution=None, state=None):
x = self.forward_features(x)
if self.head_dist is not None:
x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple
if self.training and not torch.jit.is_scripting():
            # during training, return both classifier predictions; at inference (else branch) their average is returned
return x, x_dist
else:
return (x + x_dist) / 2
else:
x = self.head(x)
return x, None
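# Data-flow sketch for VisionTransformer (224x224 input with patch_size=16, i.e. 14*14 = 196 patches):
#   images (B, 3, 224, 224) -> patch_embed -> tokens (B, 196, d_model)
#   [+ optional cls/dist tokens and learned positional embeddings]
#   -> for each of the `depth` stages: one SequenceResidualBlock per entry in `layer`,
#      plus an FFN block when expand > 0 -> norm
#   -> cls token (or mean pool) -> pre_logits -> head -> (B, num_classes)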
def _init_vit_weights(m, n: str = '', head_bias: float = 0., jax_impl: bool = False):
""" ViT weight initialization
* When called without n, head_bias, jax_impl args it will behave exactly the same
as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
* When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
"""
if isinstance(m, (nn.Linear)):
if n.startswith('head'):
nn.init.zeros_(m.weight)
nn.init.constant_(m.bias, head_bias)
elif n.startswith('pre_logits'):
lecun_normal_(m.weight)
nn.init.zeros_(m.bias)
else:
if jax_impl:
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
if 'mlp' in n:
nn.init.normal_(m.bias, std=1e-6)
else:
nn.init.zeros_(m.bias)
else:
if m.bias is not None:
nn.init.zeros_(m.bias)
dense_init_fn_ = partial(trunc_normal_, std=.02)
if isinstance(m, nn.Linear):
dense_init_fn_(m.weight)
# elif isinstance(m, (BlockSparseLinear, BlockdiagLinear, LowRank)):
# m.set_weights_from_dense_init(dense_init_fn_)
elif jax_impl and isinstance(m, nn.Conv2d):
# NOTE conv was left to pytorch default in my original init
lecun_normal_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
nn.init.zeros_(m.bias)
nn.init.ones_(m.weight)
def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
_logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)
ntok_new = posemb_new.shape[1]
if num_tokens:
posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
ntok_new -= num_tokens
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
if not len(gs_new): # backwards compatibility
gs_new = [int(math.sqrt(ntok_new))] * 2
assert len(gs_new) >= 2
_logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new)
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bilinear')
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
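# Worked example (assuming a single class token): a /16 model pretrained at 224x224 has a 14x14
# positional grid plus one token (197 positions); fine-tuning at 384x384 (24x24 grid, 577 positions)
# bilinearly interpolates the 14x14 grid to 24x24 and re-attaches the token embeddings unchanged.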
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
if 'model' in state_dict:
# For deit models
state_dict = state_dict['model']
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
# For old models that I trained prior to conv based patchification
O, I, H, W = model.patch_embed.proj.weight.shape
v = v.reshape(O, -1, H, W)
elif k == 'pos_embed' and v.shape != model.pos_embed.shape:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(v, model.pos_embed, getattr(model, 'num_tokens', 1),
model.patch_embed.grid_size)
out_dict[k] = v
return out_dict
def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
if default_cfg is None:
default_cfg = deepcopy(default_cfgs[variant])
overlay_external_default_cfg(default_cfg, kwargs)
default_num_classes = default_cfg['num_classes']
default_img_size = default_cfg['input_size'][-2:]
num_classes = kwargs.pop('num_classes', default_num_classes)
img_size = kwargs.pop('img_size', default_img_size)
repr_size = kwargs.pop('representation_size', None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
# but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
VisionTransformer,
variant,
pretrained,
default_cfg=default_cfg,
img_size=img_size,
num_classes=num_classes,
representation_size=repr_size,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
return model
def vit_small_patch16_224(pretrained=False, **kwargs):
""" Tri's custom 'small' ViT model. d_model=768, depth=8, num_heads=8, mlp_ratio=3.
NOTE:
* this differs from the DeiT based 'small' definitions with d_model=384, depth=12, num_heads=6
* this model does not have a bias for QKV (unlike the official ViT and DeiT models)
"""
print(kwargs)
model_kwargs = dict(
patch_size=16,
d_model=768,
depth=8,
# num_heads=8,
expand=3,
# qkv_bias=False,
norm='layer',
# norm_layer=nn.LayerNorm,
)
model_kwargs = {
**model_kwargs,
**kwargs,
}
if pretrained:
# NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model
model_kwargs.setdefault('qk_scale', 768 ** -0.5)
model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_kwargs)
return model
def vit_base_patch16_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=16,
d_model=768,
depth=12,
# num_heads=12,
)
model_kwargs = {
**model_kwargs,
**kwargs,
}
model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs)
return model
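# Note: unlike timm's ViT, forward() here returns a (logits, state) tuple (state is None), and the
# self-attention blocks are replaced by configurable sequence layers passed via the `layer` argument
# (a dict or list of dicts consumed by SequenceResidualBlock).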
| state-spaces-main | src/models/baselines/vit_all.py |
"""Adapted from https://github.com/vincentherrmann/pytorch-wavenet."""
import os
import os.path
import time
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
import numpy as np
from src.models.sequence.base import SequenceModule
def mu_law_expansion(data, mu):
s = np.sign(data) * (np.exp(np.abs(data) * np.log(mu + 1)) - 1) / mu
return s
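# For reference, the corresponding mu-law *compression* (the inverse of the expansion above);
# a minimal sketch, not used elsewhere in this file:
def mu_law_compression(data, mu):
    return np.sign(data) * np.log1p(mu * np.abs(data)) / np.log1p(mu)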
def dilate(x, dilation, init_dilation=1):
"""
:param x: Tensor of size (N, C, L), where N is the input dilation, C is the number of channels, and L is the input length
:param dilation: Target dilation. Will be the size of the first dimension of the output tensor.
    :param init_dilation: Dilation of the input tensor x. If the input length is not compatible with the target dilation, zero padding is added at the start.
:return: The dilated tensor of size (dilation, C, L*N / dilation). The output might be zero padded at the start
"""
[n, c, l] = x.size()
dilation_factor = dilation / init_dilation
if dilation_factor == 1:
return x
# zero padding for reshaping
new_l = int(np.ceil(l / dilation_factor) * dilation_factor)
if new_l != l:
l = new_l
x = constant_pad_1d(x, new_l)
l_old = int(round(l / dilation_factor))
n_old = int(round(n * dilation_factor))
l = math.ceil(l * init_dilation / dilation)
n = math.ceil(n * dilation / init_dilation)
# reshape according to dilation
x = x.permute(1, 2, 0).contiguous() # (n, c, l) -> (c, l, n)
x = x.view(c, l, n)
x = x.permute(2, 0, 1).contiguous() # (c, l, n) -> (n, c, l)
return x
class DilatedQueue:
def __init__(self, max_length, data=None, dilation=1, num_deq=1, num_channels=1, dtype=torch.FloatTensor):
self.in_pos = 0
self.out_pos = 0
self.num_deq = num_deq
self.num_channels = num_channels
self.dilation = dilation
self.max_length = max_length
self.data = data
self.dtype = dtype
        if data is None:
self.data = Variable(dtype(num_channels, max_length).zero_())
def enqueue(self, input):
assert len(input.shape) == 3
if len(self.data.shape) == 2:
self.data = self.data.unsqueeze(0).repeat(input.shape[0], 1, 1)
self.data[:, :, self.in_pos] = input.squeeze(2)
self.in_pos = (self.in_pos + 1) % self.max_length
def dequeue(self, num_deq=1, dilation=1):
# |
# |6|7|8|1|2|3|4|5|
# |
start = self.out_pos - ((num_deq - 1) * dilation)
if start < 0:
t1 = self.data[:, :, start::dilation]
t2 = self.data[:, :, self.out_pos % dilation:self.out_pos + 1:dilation]
t = torch.cat((t1, t2), 2)
else:
t = self.data[:, :, start:self.out_pos + 1:dilation]
self.out_pos = (self.out_pos + 1) % self.max_length
return t
def reset(self, device):
self.data = Variable(self.dtype(self.num_channels, self.max_length).zero_()).to(device)
self.in_pos = 0
self.out_pos = 0
def constant_pad_1d(
input,
target_size,
):
cp1d = torch.nn.ConstantPad1d((target_size - input.size(-1), 0), 0)
return cp1d(input)
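# Example: constant_pad_1d(x, 10) on x of shape (N, C, 7) left-pads with zeros to shape (N, C, 10),
# so the most recent samples stay right-aligned.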
class WaveNetModel(SequenceModule):
"""
A Complete Wavenet Model
Args:
layers (Int): Number of layers in each block
blocks (Int): Number of wavenet blocks of this model
dilation_channels (Int): Number of channels for the dilated convolution
residual_channels (Int): Number of channels for the residual connection
skip_channels (Int): Number of channels for the skip connections
classes (Int): Number of possible values each sample can have
output_length (Int): Number of samples that are generated for each input
kernel_size (Int): Size of the dilation kernel
dtype: Parameter type of this model
Shape:
        - Input: :math:`(N, L_{in}, C_{in})` (the forward pass transposes to channels-first internally)
        - Output: :math:`(N, L_{in} - R, \text{classes})`, where :math:`R` is the receptive field
        :math:`L_{in}` should be at least the length of the receptive field
"""
@property
def d_output(self):
return self.classes
def default_state(self, *batch_shape, device=None):
return None
def __init__(
self,
layers=10,
blocks=4,
dilation_channels=32,
residual_channels=32,
skip_channels=256,
end_channels=256,
classes=256,
# output_length=32,
kernel_size=2,
dtype=torch.FloatTensor,
bias=False,
):
super(WaveNetModel, self).__init__()
self.layers = layers
self.blocks = blocks
self.dilation_channels = dilation_channels
self.residual_channels = residual_channels
self.skip_channels = skip_channels
self.classes = classes
self.kernel_size = kernel_size
self.dtype = dtype
self.d_model = 256
# build model
receptive_field = 1
init_dilation = 1
self.dilations = []
self.dilated_queues = []
# self.main_convs = nn.ModuleList()
self.filter_convs = nn.ModuleList()
self.gate_convs = nn.ModuleList()
self.residual_convs = nn.ModuleList()
self.skip_convs = nn.ModuleList()
# 1x1 convolution to create channels
self.start_conv = nn.Conv1d(in_channels=self.classes,
out_channels=residual_channels,
kernel_size=1,
bias=bias)
for b in range(blocks):
additional_scope = kernel_size - 1
new_dilation = 1
for i in range(layers):
# dilations of this layer
self.dilations.append((new_dilation, init_dilation))
# dilated queues for fast generation
self.dilated_queues.append(DilatedQueue(max_length=(kernel_size - 1) * new_dilation + 1,
num_channels=residual_channels,
dilation=new_dilation,
dtype=dtype))
# dilated convolutions
self.filter_convs.append(nn.Conv1d(in_channels=residual_channels,
out_channels=dilation_channels,
kernel_size=kernel_size,
bias=bias))
self.gate_convs.append(nn.Conv1d(in_channels=residual_channels,
out_channels=dilation_channels,
kernel_size=kernel_size,
bias=bias))
# 1x1 convolution for residual connection
self.residual_convs.append(nn.Conv1d(in_channels=dilation_channels,
out_channels=residual_channels,
kernel_size=1,
bias=bias))
# 1x1 convolution for skip connection
self.skip_convs.append(nn.Conv1d(in_channels=dilation_channels,
out_channels=skip_channels,
kernel_size=1,
bias=bias))
receptive_field += additional_scope
additional_scope *= 2
init_dilation = new_dilation
new_dilation *= 2
self.end_conv_1 = nn.Conv1d(in_channels=skip_channels,
out_channels=end_channels,
kernel_size=1,
bias=True)
self.end_conv_2 = nn.Conv1d(in_channels=end_channels,
out_channels=classes,
kernel_size=1,
bias=True)
self.receptive_field = receptive_field
# print("Receptive field: {}".format(self.receptive_field))
### TODO
# This piece of code used to go in the generation script to set up the WaveNet in autoregressive mode
# Instead of being in the generation script, it should go as part of this __init__ or default_state()
# if isinstance(model.model, WaveNetModel) and not benchmark:
# l_prefix += model.model.receptive_field
# T += model.model.receptive_field
# if x.shape[1] == 1:
# x = x.repeat(1, l_prefix + 1)
#########
def wavenet(self, input, dilation_func):
x = self.start_conv(input)
skip = 0
# WaveNet layers
for i in range(self.blocks * self.layers):
# |----------------------------------------| *residual*
# | |
# | |-- conv -- tanh --| |
# -> dilate -|----| * ----|-- 1x1 -- + --> *input*
# |-- conv -- sigm --| |
# 1x1
# |
# ---------------------------------------> + -------------> *skip*
(dilation, init_dilation) = self.dilations[i]
residual = dilation_func(x, dilation, init_dilation, i)
# dilated convolution
filter = self.filter_convs[i](residual)
filter = torch.tanh(filter)
gate = self.gate_convs[i](residual)
gate = torch.sigmoid(gate)
x = filter * gate
# parametrized skip connection
s = x
if x.size(2) != 1:
s = dilate(x, 1, init_dilation=dilation)
s = self.skip_convs[i](s)
try:
skip = skip[:, :, -s.size(2):]
            except TypeError:  # skip is still the integer 0 before the first skip connection
skip = 0
skip = s + skip
x = self.residual_convs[i](x)
x = x + residual[:, :, (self.kernel_size - 1):]
x = F.relu(skip)
x = F.relu(self.end_conv_1(x))
x = self.end_conv_2(x)
return x
def wavenet_dilate(self, input, dilation, init_dilation, i):
x = dilate(input, dilation, init_dilation)
return x
def queue_dilate(self, input, dilation, init_dilation, i):
queue = self.dilated_queues[i]
queue.enqueue(input)
x = queue.dequeue(num_deq=self.kernel_size,
dilation=dilation)
return x
def forward(self, input, state=None, **kwargs):
# BLD -> BDL
input = input.transpose(1, 2).contiguous()
x = self.wavenet(
input,
dilation_func=self.wavenet_dilate,
)
# reshape output
x = x.transpose(1, 2).contiguous()
x = x[:, -(input.shape[2] - self.receptive_field):]
return x, None
def step(self, x, state=None):
if len(x.shape) == 1:
x = x.unsqueeze(1).unsqueeze(1)
elif len(x.shape) == 2:
x = x.unsqueeze(1)
if state is None:
# Reset dilated queues
for queue in self.dilated_queues:
queue.reset(device=x.device)
x = x.transpose(1, 2).contiguous()
x = self.wavenet(x, dilation_func=self.queue_dilate)
x = x.transpose(1, 2).contiguous()
x = x.squeeze(1) # (batch, dim)
return x, self.dilated_queues
def generate(self,
num_samples,
first_samples=None,
temperature=1.):
self.eval()
if first_samples is None:
first_samples = self.dtype(1).zero_()
        generated = Variable(first_samples)  # NOTE: `volatile` was removed from PyTorch; wrap generation in torch.no_grad() instead
num_pad = self.receptive_field - generated.size(0)
if num_pad > 0:
            generated = constant_pad_1d(generated, self.receptive_field)  # `self.scope` is undefined; pad to the receptive field
print("pad zero")
for i in range(num_samples):
input = Variable(torch.FloatTensor(1, self.classes, self.receptive_field).zero_())
input = input.scatter_(1, generated[-self.receptive_field:].view(1, -1, self.receptive_field), 1.)
x = self.wavenet(input,
dilation_func=self.wavenet_dilate)[:, :, -1].squeeze()
if temperature > 0:
x /= temperature
prob = F.softmax(x, dim=0)
prob = prob.cpu()
np_prob = prob.data.numpy()
x = np.random.choice(self.classes, p=np_prob)
x = Variable(torch.LongTensor([x]))
else:
x = torch.max(x, 0)[1].float()
generated = torch.cat((generated, x), 0)
generated = (generated / self.classes) * 2. - 1
mu_gen = mu_law_expansion(generated, self.classes)
self.train()
return mu_gen
def parameter_count(self):
par = list(self.parameters())
s = sum([np.prod(list(d.size())) for d in par])
return s
def cpu(self, type=torch.FloatTensor):
self.dtype = type
for q in self.dilated_queues:
q.dtype = self.dtype
super().cpu()
def load_latest_model_from(location, use_cuda=True):
files = [location + "/" + f for f in os.listdir(location)]
newest_file = max(files, key=os.path.getctime)
print("load model " + newest_file)
if use_cuda:
model = torch.load(newest_file)
else:
model = load_to_cpu(newest_file)
return model
def load_to_cpu(path):
model = torch.load(path, map_location=lambda storage, loc: storage)
model.cpu()
return model
| state-spaces-main | src/models/baselines/wavenet.py |
"""Neural Rough Differential Equations."""
import torch
from torch import nn
from torchdiffeq import odeint, odeint_adjoint
import bisect
def rdeint(logsig, h0, func, method='rk4', adjoint=False, return_sequences=False):
"""Analogous to odeint but for RDEs.
Note that we do not have time intervals here. This is because the log-ode method is always evaluated on [0, 1] and
thus are grid is always [0, 1, ..., num_intervals+1].
Args:
logsig (torch.Tensor): A tensor of logsignature of shape [N, L, logsig_dim]
h0 (torch.Tensor): The initial value of the hidden state.
func (nn.Module): The function to apply to the state h0.
method (str): The solver to use.
adjoint (bool): Set True to use the adjoint method.
return_sequences (bool): Set True to return a prediction at each step, else return just terminal time.
Returns:
torch.Tensor: The values of the hidden states at the specified times. This has shape [N, L, num_hidden].
"""
# Method to get the logsig value
logsig_getter = _GetLogsignature(logsig)
# A cell to apply the output of the function linearly to correct log-signature piece.
cell = _NRDECell(logsig_getter, func)
# Set options
    t, options = set_options(logsig, return_sequences=return_sequences)
# Solve
odeint_func = odeint_adjoint if adjoint else odeint
output = odeint_func(func=cell, y0=h0, t=t, method=method, options=options).transpose(0, 1)
return output
def set_options(logsig, return_sequences=False, eps=1e-5):
"""Sets the options to be passed to the relevant `odeint` function.
Args:
logsig (torch.Tensor): The logsignature of the path.
return_sequences (bool): Set True if a regression problem where we need the full sequence. This requires us
specifying the time grid as `torch.arange(0, T_final)` which is less memory efficient that specifying
the times `t = torch.Tensor([0, T_final])` along with an `step_size=1` in the options.
eps (float): The epsilon perturbation to make to integration points to distinguish the ends.
Returns:
torch.Tensor, dict: The integration times and the options dictionary.
"""
length = logsig.size(1) + 1
if return_sequences:
t = torch.arange(0, length, dtype=torch.float).to(logsig.device)
options = {'eps': eps}
else:
options = {'step_size': 1, 'eps': eps}
t = torch.Tensor([0, length]).to(logsig.device)
return t, options
class _GetLogsignature:
"""Given a time value, gets the corresponding piece of the log-signature.
When performing a forward solve, torchdiffeq will give us the time value that it is solving the ODE on, and we need
to return the correct piece of the log-signature corresponding to that value. For example, let our intervals ends
be the integers from 0 to 10. Then if the time value returned by torchdiffeq is 5.5, we need to return the
logsignature on [5, 6]. This function simply holds the logsignature, and interval end times, and returns the
correct logsignature given any time.
"""
def __init__(self, logsig):
self.knots = range(logsig.size(1))
self.logsig = logsig
def __getitem__(self, t):
index = bisect.bisect(self.knots, t) - 1
return self.logsig[:, index]
class _NRDECell(nn.Module):
"""Applies the function to the previous hidden state, and then applies the output linearly onto the log-signature.
The NeuralRDE model solves the following equation:
dH = f(H) o logsignature(X_{t_i, t_{i+1}) dt; H(0) = H_t_i.
given a function f, this class applies that function to the hidden state, and then applies that result linearly onto
the correct piece of the logsignature.
"""
def __init__(self, logsig_getter, func):
super().__init__()
self.logsig_getter = logsig_getter
self.func = func
def forward(self, t, h):
A = self.func(h)
output = torch.bmm(A, self.logsig_getter[t].unsqueeze(2)).squeeze(2)
return output
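# Shape sketch for _NRDECell.forward: func(h) returns A of shape (batch, hidden_dim, logsig_dim);
# batch-multiplying A by the interval's logsignature (batch, logsig_dim, 1) gives dh/dt of shape
# (batch, hidden_dim), i.e. f(H) applied linearly onto logsig(X_{[t_i, t_{i+1}]}).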
class NeuralRDE(nn.Module):
"""The generic module for learning with Neural RDEs.
This class wraps the `NeuralRDECell` that acts like an RNN-Cell. This method simply initialises the hidden dynamics
and computes the updated hidden dynamics through a call to `ode_int` using the `NeuralRDECell` as the function that
computes the update.
Here we model the dynamics of some abstract hidden state H via a CDE, and the response as a linear functional of the
hidden state, that is:
dH = f(H)dX; Y = L(H).
"""
def __init__(self,
initial_dim,
logsig_dim,
hidden_dim,
output_dim,
hidden_hidden_dim=15,
num_layers=3,
apply_final_linear=True,
solver='midpoint',
adjoint=False,
return_sequences=False):
"""
Args:
initial_dim (int): We use the initial value (t_0 x_0) as an initial condition else we have translation
invariance.
logsig_dim (int): The dimension of the log-signature.
hidden_dim (int): The dimension of the hidden state.
output_dim (int): The dimension of the output.
hidden_hidden_dim (int): The dimension of the hidden layer in the RNN-like block.
num_layers (int): The number of hidden layers in the vector field. Set to 0 for a linear vector field.
apply_final_linear (bool): Set False to ignore the final linear output.
solver (str): ODE solver, must be implemented in torchdiffeq.
adjoint (bool): Set True to use odeint_adjoint.
return_sequences (bool): If True will return the linear function on the final layer, else linear function on
all layers.
"""
super().__init__()
self.initial_dim = initial_dim
self.logsig_dim = logsig_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.hidden_hidden_dim = hidden_hidden_dim
self.num_layers = num_layers
self.apply_final_linear = apply_final_linear
self.solver = solver
self.adjoint = adjoint
self.return_sequences = return_sequences
# Initial to hidden
self.initial_linear = nn.Linear(initial_dim, hidden_dim)
# The net applied to h_prev
self.func = _NRDEFunc(hidden_dim, logsig_dim, hidden_dim=hidden_hidden_dim, num_layers=num_layers)
# Linear classifier to apply to final layer
self.final_linear = nn.Linear(self.hidden_dim, self.output_dim) if apply_final_linear else lambda x: x
def forward(self, inputs):
# Setup the inital hidden layer
assert len(inputs) == 2, "`inputs` must be a 2-tuple containing `(inital_values, logsig)`."
initial, logsig = inputs
h0 = self.initial_linear(initial)
# Perform the adjoint operation
out = rdeint(
logsig, h0, self.func, method=self.solver, adjoint=self.adjoint, return_sequences=self.return_sequences
)
# Outputs
outputs = self.final_linear(out[:, -1, :]) if not self.return_sequences else self.final_linear(out)
return outputs
class _NRDEFunc(nn.Module):
"""The function applied to the hidden state in the log-ode method.
This creates a simple RNN-like block to be used as the computation function f in:
dh/dt = f(h) o logsig(X_{[t_i, t_{i+1}]})
To build a custom version, simply use any NN architecture such that `input_dim` is the size of the hidden state,
and the output dim must be of size `input_dim * logsig_dim`. Simply reshape the output onto a tensor of size
`[batch, input_dim, logsig_dim]`.
"""
def __init__(self, input_dim, logsig_dim, num_layers=1, hidden_dim=15):
super().__init__()
self.input_dim = input_dim
self.logsig_dim = logsig_dim
self.hidden_dim = hidden_dim
self.num_layers = num_layers
# Additional layers are just hidden to hidden with relu activation
additional_layers = [nn.ReLU(), nn.Linear(hidden_dim, hidden_dim)] * (num_layers - 1) if num_layers > 1 else []
# The net applied to h_prev
self.net = nn.Sequential(*[
nn.Linear(input_dim, hidden_dim),
*additional_layers,
nn.Tanh(),
nn.Linear(hidden_dim, input_dim * logsig_dim),
]) if num_layers > 0 else nn.Linear(input_dim, input_dim * logsig_dim)
def forward(self, h):
return self.net(h).view(-1, self.input_dim, self.logsig_dim)
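# Usage sketch (tensor shapes are assumptions drawn from the forward passes above):
#   initial: (batch, initial_dim)                 -- e.g. the initial value (t_0, x_0) of each path
#   logsig:  (batch, num_intervals, logsig_dim)   -- logsignature over each sub-interval
#   model = NeuralRDE(initial_dim, logsig_dim, hidden_dim=32, output_dim=1)
#   y = model((initial, logsig))  # (batch, output_dim), or (batch, num_intervals + 1, output_dim)
#                                 # when return_sequences=True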
| state-spaces-main | src/models/baselines/nrde.py |
"""Implementation of UnICORNN model.
Adapted from https://github.com/tk-rusch/unicornn/blob/main/health_care/network.py.
Original docstring:
This code implements a fast CUDA version of the stacked UnICORNN model.
We emphasise that this code builds up on the fast CUDA implementation of the IndRNN https://github.com/Sunnydreamrain/IndRNN_pytorch.
"""
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.nn import Parameter
from collections import namedtuple
from src.models.sequence.base import SequenceModule, TransposedModule
try:
from cupy.cuda import function
from pynvrtc.compiler import Program
_unicornn_available = True
except ImportError:
_unicornn_available = False
UnICORNN_CODE = """
extern "C" {
__forceinline__ __device__ float sigmoid(float x)
{
return (float) 1./(1.+exp(-x));
}
__forceinline__ __device__ float sigmoid_grad(float x)
{
return (float) exp(-x)/((1.+exp(-x))*(1.+exp(-x)));
}
__forceinline__ __device__ float activation(float x)
{
return (float)tanh(x);
}
__forceinline__ __device__ float calc_grad_activation(float x)
{
return (float)1/(cosh(x)*cosh(x));
}
__global__ void unicornn_fwd( const float * __restrict__ x,
const float * __restrict__ weight_hh, const float * __restrict__ hy_initial,
const float * __restrict__ hz_initial, float * __restrict__ hy_final,
float * __restrict__ hz_final,
const int len, const int batch, const int d_model, const float * __restrict__ c,
double dt, double alpha,
float * __restrict__ hy_all)
{
int ncols = batch*d_model;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
const float weight_hh_cur = *(weight_hh + (col%d_model));
const float c_cur = *(c + (col%d_model));
float hy = *(hy_initial + col);
float hz = *(hz_initial + col);
const float *xp = x+col;
float *hy_all_p = hy_all+col;
for (int row = 0; row < len; ++row)
{
hz -= dt*sigmoid(c_cur)*(activation(hy*weight_hh_cur+(*xp))+alpha*hy);
hy += dt*sigmoid(c_cur)*hz;
*hy_all_p = hy;
xp += ncols;
hy_all_p += ncols;
}
*(hy_final + col) = hy;
*(hz_final + col) = hz;
}
__global__ void unicornn_bwd(const float * __restrict__ x,
const float * __restrict__ weight_hh, const float * __restrict__ hy_final,
const float * __restrict__ hz_final,
const float * __restrict__ grad_h,
const int len, const int batch, const int d_model, const float * __restrict__ c,
double dt, double alpha, float * __restrict__ grad_x,
float * __restrict__ grad_weight_hh, float * __restrict__ grad_c)
{
int ncols = batch*d_model;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
const float weight_hh_cur = *(weight_hh + (col%d_model));
const float c_cur = *(c + (col%d_model));
float gweight_hh = 0;
float gc = 0;
const float *xp = x+col + (len-1)*ncols;
float *gxp = grad_x + col + (len-1)*ncols;
const float *ghp = grad_h + col + (len-1)*ncols;
float delta_z = 0;
float delta_y = (*ghp);
float delta_dt = 0;
float hy = *(hy_final + col);
float hz = *(hz_final + col);
for (int row = len-1; row >= 0; --row)
{
delta_dt = delta_y*dt*sigmoid_grad(c_cur)*hz;
// reconstruct hidden states based on the final hidden state using adjoint symplectic Euler:
hy=hy-dt*sigmoid(c_cur)*hz;
hz=hz+dt*sigmoid(c_cur)*(activation(hy*weight_hh_cur+(*xp))+alpha*hy);
delta_z += delta_y*dt*sigmoid(c_cur);
gweight_hh -= delta_z*dt*sigmoid(c_cur)*calc_grad_activation(hy*weight_hh_cur+(*xp))*hy;
gc += delta_dt-delta_z*(dt*sigmoid_grad(c_cur)*(activation(hy*weight_hh_cur+(*xp))+alpha*hy));
*gxp = -delta_z*dt*sigmoid(c_cur)*calc_grad_activation(hy*weight_hh_cur+(*xp));
if(row==0)break;
ghp -= ncols;
delta_y += -delta_z*dt*sigmoid(c_cur)*(calc_grad_activation(hy*weight_hh_cur+(*xp))*weight_hh_cur+alpha) + (*ghp);
xp -= ncols;
gxp -= ncols;
}
atomicAdd(grad_weight_hh + (col%d_model), gweight_hh);
atomicAdd(grad_c + (col%d_model), gc);
}
}
"""
class UnICORNN_compile:
if _unicornn_available:
_UnICORNN_PROG = Program(UnICORNN_CODE, "unicornn_prog.cu")
_UnICORNN_PTX = _UnICORNN_PROG.compile()
_DEVICE2FUNC = {}
def __init__(self):
super(UnICORNN_compile, self).__init__()
def compile_functions(self):
device = torch.cuda.current_device()
mod = function.Module()
mod.load(bytes(self._UnICORNN_PTX.encode()))
fwd_func = mod.get_function("unicornn_fwd")
bwd_func = mod.get_function("unicornn_bwd")
Stream = namedtuple("Stream", ["ptr"])
current_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
self._DEVICE2FUNC[device] = (current_stream, fwd_func, bwd_func)
return current_stream, fwd_func, bwd_func
def get_functions(self):
res = self._DEVICE2FUNC.get(torch.cuda.current_device(), None)
return res if res else self.compile_functions()
class UnICORNN_Compute_GPU(Function):
@staticmethod
def forward(ctx, x, weight_hh, hy_initial, hz_initial, c, alpha, dt):
comp = UnICORNN_compile()
length = x.size(0) if x.dim() == 3 else 1
batch = x.size(-2)
d_model = x.size(-1)
ncols = batch * d_model
thread_per_block = min(512, ncols)
num_block = (ncols - 1) // thread_per_block + 1
size = (length, batch, d_model) if x.dim() == 3 else (batch, d_model)
hy_all = x.new(*size)
hy_final = x.new(batch, d_model)
hz_final = x.new(batch, d_model)
stream, fwd_func, _ = comp.get_functions()
FUNC = fwd_func
FUNC(
args=[
x.contiguous().data_ptr(),
weight_hh.contiguous().data_ptr(),
hy_initial.contiguous().data_ptr(),
hz_initial.contiguous().data_ptr(),
hy_final.contiguous().data_ptr(),
hz_final.contiguous().data_ptr(),
length,
batch,
d_model,
c.contiguous().data_ptr(),
dt.item(),
alpha.item(),
hy_all.contiguous().data_ptr(),
],
block=(thread_per_block, 1, 1),
grid=(num_block, 1, 1),
stream=stream,
)
ctx.save_for_backward(x, weight_hh, hy_final, hz_final, c, alpha, dt)
return hy_all
@staticmethod
def backward(ctx, grad_h):
x, weight_hh, hy_final, hz_final, c, alpha, dt = ctx.saved_tensors
comp = UnICORNN_compile()
length = x.size(0) if x.dim() == 3 else 1
batch = x.size(-2)
d_model = x.size(-1)
ncols = batch * d_model
thread_per_block = min(256, ncols)
num_block = (ncols - 1) // thread_per_block + 1
grad_x = x.new(*x.size())
grad_weight_hh = x.new(d_model).zero_()
grad_c = x.new(d_model).zero_()
stream, _, bwd_func = comp.get_functions()
FUNC = bwd_func
FUNC(
args=[
x.contiguous().data_ptr(),
weight_hh.contiguous().data_ptr(),
hy_final.contiguous().data_ptr(),
hz_final.contiguous().data_ptr(),
grad_h.contiguous().data_ptr(),
length,
batch,
d_model,
c.contiguous().data_ptr(),
dt.item(),
alpha.item(),
grad_x.contiguous().data_ptr(),
grad_weight_hh.contiguous().data_ptr(),
grad_c.contiguous().data_ptr(),
],
block=(thread_per_block, 1, 1),
grid=(num_block, 1, 1),
stream=stream,
)
return grad_x, grad_weight_hh, None, None, grad_c, None, None
class UnICORNN_recurrence(nn.Module):
def __init__(self, d_model, dt, alpha):
super(UnICORNN_recurrence, self).__init__()
self.d_model = d_model
self.dt = torch.tensor(dt)
self.c_ = Parameter(torch.Tensor(d_model))
self.alpha = torch.tensor(alpha)
self.weight_hh = Parameter(torch.Tensor(d_model))
self.reset_parameters()
def reset_parameters(self):
for name, weight in self.named_parameters():
if "weight_hh" in name:
nn.init.uniform_(weight, a=0, b=1)
if "c_" in name:
nn.init.uniform_(weight, a=-0.1, b=0.1)
def forward(self, input):
hy0, hz0 = (
input.data.new(input.size(-2), input.size(-1)).zero_(),
input.data.new(input.size(-2), input.size(-1)).zero_(),
)
return UnICORNN_Compute_GPU.apply(
input, self.weight_hh, hy0, hz0, self.c_, self.alpha, self.dt
)
class Dropout_overtime(torch.autograd.Function):
@staticmethod
def forward(ctx, input, p=0.5, training=False):
output = input.clone()
noise = input.data.new(input.size(-2), input.size(-1))
if training:
noise.bernoulli_(1 - p).div_(1 - p)
noise = noise.unsqueeze(0).expand_as(input)
output.mul_(noise)
ctx.save_for_backward(noise)
ctx.training = training
return output
@staticmethod
def backward(ctx, grad_output):
(noise,) = ctx.saved_tensors
if ctx.training:
return grad_output.mul(noise), None, None
else:
return grad_output, None, None
dropout_overtime = Dropout_overtime.apply
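# Note: Dropout_overtime samples a single mask of shape (batch, d_model), rescales it by 1/(1-p),
# and expands it over the leading time dimension, so the same units are dropped at every time step;
# the expanded mask is saved and reused in the backward pass.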
class LinearInitovertime(nn.Module):
def __init__(self, d_input, d_model, bias=True):
super(LinearInitovertime, self).__init__()
self.fc = nn.Linear(d_input, d_model, bias=bias)
self.d_input = d_input
self.d_model = d_model
def forward(self, x):
y = x.contiguous().view(-1, self.d_input)
y = self.fc(y)
y = y.view(x.size()[0], x.size()[1], self.d_model)
return y
@TransposedModule
class UnICORNN(SequenceModule):
def __init__(
self,
# d_input,
# d_output,
# l_output,
d_model,
dt,
alpha,
n_layers,
dropout=0.1,
**kwargs
):
if not _unicornn_available:
raise ImportError(
"Check unicornn codebase for install instructions. Requires cupy and pynvrtc."
)
super(UnICORNN, self).__init__()
self.d_model = d_model
self.d_output = d_model
self.dropout = dropout
self.nlayers = n_layers
# self.l_output = l_output
self.DIs = nn.ModuleList()
# denseinput = LinearInitovertime(d_input, nhid)
# self.DIs.append(denseinput)
# for x in range(self.nlayers - 1):
for x in range(self.nlayers):
denseinput = LinearInitovertime(d_model, d_model)
self.DIs.append(denseinput)
# self.classifier = nn.Linear(nhid, d_output)
self.init_weights()
self.RNNs = []
for x in range(self.nlayers):
rnn = UnICORNN_recurrence(d_model, dt[x], alpha)
self.RNNs.append(rnn)
self.RNNs = torch.nn.ModuleList(self.RNNs)
def init_weights(self):
for name, param in self.named_parameters():
if ("fc" in name) and "weight" in name:
nn.init.kaiming_uniform_(param, a=8, mode="fan_in")
# if "classifier" in name and "weight" in name:
# nn.init.kaiming_normal_(param.data)
if "bias" in name:
param.data.fill_(0.0)
def forward(self, input, *args, **kwargs):
input = input.transpose(0, 1)
rnnoutputs = {}
rnnoutputs["outlayer-1"] = input
for x in range(len(self.RNNs)):
rnnoutputs["dilayer%d" % x] = self.DIs[x](
rnnoutputs["outlayer%d" % (x - 1)]
)
rnnoutputs["outlayer%d" % x] = self.RNNs[x](rnnoutputs["dilayer%d" % x])
rnnoutputs["outlayer%d" % x] = dropout_overtime(
rnnoutputs["outlayer%d" % x], self.dropout, self.training
)
# temp = rnnoutputs["outlayer%d" % (len(self.RNNs) - 1)][-1]
# output = self.classifier(temp)
output = rnnoutputs["outlayer%d" % (len(self.RNNs) - 1)]
output = output.transpose(0, 1)
# if self.l_output == 0:
# output = output[:, -1]
# else:
# output = output[:, -self.l_output :]
return output
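# Minimal CPU-side usage sketch (illustrative shapes only): the UnICORNN recurrence itself requires
# a CUDA device plus cupy/pynvrtc, so only the dropout helper is exercised here.
if __name__ == "__main__":
    _x = torch.randn(10, 4, 8)            # (time, batch, d_model)
    _y = dropout_overtime(_x, 0.5, True)  # training=True applies the tied mask
    assert torch.equal((_y == 0).any(dim=0), (_y == 0).all(dim=0))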
| state-spaces-main | src/models/baselines/unicornn.py |
"""Reproduction of ViT. Currently not used in favor of timm ViT."""
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from src.models.sequence.base import SequenceModule
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x, mask = None):
b, n, _, h = *x.shape, self.heads
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
mask_value = -torch.finfo(dots.dtype).max
if mask is not None:
mask = F.pad(mask.flatten(1), (1, 0), value = True)
assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'
mask = rearrange(mask, 'b i -> b () i ()') * rearrange(mask, 'b j -> b () () j')
dots.masked_fill_(~mask, mask_value)
del mask
attn = dots.softmax(dim=-1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Residual(PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout))),
Residual(PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout)))
]))
def forward(self, x, mask = None):
for attn, ff in self.layers:
x = attn(x, mask = mask)
x = ff(x)
return x
class ViT(SequenceModule):
def __init__(
self,
d_model,
nhead: int = 8,
num_encoder_layers: int = 6,
dim_feedforward: int = 2048,
pool: str = "mean",
        max_len: int = 2352,  # maximum sequence length (sizes the positional embedding)
dropout: float = 0.1,
activation: str = "gelu",
prenorm: bool = False,
prepend_class_token: bool = True,
**kwargs,
) -> None:
super().__init__()
self.d_model = d_model
self.d_output = d_model
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.pool = pool
self.pos_embedding = nn.Parameter(torch.randn(1, max_len + 1, d_model))
self.cls_token = nn.Parameter(torch.randn(1, 1, d_model))
self.dropout = nn.Dropout(dropout)
self.transformer = Transformer(d_model, num_encoder_layers, nhead, 4*d_model, dim_feedforward, dropout)
self.to_latent = nn.Identity()
self.mlp_head = nn.Sequential(
nn.LayerNorm(d_model),
# nn.Linear(d_model, d_output) # Should go in decoder
)
def forward(self, x, *args, **kwargs):
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
x = torch.cat((cls_tokens, x), dim=1)
x += self.pos_embedding[:, :(n + 1)]
x = self.dropout(x)
x = self.transformer(x)
x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
x = self.to_latent(x)
return self.mlp_head(x), None
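# Minimal shape check (illustrative hyperparameters only): a batch of 196 "patch tokens" with
# d_model=64 is pooled to a single d_model-sized vector per example; the classification head is
# expected to live in a downstream decoder.
if __name__ == "__main__":
    _vit = ViT(d_model=64, nhead=4, num_encoder_layers=2, dim_feedforward=128, max_len=196)
    _y, _ = _vit(torch.randn(2, 196, 64))  # (batch, seq_len, d_model)
    assert _y.shape == (2, 64)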
| state-spaces-main | src/models/baselines/vit.py |
"""2D ResNet baselines from torchvision."""
import torch.nn as nn
import torchvision.models as models
from einops import rearrange
class TorchVisionResnet(nn.Module):
def __init__(
self,
variant="resnet18", # e.g. [ "resnet18" | "resnet34" | "resnet50" | "wide_resnet50_2" ]
):
super().__init__()
self.resnet = getattr(models, variant)(pretrained=False)
# Remove pooling from stem: too much downsizing for CIFAR
self.resnet.maxpool = nn.Identity()
# Remove final head: handled by decoder
self.d_output = self.resnet.fc.in_features
self.resnet.fc = nn.Identity()
self.resnet.avgpool = nn.Identity()
def forward(self, x, *args, **kwargs):
x = rearrange(x, 'b ... h -> b h ...')
if x.size(1) == 1:
x = x.repeat(1, 3, 1, 1)
elif x.size(1) == 3:
pass
else:
raise NotImplementedError
y = self.resnet(x)
return y, None
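# Minimal usage sketch (illustrative input size): the wrapper expects channels-last input (B, H, W, C)
# and moves channels first itself; with avgpool/fc replaced by identities, the output is the flattened
# final feature map, so pooling and classification are left to a downstream decoder.
if __name__ == "__main__":
    import torch

    _model = TorchVisionResnet("resnet18")
    _y, _ = _model(torch.randn(2, 32, 32, 3))  # CIFAR-sized RGB input
    assert _y.shape[0] == 2 and _y.shape[1] % _model.d_output == 0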
| state-spaces-main | src/models/baselines/resnet.py |
"""End-to-end classification Transformer adapted from PyTorch examples.
The isotropic model backbone should subsume this architecture. See config configs/model/transformer.yaml
"""
import copy
from typing import Optional, Any
from typing import Tuple
import torch
from torch import Tensor
from torch.nn import Module
from torch.nn import functional as F
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.init import xavier_uniform_
from torch.nn.modules.container import ModuleList
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.linear import Linear
from torch.nn.modules.normalization import LayerNorm
from torch.nn.parameter import Parameter
class ClassificationTransformer(Module):
def __init__(
self,
d_input,
d_output,
d_model: int = 512,
nhead: int = 8,
num_encoder_layers: int = 6,
dim_feedforward: int = 2048,
dropout: float = 0.1,
activation: str = "gelu",
prenorm: bool = False,
**kwargs,
) -> None:
super().__init__()
# Input projection to make the number of channels `d_model`
self.input_proj = torch.nn.Linear(
in_features=d_input,
out_features=d_model,
)
# Create the TransformerEncoder blocks
self.encoder = TransformerEncoder(
TransformerEncoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, share_qk=False, prenorm=prenorm
),
num_encoder_layers,
LayerNorm(d_model)
)
# Output projection
self.output_proj = torch.nn.Linear(
in_features=d_model,
out_features=d_output,
)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def forward(
self,
src: Tensor,
*args,
**kwargs
) -> Tensor:
# Encode the input (B, S, C)
x = self.input_proj(src)
x = self.encoder.forward(x)
return self.output_proj(x[:, -1, :]) # uses the encoding of the last "token" to classify
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
class Transformer(Module):
r"""A transformer model. User is able to modify the attributes as needed. The architecture
is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
Processing Systems, pages 6000-6010. Users can build the BERT(https://arxiv.org/abs/1810.04805)
model with corresponding parameters.
Args:
d_model: the number of expected features in the encoder/decoder inputs (default=512).
nhead: the number of heads in the multiheadattention models (default=8).
num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of encoder/decoder intermediate layer, relu or gelu (default=relu).
custom_encoder: custom encoder (default=None).
custom_decoder: custom decoder (default=None).
Examples::
>>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)
>>> src = torch.rand((10, 32, 512))
>>> tgt = torch.rand((20, 32, 512))
>>> out = transformer_model(src, tgt)
Note: A full example to apply nn.Transformer module for the word language model is available in
https://github.com/pytorch/examples/tree/master/word_language_model
"""
def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
activation: str = "relu", custom_encoder: Optional[Any] = None,
custom_decoder: Optional[Any] = None, approx: dict = None) -> None:
super(Transformer, self).__init__()
if custom_encoder is not None:
self.encoder = custom_encoder
else:
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation,
share_qk=False)
encoder_norm = LayerNorm(d_model)
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
if custom_decoder is not None:
self.decoder = custom_decoder
else:
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation,
share_qk=False)
decoder_norm = LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def forward(self, src: Tensor, tgt: Tensor, src_mask: Optional[Tensor] = None, tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Take in and process masked source/target sequences.
src: the sequence to the encoder (required).
tgt: the sequence to the decoder (required).
src_mask: the additive mask for the src sequence (optional).
tgt_mask: the additive mask for the tgt sequence (optional).
memory_mask: the additive mask for the encoder output (optional).
src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).
tgt_key_padding_mask: the ByteTensor mask for tgt keys per batch (optional).
memory_key_padding_mask: the ByteTensor mask for memory keys per batch (optional).
Shape:
- src: :math:`(S, N, E)`.
- tgt: :math:`(T, N, E)`.
- src_mask: :math:`(S, S)`.
- tgt_mask: :math:`(T, T)`.
- memory_mask: :math:`(T, S)`.
- src_key_padding_mask: :math:`(N, S)`.
- tgt_key_padding_mask: :math:`(N, T)`.
- memory_key_padding_mask: :math:`(N, S)`.
Note: [src/tgt/memory]_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
[src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by
the attention. If a ByteTensor is provided, the non-zero positions will be ignored while the zero
positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- output: :math:`(T, N, E)`.
Note: Due to the multi-head attention architecture in the transformer model,
            the output sequence length of a transformer is the same as the input sequence
            (i.e. target) length of the decoder.
where S is the source sequence length, T is the target sequence length, N is the
batch size, E is the feature number
Examples:
>>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
"""
if src.size(1) != tgt.size(1):
raise RuntimeError("the batch number of src and tgt must be equal")
if src.size(2) != self.d_model or tgt.size(2) != self.d_model:
raise RuntimeError("the feature number of src and tgt must be equal to d_model")
memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask)
output = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
return output
def generate_square_subsequent_mask(self, sz: int) -> Tensor:
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
class TransformerEncoder(Module):
r"""TransformerEncoder is a stack of N encoder layers
Args:
encoder_layer: an instance of the TransformerEncoderLayer() class (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
>>> src = torch.rand(10, 32, 512)
>>> out = transformer_encoder(src)
"""
__constants__ = ['norm']
def __init__(self, encoder_layer, num_layers, norm=None):
super(TransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src: Tensor, mask: Optional[Tensor] = None, types: Optional[dict] = None,
src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the input through the encoder layers in turn.
Args:
src: the sequence to the encoder (required).
mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
output = src
for mod in self.layers:
output = mod(output, types=types, src_mask=mask, src_key_padding_mask=src_key_padding_mask)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(Module):
r"""TransformerDecoder is a stack of N decoder layers
Args:
decoder_layer: an instance of the TransformerDecoderLayer() class (required).
num_layers: the number of sub-decoder-layers in the decoder (required).
norm: the layer normalization component (optional).
Examples::
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
>>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
>>> memory = torch.rand(10, 32, 512)
>>> tgt = torch.rand(20, 32, 512)
>>> out = transformer_decoder(tgt, memory)
"""
__constants__ = ['norm']
def __init__(self, decoder_layer, num_layers, norm=None):
super(TransformerDecoder, self).__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, tgt: Tensor, memory: Tensor, types: Optional[dict] = None, tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the inputs (and mask) through the decoder layer in turn.
Args:
tgt: the sequence to the decoder (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
output = tgt
for mod in self.layers:
output = mod(output, memory, types=types, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerEncoderLayer(Module):
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
share_qk=False,
prenorm=False,
):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, share_qk=share_qk)
# Implementation of Feedforward model
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.prenorm = prenorm
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerEncoderLayer, self).__setstate__(state)
def forward(self, src: Tensor, types: Optional[dict] = None, src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
if self.prenorm:
# src = self.norm1(src)
src2 = self.norm1(src)
src2 = self.self_attn(src2, src2, src2, types=types, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
else:
# Old code
src2 = self.self_attn(src, src, src, types=types, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
if self.prenorm:
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
else:
# Old code
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
class TransformerDecoderLayer(Module):
r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
This standard decoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
Examples::
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
>>> memory = torch.rand(10, 32, 512)
>>> tgt = torch.rand(20, 32, 512)
>>> out = decoder_layer(tgt, memory)
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", share_qk=False,
approx=None):
super(TransformerDecoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.norm3 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.dropout3 = Dropout(dropout)
self.activation = _get_activation_fn(activation)
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerDecoderLayer, self).__setstate__(state)
def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
r"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt: the sequence to the decoder layer (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def _get_clones(module, N):
return ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
def kl(p, q):
kl_dis = F.kl_div(p, q)
return kl_dis
def mse(p, q):
mse_loss = F.mse_loss(p, q)
return mse_loss
def l1(p, q):
l1_loss = F.l1_loss(p, q)
return l1_loss
def smart_sort(x, permutation):
d1, d2 = x.size()
ret = x[
torch.arange(d1).unsqueeze(1).repeat((1, d2)).flatten(),
permutation.flatten()
].view(d1, d2)
return ret
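# In effect, smart_sort gathers along dim 1 row by row: ret[i, j] == x[i, permutation[i, j]].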
def sparsify(target, params_reduction):
target_sparse = target.clone()
N, target_l, seq_l = target_sparse.shape
sorted_tensor, indices_tensor = torch.sort(target_sparse, dim=-1, descending=True)
topk = int(round(seq_l * (1 - params_reduction)))
mask = torch.zeros_like(target_sparse, dtype=torch.bool).scatter_(-1, indices_tensor[:, :, :topk], 1)
target_sparse[~mask] = float(
'-inf') # To zero out these values, we set their logit to be -inf, so that after softmax they are zero
return target_sparse, mask.bool()
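# Illustration: with seq_l = 8 and params_reduction = 0.75, topk = 2, so only the two largest
# logits in each row survive; the rest are set to -inf and vanish after the softmax.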
def low_rank(target, sparsity):
N, target_l, seq_l = target.shape
target_lr = target.clone()
try:
u, s, v = torch.svd(target_lr)
topk = int(round(seq_l * (1 - sparsity)))
# assert torch.dist(target_lr, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.transpose(-2, -1)))<1e-2
s[:, topk:] = 0
target_lr = torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.transpose(-2, -1))
return target_lr, True
except: # torch.svd may have convergence issues for GPU and CPU.
return target_lr, False
def log_stats(approx, target):
eps = 1e-5
sparse_l1 = l1(approx, target)
sparse_kl = kl(torch.log(approx + eps), target + eps)
sparse_kl_inverse = kl(torch.log(target + eps), approx + eps)
return torch.cat([sparse_l1.view(1), sparse_kl.view(1), sparse_kl_inverse.view(1)])
def compute_single_distance(target_raw, attn_mask, params_reduction, approx_type, alpha=0.5):
stats = torch.zeros([1, 3])
target_raw[target_raw < -1e7] = float('-inf')
target = F.softmax(target_raw, dim=-1)
succeed = True
approx_target = 0
# sparse
if approx_type == "sparse":
target_sparse, mask = sparsify(target_raw, params_reduction)
if attn_mask is not None:
target_sparse.masked_fill_(attn_mask, float('-inf'), )
approx_target = torch.softmax(target_sparse, dim=-1)
stats = log_stats(approx_target, target)
# low_rank
elif approx_type == "low_rank":
new_sparsity = 1 - (1 - params_reduction) / 2
target_lr, succeed = low_rank(target, new_sparsity)
if succeed:
target_lr[target_lr < 0] = 0.0
if attn_mask is not None:
target_lr.masked_fill_(attn_mask, 0.0, )
approx_target = F.normalize(target_lr, p=1, dim=-1)
stats = log_stats(approx_target, target)
# sparse+low_rank
elif approx_type == "sparse_low_rank":
target_sparse = target.clone()
params_sparse = alpha * (1 - params_reduction)
_, mask = sparsify(target, 1 - params_sparse)
target_sparse[~mask] = 0.0
target_sparse_lr = target - target_sparse
params_lr = (1 - alpha) * (1 - params_reduction) / 2
target_sparse_lr, succeed = low_rank(target_sparse_lr, 1 - params_lr)
if succeed:
target_sparse_lr[target_sparse_lr < 0] = 0.0
target_sparse_lr += target_sparse
if attn_mask is not None:
target_sparse_lr.masked_fill_(attn_mask, 0.0, )
approx_target = F.normalize(target_sparse_lr, p=1, dim=-1)
stats = log_stats(approx_target, target)
else:
print("Approximation type is not implemented")
return approx_target, stats, succeed
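# Summary of the branches above: "sparse" keeps the top-k logits per row, "low_rank" truncates the
# SVD of the softmaxed attention map, and "sparse_low_rank" splits the (1 - params_reduction) budget
# between a sparse part and a low-rank residual via `alpha`; `stats` stacks the L1 distance and the
# two KL divergences between the approximate and exact attention maps.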
class MultiheadAttention(torch.nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None,
vdim=None, share_qk=False):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = bias
self.add_bias_kv = add_bias_kv
self.add_zero_attn = add_zero_attn
self.q_proj_weight = torch.nn.Linear(embed_dim, embed_dim, bias=self.bias)
self.k_proj_weight = torch.nn.Linear(embed_dim, self.kdim, bias=self.bias)
self.v_proj_weight = torch.nn.Linear(embed_dim, self.vdim, bias=self.bias)
xavier_uniform_(self.q_proj_weight.weight)
xavier_uniform_(self.k_proj_weight.weight)
xavier_uniform_(self.v_proj_weight.weight)
self.out_proj = torch.nn.Linear(embed_dim, self.vdim)
# self._reset_parameters()
if self.bias:
constant_(self.q_proj_weight.bias, 0.)
constant_(self.v_proj_weight.bias, 0.)
constant_(self.out_proj.bias, 0.)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
xavier_normal_(self.bias_k)
xavier_normal_(self.bias_v)
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
if share_qk:
self.in_proj_container = SharedQK_Proj(self.q_proj_weight, self.v_proj_weight)
else:
self.in_proj_container = InProjContainer(self.q_proj_weight, self.k_proj_weight, self.v_proj_weight)
self.multihead_attention = MultiheadAttentionContainer(num_heads,
self.in_proj_container,
ScaledDotProduct(self.dropout),
self.out_proj)
def forward(self, query, key, value, types=None, key_padding_mask=None, need_weights=True, attn_mask=None):
if attn_mask is not None:
if attn_mask.dim() == 2:
attn_mask = attn_mask.view(-1, attn_mask.size(0), attn_mask.size(1))
attn_mask = attn_mask.bool()
return self.multihead_attention(query, key, value, types, attn_mask, self.bias_k, self.bias_v)
class MultiheadAttentionContainer(torch.nn.Module):
def __init__(self, nhead, in_proj_container, attention_layer, out_proj):
r""" A multi-head attention container
Args:
nhead: the number of heads in the multiheadattention model
in_proj_container: A container of multi-head in-projection linear layers (a.k.a nn.Linear).
attention_layer: The attention layer.
out_proj: The multi-head out-projection layer (a.k.a nn.Linear).
Examples::
>>> import torch
>>> embed_dim, num_heads, bsz = 10, 5, 64
>>> in_proj_container = InProjContainer(torch.nn.Linear(embed_dim, embed_dim),
torch.nn.Linear(embed_dim, embed_dim),
torch.nn.Linear(embed_dim, embed_dim))
>>> MHA = MultiheadAttentionContainer(num_heads,
in_proj_container,
ScaledDotProduct(),
torch.nn.Linear(embed_dim, embed_dim))
>>> query = torch.rand((21, bsz, embed_dim))
>>> key = value = torch.rand((16, bsz, embed_dim))
>>> attn_output, attn_weights = MHA(query, key, value)
>>> print(attn_output.shape)
            torch.Size([21, 64, 10])
"""
super(MultiheadAttentionContainer, self).__init__()
self.nhead = nhead
self.in_proj_container = in_proj_container
self.attention_layer = attention_layer
self.out_proj = out_proj
self.attn_map = 0
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
types: Optional[dict] = None,
attn_mask: Optional[torch.Tensor] = None,
bias_k: Optional[torch.Tensor] = None,
bias_v: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
r"""
Args:
query, key, value (Tensor): map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
attn_mask, bias_k and bias_v (Tensor, optional): keyword arguments passed to the attention layer.
See the definitions in the attention.
Shape:
- Inputs:
- query: :math:`(L, N, E)`
- key: :math:`(S, N, E)`
- value: :math:`(S, N, E)`
- attn_mask, bias_k and bias_v: same with the shape of the corresponding args in attention layer.
- Outputs:
- attn_output: :math:`(L, N, E)`
- attn_output_weights: :math:`(N * H, L, S)`
            where L is the target length, S is the sequence length, H is the number of attention heads,
N is the batch size, and E is the embedding dimension.
"""
tgt_len, src_len, bsz, embed_dim = query.size(-3), key.size(-3), query.size(-2), query.size(-1)
q, k, v = self.in_proj_container(query, key, value)
assert q.size(-1) % self.nhead == 0, "query's embed_dim must be divisible by the number of heads"
head_dim = q.size(-1) // self.nhead
q = q.reshape(tgt_len, bsz * self.nhead, head_dim)
assert k.size(-1) % self.nhead == 0, "key's embed_dim must be divisible by the number of heads"
head_dim = k.size(-1) // self.nhead
k = k.reshape(src_len, bsz * self.nhead, head_dim)
assert v.size(-1) % self.nhead == 0, "value's embed_dim must be divisible by the number of heads"
head_dim = v.size(-1) // self.nhead
v = v.reshape(src_len, bsz * self.nhead, head_dim)
attn_output, attn_output_weights, self.attn_map = self.attention_layer(q, k, v,
types=types, attn_mask=attn_mask,
bias_k=bias_k, bias_v=bias_v)
attn_output = attn_output.reshape(tgt_len, bsz, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_output_weights
class ScaledDotProduct(torch.nn.Module):
def __init__(self, dropout=0.0):
r"""Processes a projected query and key-value pair to apply
scaled dot product attention.
Args:
dropout (float): probability of dropping an attention weight.
Examples::
>>> SDP = torchtext.models.ScaledDotProduct(0.1)
>>> q = torch.randn(256, 21, 3)
>>> k = v = torch.randn(256, 21, 3)
>>> attn_output, attn_weights = SDP(q, k, v)
>>> print(attn_output.shape, attn_weights.shape)
torch.Size([256, 21, 3]) torch.Size([256, 21, 21])
"""
super(ScaledDotProduct, self).__init__()
self.dropout = dropout
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
types: Optional[dict] = None,
attn_mask: Optional[torch.Tensor] = None,
bias_k: Optional[torch.Tensor] = None,
bias_v: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Uses a scaled dot product with the projected key-value pair to update
the projected query.
Args:
query (Tensor): Projected query
key (Tensor): Projected key
value (Tensor): Projected value
attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions.
bias_k and bias_v: (Tensor, optional): one more key and value sequence to be added at
sequence dim (dim=-3). Those are used for incremental decoding. Users should provide
non-None to both arguments in order to activate them.
Shape:
- query: :math:`(L, N * H, E / H)`
- key: :math:`(S, N * H, E / H)`
- value: :math:`(S, N * H, E / H)`
- attn_mask: :math:`(N * H, L, S)`, positions with ``True`` are not allowed to attend
while ``False`` values will be unchanged.
            - bias_k and bias_v: :math:`(1, N * H, E / H)`
- Output: :math:`(L, N * H, E / H)`, :math:`(N * H, L, S)`
where L is the target length, S is the source length, H is the number
of attention heads, N is the batch size, and E is the embedding dimension.
"""
if bias_k is not None and bias_v is not None:
assert key.size(-1) == bias_k.size(-1) and key.size(-2) == bias_k.size(-2) and bias_k.size(-3) == 1, \
"Shape of bias_k is not supported"
assert value.size(-1) == bias_v.size(-1) and value.size(-2) == bias_v.size(-2) and bias_v.size(-3) == 1, \
"Shape of bias_v is not supported"
key = torch.cat([key, bias_k])
value = torch.cat([value, bias_v])
if attn_mask is not None:
_attn_mask = attn_mask
attn_mask = torch.nn.functional.pad(_attn_mask, (0, 1))
tgt_len, head_dim = query.size(-3), query.size(-1)
assert query.size(-1) == key.size(-1) == value.size(-1), "The feature dim of query, key, value must be equal."
assert key.size() == value.size(), "Shape of key, value must match"
src_len = key.size(-3)
batch_heads = max(query.size(-2), key.size(-2))
# Scale query
query, key, value = query.transpose(-2, -3), key.transpose(-2, -3), value.transpose(-2, -3)
query = query * (float(head_dim) ** -0.5)
if attn_mask is not None:
if attn_mask.dim() != 3:
raise RuntimeError('attn_mask must be a 3D tensor.')
if (attn_mask.size(-1) != src_len) or (attn_mask.size(-2) != tgt_len) or \
(attn_mask.size(-3) != 1 and attn_mask.size(-3) != batch_heads):
raise RuntimeError('The size of the attn_mask is not correct.')
if attn_mask.dtype != torch.bool:
raise RuntimeError('Only bool tensor is supported for attn_mask')
# Dot product of q, k
attn_output_weights = torch.matmul(query, key.transpose(-2, -1))
if attn_mask is not None:
attn_output_weights.masked_fill_(attn_mask, -1e8, )
attn_map = {}
attn_map['attn'] = attn_output_weights
attn_map['stat'] = None
attn_map['succeed'] = None
# approx attn weights
if (types is not None) and (not self.training):
attn_output_weights, attn_map['stat'], attn_map['succeed'] = compute_single_distance \
(attn_map['attn'], attn_mask, types['params_reduction'],
types['approx_type'], alpha=types['alpha'])
else:
attn_output_weights = torch.nn.functional.softmax(attn_output_weights, dim=-1)
attn_output_weights = torch.nn.functional.dropout(attn_output_weights, p=self.dropout, training=self.training)
attn_output = torch.matmul(attn_output_weights, value)
return attn_output.transpose(-2, -3), attn_output_weights, attn_map
class SharedQK_Proj(torch.nn.Module):
def __init__(self, qk_proj, v_proj):
super(SharedQK_Proj, self).__init__()
self.qk_proj = qk_proj
        self.v_proj = v_proj  # only q and k share a projection; the value keeps its own
def forward(self, q, k, v):
return self.qk_proj(q), self.qk_proj(k), self.v_proj(v)
class InProjContainer(torch.nn.Module):
def __init__(self, query_proj, key_proj, value_proj):
r"""A in-proj container to process inputs.
Args:
query_proj: a proj layer for query.
key_proj: a proj layer for key.
value_proj: a proj layer for value.
"""
super(InProjContainer, self).__init__()
self.query_proj = query_proj
self.key_proj = key_proj
self.value_proj = value_proj
def forward(self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
r"""Projects the input sequences using in-proj layers.
Args:
query, key, value (Tensors): sequence to be projected
Shape:
- query, key, value: :math:`(S, N, E)`
- Output: :math:`(S, N, E)`
where S is the sequence length, N is the batch size, and E is the embedding dimension.
"""
return self.query_proj(query), self.key_proj(key), self.value_proj(value)
def generate_square_subsequent_mask(nbatch, sz):
r"""Generate a square mask for the sequence. The masked positions are filled with True.
Unmasked positions are filled with False.
Args:
nbatch: the number of batch size
sz: the size of square mask
"""
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1).repeat(nbatch, 1, 1)
return mask
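# Minimal smoke test (illustrative hyperparameters only): classify a batch of length-50, 8-channel
# sequences into 10 classes, and check the shape of the boolean mask helper.
if __name__ == "__main__":
    _model = ClassificationTransformer(d_input=8, d_output=10, d_model=64, nhead=4,
                                        num_encoder_layers=2, dim_feedforward=128)
    _logits = _model(torch.rand(4, 50, 8))  # (batch, length, channels)
    assert _logits.shape == (4, 10)
    assert generate_square_subsequent_mask(nbatch=1, sz=5).shape == (1, 5, 5)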
| state-spaces-main | src/models/baselines/transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the MIT license
"""ConvNext TIMM version with S4ND integration.
Paper: `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
Original code and weights from https://github.com/facebookresearch/ConvNeXt, original copyright below
Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
"""
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.fx_features import register_notrace_module
# from timm.models.helpers import named_apply, build_model_with_cfg, checkpoint_seq
from timm.models.helpers import named_apply, build_model_with_cfg
from timm.models.layers import trunc_normal_, ClassifierHead, SelectAdaptivePool2d, DropPath, ConvMlp, Mlp
from timm.models.registry import register_model
import copy
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from omegaconf import OmegaConf
# S4 imports
import src.utils as utils
import src.utils.registry as registry
from src.models.nn import TransposedLinear
__all__ = ['ConvNeXt'] # model_registry will add each entrypoint fn to this
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = dict(
convnext_tiny=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth"),
convnext_small=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth"),
convnext_base=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth"),
convnext_large=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth"),
convnext_nano_hnf=_cfg(url=''),
convnext_tiny_hnf=_cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_tiny_hnf_a2h-ab7e9df2.pth',
crop_pct=0.95),
convnext_tiny_in22ft1k=_cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_224.pth'),
convnext_small_in22ft1k=_cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_224.pth'),
convnext_base_in22ft1k=_cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth'),
convnext_large_in22ft1k=_cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth'),
convnext_xlarge_in22ft1k=_cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth'),
convnext_tiny_384_in22ft1k=_cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_384.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
convnext_small_384_in22ft1k=_cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_384.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
convnext_base_384_in22ft1k=_cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
convnext_large_384_in22ft1k=_cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
convnext_xlarge_384_in22ft1k=_cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
convnext_tiny_in22k=_cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth", num_classes=21841),
convnext_small_in22k=_cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth", num_classes=21841),
convnext_base_in22k=_cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth", num_classes=21841),
convnext_large_in22k=_cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth", num_classes=21841),
convnext_xlarge_in22k=_cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth", num_classes=21841),
)
def _is_contiguous(tensor: torch.Tensor) -> bool:
# jit is oh so lovely :/
# if torch.jit.is_tracing():
# return True
if torch.jit.is_scripting():
return tensor.is_contiguous()
else:
return tensor.is_contiguous(memory_format=torch.contiguous_format)
def get_num_layer_for_convnext(var_name, variant='tiny'):
"""
Divide [3, 3, 27, 3] layers into 12 groups; each group is three
consecutive blocks, including possible neighboring downsample layers;
adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py
"""
num_max_layer = 12
if "stem" in var_name:
return 0
# note: moved norm_layer outside of downsample module
elif "downsample" in var_name or "norm_layer" in var_name:
stage_id = int(var_name.split('.')[2])
if stage_id == 0:
layer_id = 0
elif stage_id == 1 or stage_id == 2:
layer_id = stage_id + 1
elif stage_id == 3:
layer_id = 12
return layer_id
elif "stages" in var_name:
stage_id = int(var_name.split('.')[2])
block_id = int(var_name.split('.')[4])
if stage_id == 0 or stage_id == 1:
layer_id = stage_id + 1
elif stage_id == 2:
if variant == 'tiny':
layer_id = 3 + block_id
else:
layer_id = 3 + block_id // 3
elif stage_id == 3:
layer_id = 12
return layer_id
else:
return num_max_layer + 1
def get_num_layer_for_convnext_tiny(var_name):
return get_num_layer_for_convnext(var_name, 'tiny')
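# Note: the hard-coded indices ([2] for the stage id, [4] for the block id) assume parameter names
# carry a one-component prefix, e.g. "<model>.stages.<stage_id>.blocks.<block_id>.<...>".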
@register_notrace_module
class DropoutNd(nn.Module):
def __init__(self, p: float = 0.5, tie=True):
""" tie: tie dropout mask across sequence lengths (Dropout1d/2d/3d)
        For some reason tie=False is dog slow; probably an issue with torch.distributions.
"""
super().__init__()
if p < 0 or p >= 1:
raise ValueError("dropout probability has to be in [0, 1), " "but got {}".format(p))
self.p = p
self.tie = tie
self.binomial = torch.distributions.binomial.Binomial(probs=1-self.p)
def forward(self, X):
""" X: (batch, dim, lengths...) """
if self.training:
# binomial = torch.distributions.binomial.Binomial(probs=1-self.p)
mask_shape = X.shape[:2] + (1,)*(X.ndim-2) if self.tie else X.shape
# mask = self.binomial.sample(mask_shape)
mask = torch.rand(*mask_shape, device=X.device) < 1.-self.p
return X * mask * (1.0/(1-self.p))
return X
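# With tie=True the mask above has shape X.shape[:2] + (1, ..., 1): one Bernoulli draw per
# (batch, channel) pair, broadcast over all remaining length dimensions and rescaled by 1/(1-p).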
@register_notrace_module
class LayerNorm2d(nn.LayerNorm):
r""" LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, H, W).
"""
def __init__(self, normalized_shape, eps=1e-6):
super().__init__(normalized_shape, eps=eps)
def forward(self, x) -> torch.Tensor:
if _is_contiguous(x):
return F.layer_norm(
x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2)
else:
s, u = torch.var_mean(x, dim=1, unbiased=False, keepdim=True)
x = (x - u) * torch.rsqrt(s + self.eps)
x = x * self.weight[:, None, None] + self.bias[:, None, None]
return x
@register_notrace_module
class LayerNorm3d(nn.LayerNorm):
r""" LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, L, H, W).
"""
def __init__(self, normalized_shape, eps=1e-6):
super().__init__(normalized_shape, eps=eps)
def forward(self, x) -> torch.Tensor:
if _is_contiguous(x):
return F.layer_norm(
x.permute(0, 2, 3, 4, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 4, 1, 2, 3)
else:
s, u = torch.var_mean(x, dim=1, unbiased=False, keepdim=True)
x = (x - u) * torch.rsqrt(s + self.eps)
x = x * self.weight[:, None, None, None] + self.bias[:, None, None, None]
return x
@register_notrace_module
class TransposedLN(nn.Module):
def __init__(self, d, scalar=True):
super().__init__()
self.m = nn.Parameter(torch.zeros(1))
self.s = nn.Parameter(torch.ones(1))
setattr(self.m, "_optim", {"weight_decay": 0.0})
setattr(self.s, "_optim", {"weight_decay": 0.0})
def forward(self, x):
s, m = torch.std_mean(x, dim=1, unbiased=False, keepdim=True)
y = (self.s/s) * (x-m+self.m)
return y
class Conv2dWrapper(nn.Module):
"""
    Light wrapper around nn.Conv2d that accepts (and ignores) the resolution flag, matching the S4 conv layer interface.
"""
def __init__(self, dim_in, dim_out, **kwargs):
super().__init__()
self.conv = nn.Conv2d(dim_in, dim_out, **kwargs)
def forward(self, x, resolution=None):
return self.conv(x)
class S4DownSample(nn.Module):
""" S4 conv block with downsampling using avg pool
Args:
downsample_layer (dict): config for creating s4 layer
in_ch (int): num input channels
out_ch (int): num output channels
stride (int): downsample factor in avg pool
"""
def __init__(self, downsample_layer, in_ch, out_ch, stride=1, activate=False, glu=False, pool3d=False):
super().__init__()
# create s4
self.s4conv = utils.instantiate(registry.layer, downsample_layer, in_ch)
self.act = nn.GELU() if activate else nn.Identity()
if pool3d:
self.avgpool = nn.AvgPool3d(kernel_size=stride, stride=stride)
else:
self.avgpool = nn.AvgPool2d(kernel_size=stride, stride=stride)
self.glu = glu
d_out = 2*out_ch if self.glu else out_ch
self.fc = TransposedLinear(in_ch, d_out)
def forward(self, x, resolution=1):
x = self.s4conv(x, resolution)
x = self.act(x)
x = self.avgpool(x)
x = self.fc(x)
if self.glu:
x = F.glu(x, dim=1)
return x
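# Shape flow: (B, in_ch, spatial...) -> s4conv (channels preserved) -> avg pool (spatial / stride)
# -> TransposedLinear to 2*out_ch followed by GLU back to out_ch when glu=True (otherwise the
# linear maps directly to out_ch).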
class ConvNeXtBlock(nn.Module):
""" ConvNeXt Block
# previous convnext notes:
There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
Unlike the official impl, this one allows choice of 1 or 2, 1x1 conv can be faster with appropriate
choice of LayerNorm impl, however as model size increases the tradeoffs appear to change and nn.Linear
is a better choice. This was observed with PyTorch 1.10 on 3090 GPU, it could change over time & w/ different HW.
# two options for convs are:
- conv2d, depthwise (original)
- s4nd, used if a layer config passed
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
layer (config/dict): config for s4 layer
"""
def __init__(self,
dim,
drop_path=0.,
ls_init_value=1e-6,
conv_mlp=False,
mlp_ratio=4,
norm_layer=None,
layer=None,
):
super().__init__()
assert norm_layer is not None
mlp_layer = ConvMlp if conv_mlp else Mlp
self.use_conv_mlp = conv_mlp
# Depthwise conv
if layer is None:
self.conv_dw = Conv2dWrapper(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
else:
self.conv_dw = utils.instantiate(registry.layer, layer, dim)
self.norm = norm_layer(dim)
self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=nn.GELU)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, resolution=1):
shortcut = x
x = self.conv_dw(x, resolution)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = x.permute(0, 3, 1, 2)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1))
x = self.drop_path(x) + shortcut
return x
class Stem(nn.Module):
def __init__(self,
stem_type='patch', # regular convnext
in_ch=3,
out_ch=64,
img_size=None,
patch_size=4,
stride=4,
stem_channels=32,
stem_layer=None,
stem_l_max=None,
downsample_act=False,
downsample_glu=False,
norm_layer=None,
):
super().__init__()
self.stem_type = stem_type
# NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4
self.pre_stem = None
self.post_stem = None
if stem_type == 'patch':
print("stem type: ", 'patch')
self.stem = nn.Sequential(
nn.Conv2d(in_ch, out_ch, kernel_size=patch_size, stride=patch_size),
norm_layer(out_ch)
)
elif stem_type == 'depthwise_patch':
print("stem type: ", 'depthwise_patch')
self.stem = nn.Sequential(
nn.Conv2d(in_ch, stem_channels, kernel_size=1, stride=1, padding=0),
nn.Conv2d(stem_channels, stem_channels, kernel_size=patch_size, stride=1, padding='same', groups=stem_channels),
nn.AvgPool2d(kernel_size=patch_size, stride=patch_size),
TransposedLinear(stem_channels, 2*out_ch),
nn.GLU(dim=1),
norm_layer(out_ch),
)
elif stem_type == 'new_patch':
print("stem type: ", 'new_patch')
self.stem = nn.Sequential(
nn.Conv2d(in_ch, stem_channels, kernel_size=patch_size, stride=1, padding='same'),
nn.AvgPool2d(kernel_size=patch_size, stride=patch_size),
TransposedLinear(stem_channels, 2*out_ch),
nn.GLU(dim=1),
norm_layer(out_ch),
)
elif stem_type == 'new_s4nd_patch':
print("stem type: ", 'new_s4nd_patch')
stem_layer_copy = copy.deepcopy(stem_layer)
assert stem_l_max is not None, "need to provide a stem_l_max to use stem=new_s4nd_patch"
stem_layer_copy["l_max"] = stem_l_max
self.pre_stem = nn.Identity()
self.stem = utils.instantiate(registry.layer, stem_layer_copy, in_ch, out_channels=stem_channels)
self.post_stem = nn.Sequential(
nn.AvgPool2d(kernel_size=patch_size, stride=patch_size),
TransposedLinear(stem_channels, 2*out_ch),
nn.GLU(dim=1),
norm_layer(out_ch)
)
elif stem_type == 's4nd_patch':
print("stem type: ", "s4nd_patch")
stem_layer_copy = copy.deepcopy(stem_layer)
stem_layer_copy["l_max"] = img_size
self.pre_stem = nn.Conv2d(in_ch, stem_channels, kernel_size=1, stride=1, padding=0)
# s4 + norm + avg pool + linear
self.stem = S4DownSample(stem_layer_copy, stem_channels, out_ch, stride=patch_size, activate=downsample_act, glu=downsample_glu)
self.post_stem = norm_layer(out_ch)
elif stem_type == 's4nd':
# mix of conv2d + s4
print("stem type: ", 's4nd')
stem_layer_copy = copy.deepcopy(stem_layer)
stem_layer_copy["l_max"] = img_size
# s4_downsample = nn.Sequential(
# utils.instantiate(registry.layer, stage_layer_copy, stem_channels),
# nn.AvgPool2d(kernel_size=2, stride=2),
# TransposedLinear(stem_channels, 64),
# )
s4_downsample = S4DownSample(stem_layer_copy, stem_channels, 64, stride=2, activate=downsample_act, glu=downsample_glu)
self.pre_stem = nn.Sequential(
nn.Conv2d(in_ch, stem_channels, kernel_size=1, stride=1, padding=0),
norm_layer(stem_channels),
nn.GELU()
)
self.stem = s4_downsample
self.post_stem = nn.Identity()
# regular strided conv downsample
elif stem_type == 'default':
print("stem type: DEFAULT. Make sure this is what you want.")
self.stem = nn.Sequential(
nn.Conv2d(in_ch, 32, kernel_size=3, stride=2, padding=1),
norm_layer(32),
nn.GELU(),
nn.Conv2d(32, 64, kernel_size=3, padding=1),
)
else:
raise NotImplementedError("provide a valid stem type!")
def forward(self, x, resolution):
# if using s4nd layer, need to pass resolution
if self.stem_type in ['s4nd', 's4nd_patch', 'new_s4nd_patch']:
x = self.pre_stem(x)
x = self.stem(x, resolution)
x = self.post_stem(x)
else:
x = self.stem(x)
return x
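# Illustrative sketch (assumed toy sizes; norm layer omitted): the default 'patch' stem is a
# strided conv whose kernel equals its stride, so patch_size=4 turns a 224x224 image into a
# 56x56 feature map with dims[0] channels.
def _example_patch_stem_shapes():
    import torch
    import torch.nn as nn
    stem_conv = nn.Conv2d(3, 96, kernel_size=4, stride=4)
    y = stem_conv(torch.randn(1, 3, 224, 224))
    assert y.shape == (1, 96, 56, 56)                 # 224 / 4 = 56 per spatial dim
    return y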
class ConvNeXtStage(nn.Module):
"""
Will create a stage, made up of downsampling and conv blocks.
There are 2 choices for each of these:
downsampling: s4 or strided conv (original)
conv stage: s4 or conv2d (original)
"""
def __init__(
self,
in_chs,
out_chs,
img_size=None,
stride=2,
depth=2,
dp_rates=None,
ls_init_value=1.0,
conv_mlp=False,
norm_layer=None,
cl_norm_layer=None,
# cross_stage=False,
stage_layer=None, # config
# downsample_layer=None,
downsample_type=None,
downsample_act=False,
downsample_glu=False,
):
super().__init__()
self.grad_checkpointing = False
self.downsampling = False
# 2 options to downsample
if in_chs != out_chs or stride > 1:
self.downsampling = True
# s4 type copies config from corresponding stage layer
if downsample_type == 's4nd':
print("s4nd downsample")
downsample_layer_copy = copy.deepcopy(stage_layer)
downsample_layer_copy["l_max"] = img_size # always need to update curr l_max
self.norm_layer = norm_layer(in_chs)
# mimics strided conv but w/s4
self.downsample = S4DownSample(downsample_layer_copy, in_chs, out_chs, stride=stride, activate=downsample_act, glu=downsample_glu)
# strided conv
else:
print("strided conv downsample")
self.norm_layer = norm_layer(in_chs)
self.downsample = Conv2dWrapper(in_chs, out_chs, kernel_size=stride, stride=stride)
# else:
# self.norm_layer = nn.Identity()
# self.downsample = nn.Identity()
if stage_layer is not None:
stage_layer["l_max"] = [x // stride for x in img_size]
dp_rates = dp_rates or [0.] * depth
self.blocks = nn.ModuleList()
for j in range(depth):
self.blocks.append(
ConvNeXtBlock(
dim=out_chs, drop_path=dp_rates[j], ls_init_value=ls_init_value, conv_mlp=conv_mlp,
norm_layer=norm_layer if conv_mlp else cl_norm_layer, layer=stage_layer)
)
def forward(self, x, resolution=1):
if self.downsampling:
x = self.norm_layer(x)
x = self.downsample(x, resolution)
for block in self.blocks:
x = block(x, resolution)
        # when not downsampling, no downsample layers are created at all
        # (nn.Identity cannot absorb the extra resolution argument, so we branch instead)
else:
for block in self.blocks:
x = block(x, resolution)
return x
class ConvNeXt(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_head (float): Head dropout rate
drop_path_rate (float): Stochastic depth rate. Default: 0.
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
output_stride=32,
patch_size=4,
stem_channels=8,
depths=(3, 3, 9, 3),
dims=(96, 192, 384, 768),
ls_init_value=1e-6,
conv_mlp=False, # whether to transpose channels to last dim inside MLP
stem_type='patch', # supports `s4nd` + avg pool
stem_l_max=None, # len of l_max in stem (if using s4)
downsample_type='patch', # supports `s4nd` + avg pool
downsample_act=False,
downsample_glu=False,
head_init_scale=1.,
head_norm_first=False,
norm_layer=None,
custom_ln=False,
drop_head=0.,
drop_path_rate=0.,
layer=None, # Shared config dictionary for the core layer
stem_layer=None,
stage_layers=None,
img_size=None,
# **kwargs, # catch all
):
super().__init__()
assert output_stride == 32
if norm_layer is None:
if custom_ln:
norm_layer = TransposedLN
else:
norm_layer = partial(LayerNorm2d, eps=1e-6)
cl_norm_layer = norm_layer if conv_mlp else partial(nn.LayerNorm, eps=1e-6)
else:
            assert conv_mlp, \
                'If a norm_layer is specified, conv MLP must be used so all norms expect rank-4, channels-first input'
cl_norm_layer = norm_layer
self.num_classes = num_classes
self.drop_head = drop_head
self.feature_info = []
self._img_sizes = [img_size]
# Broadcast dictionaries
if layer is not None:
stage_layers = [OmegaConf.merge(layer, s) for s in stage_layers]
stem_layer = OmegaConf.merge(layer, stem_layer)
# instantiate stem
self.stem = Stem(
stem_type=stem_type,
in_ch=in_chans,
out_ch=dims[0],
img_size=img_size,
patch_size=patch_size,
stride=patch_size,
stem_channels=stem_channels,
stem_layer=stem_layer,
stem_l_max=stem_l_max,
downsample_act=downsample_act,
downsample_glu=downsample_glu,
norm_layer=norm_layer,
)
if stem_type == 's4nd' or stem_type == 'default':
stem_stride = 2
prev_chs = 64
else:
stem_stride = patch_size
prev_chs = dims[0]
curr_img_size = [x // stem_stride for x in img_size]
self._img_sizes.append(curr_img_size)
self.stages = nn.ModuleList()
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
# 4 feature resolution stages, each consisting of multiple residual blocks
for i in range(4):
# if stem downsampled by 4, then in stage 0, we don't downsample
# if stem downsampled by 2, then in stage 0, we downsample by 2
# all other stages we downsample by 2 no matter what
stride = 1 if i==0 and stem_stride == 4 else 2 # stride 1 is no downsample (because already ds in stem)
# print("stage {}, before downsampled img size {}, stride {}".format(i, curr_img_size, stride))
out_chs = dims[i]
self.stages.append(ConvNeXtStage(
prev_chs,
out_chs,
img_size=curr_img_size,
stride=stride,
depth=depths[i],
dp_rates=dp_rates[i],
ls_init_value=ls_init_value,
conv_mlp=conv_mlp,
norm_layer=norm_layer,
cl_norm_layer=cl_norm_layer,
stage_layer=stage_layers[i],
downsample_type=downsample_type,
downsample_act=downsample_act,
downsample_glu=downsample_glu,
)
)
prev_chs = out_chs
curr_img_size = [x // stride for x in curr_img_size] # update image size for next stage
self._img_sizes.append(curr_img_size)
# # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
# self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
# self.stages = nn.Sequential(*stages)
self.num_features = prev_chs
# if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets
# otherwise pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights)
self.norm_pre = norm_layer(self.num_features) if head_norm_first else nn.Identity()
self.head = nn.Sequential(OrderedDict([
('global_pool', SelectAdaptivePool2d(pool_type=global_pool)),
('norm', nn.Identity() if head_norm_first else norm_layer(self.num_features)),
('flatten', nn.Flatten(1) if global_pool else nn.Identity()),
('drop', nn.Dropout(self.drop_head)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())]))
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.downsample', (0,)), # blocks
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^norm_pre', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes=0, global_pool=None):
if global_pool is not None:
self.head.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.head.flatten = nn.Flatten(1) if global_pool else nn.Identity()
self.head.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x, resolution=1):
x = self.stem(x, resolution)
for stage in self.stages:
x = stage(x, resolution)
x = self.norm_pre(x)
return x
def forward_head(self, x, pre_logits: bool = False):
# NOTE nn.Sequential in head broken down since can't call head[:-1](x) in torchscript :(
x = self.head.global_pool(x)
x = self.head.norm(x)
x = self.head.flatten(x)
x = self.head.drop(x)
return x if pre_logits else self.head.fc(x)
def forward(self, x, resolution=1, state=None):
x = self.forward_features(x, resolution)
x = self.forward_head(x)
return x, None
def _init_weights(module, name=None, head_init_scale=1.0):
if isinstance(module, nn.Conv2d):
trunc_normal_(module.weight, std=.02)
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
# check if has bias first
if module.bias is not None:
nn.init.constant_(module.bias, 0)
if name and 'head.' in name:
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale)
def checkpoint_filter_fn(state_dict, model):
""" Remap FB checkpoints -> timm """
if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict:
return state_dict # non-FB checkpoint
if 'model' in state_dict:
state_dict = state_dict['model']
out_dict = {}
import re
for k, v in state_dict.items():
k = k.replace('downsample_layers.0.', 'stem.')
k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k)
k = k.replace('dwconv', 'conv_dw')
k = k.replace('pwconv', 'mlp.fc')
k = k.replace('head.', 'head.fc.')
if k.startswith('norm.'):
k = k.replace('norm', 'head.norm')
if v.ndim == 2 and 'head' not in k:
model_shape = model.state_dict()[k].shape
v = v.reshape(model_shape)
out_dict[k] = v
return out_dict
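# Illustrative sketch (toy tensors patterned after the FB checkpoint layout; no real weights
# are loaded): how checkpoint_filter_fn renames keys into this module's stem/stages/head naming.
def _example_checkpoint_remap():
    import torch
    fb_style = {
        'downsample_layers.0.0.weight': torch.zeros(96, 3, 4, 4),  # stem conv
        'stages.0.0.dwconv.weight': torch.zeros(96, 1, 7, 7),      # depthwise conv, stage 0 block 0
        'head.weight': torch.zeros(1000, 96),                      # classifier ('head' keys skip reshape)
    }
    remapped = checkpoint_filter_fn(fb_style, model=None)
    assert 'stem.0.weight' in remapped
    assert 'stages.0.blocks.0.conv_dw.weight' in remapped
    assert 'head.fc.weight' in remapped
    return remapped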
def _create_convnext(variant, pretrained=False, **kwargs):
model = build_model_with_cfg(
ConvNeXt, variant, pretrained,
default_cfg=default_cfgs[variant],
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs)
return model
# @register_model
# def convnext_nano_hnf(pretrained=False, **kwargs):
# model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), head_norm_first=True, conv_mlp=True, **kwargs)
# model = _create_convnext('convnext_nano_hnf', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_tiny_hnf(pretrained=False, **kwargs):
# model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), head_norm_first=True, conv_mlp=True, **kwargs)
# model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_tiny_hnfd(pretrained=False, **kwargs):
# model_args = dict(
# depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), head_norm_first=True, conv_mlp=True, stem_type='dual', **kwargs)
# model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **model_args)
# return model
@register_model
def convnext_micro(pretrained=False, **kwargs):
model_args = dict(depths=(3, 3, 3, 3), dims=(64, 128, 256, 512), **kwargs)
model = _create_convnext('convnext_tiny', pretrained=pretrained, **model_args)
return model
@register_model
def convnext_tiny(pretrained=False, **kwargs):
model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), **kwargs)
model = _create_convnext('convnext_tiny', pretrained=pretrained, **model_args)
return model
@register_model
def convnext_small(pretrained=False, **kwargs):
model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs)
model = _create_convnext('convnext_small', pretrained=pretrained, **model_args)
return model
@register_model
def convnext_base(pretrained=False, **kwargs):
model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
model = _create_convnext('convnext_base', pretrained=pretrained, **model_args)
return model
# @register_model
# def convnext_large(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
# model = _create_convnext('convnext_large', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_tiny_in22ft1k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
# model = _create_convnext('convnext_tiny_in22ft1k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_small_in22ft1k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
# model = _create_convnext('convnext_small_in22ft1k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_base_in22ft1k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
# model = _create_convnext('convnext_base_in22ft1k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_large_in22ft1k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
# model = _create_convnext('convnext_large_in22ft1k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_xlarge_in22ft1k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)
# model = _create_convnext('convnext_xlarge_in22ft1k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_tiny_384_in22ft1k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
# model = _create_convnext('convnext_tiny_384_in22ft1k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_small_384_in22ft1k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
# model = _create_convnext('convnext_small_384_in22ft1k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_base_384_in22ft1k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
# model = _create_convnext('convnext_base_384_in22ft1k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_large_384_in22ft1k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
# model = _create_convnext('convnext_large_384_in22ft1k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_xlarge_384_in22ft1k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)
# model = _create_convnext('convnext_xlarge_384_in22ft1k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_tiny_in22k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
# model = _create_convnext('convnext_tiny_in22k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_small_in22k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
# model = _create_convnext('convnext_small_in22k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_base_in22k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
# model = _create_convnext('convnext_base_in22k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_large_in22k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
# model = _create_convnext('convnext_large_in22k', pretrained=pretrained, **model_args)
# return model
# @register_model
# def convnext_xlarge_in22k(pretrained=False, **kwargs):
# model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)
# model = _create_convnext('convnext_xlarge_in22k', pretrained=pretrained, **model_args)
# return model
class Conv3d(nn.Conv3d):
def __init__(self, in_ch, out_ch, kernel_size, stride, padding=0, groups=1, factor=False):
super().__init__(in_ch, out_ch, kernel_size, stride=stride, padding=padding, groups=groups)
self.factor = factor
self.in_ch=in_ch
self.out_ch=out_ch
self.kernel_size=[kernel_size] if isinstance(kernel_size, int) else kernel_size
self.stride=stride
self.padding=padding
self.groups=groups
if self.factor:
self.weight = nn.Parameter(self.weight[:, :, 0, :, :]) # Subsample time dimension
self.time_weight = nn.Parameter(self.weight.new_ones(self.kernel_size[0]) / self.kernel_size[0])
else:
pass
def forward(self, x):
if self.factor:
weight = self.weight[:, :, None, :, :] * self.time_weight[:, None, None]
y = F.conv3d(x, weight, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups)
else:
y = super().forward(x)
return y
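# Illustrative sketch (assumed toy sizes): how the factorized Conv3d above assembles its full
# 3D kernel. A single shared 2D spatial kernel is scaled by a learned per-timestep weight, so
# the temporal dimension only adds T extra scalars per layer.
def _example_factored_conv3d_weight():
    import torch
    out_ch, in_ch, t, k = 4, 2, 3, 5
    spatial = torch.randn(out_ch, in_ch, k, k)            # shared 2D kernel
    time_weight = torch.full((t,), 1.0 / t)               # per-timestep scale, initialized to 1/T
    full = spatial[:, :, None, :, :] * time_weight[:, None, None]   # broadcast over time
    assert full.shape == (out_ch, in_ch, t, k, k)
    return full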
class Conv3dWrapper(nn.Module):
"""
Light wrapper to make consistent with 2d version (allows for easier inflation).
"""
def __init__(self, dim_in, dim_out, **kwargs):
super().__init__()
self.conv = Conv3d(dim_in, dim_out, **kwargs)
def forward(self, x, resolution=None):
return self.conv(x)
class ConvNeXtBlock3D(nn.Module):
""" ConvNeXt Block
# previous convnext notes:
There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
Unlike the official impl, this one allows choice of 1 or 2, 1x1 conv can be faster with appropriate
choice of LayerNorm impl, however as model size increases the tradeoffs appear to change and nn.Linear
is a better choice. This was observed with PyTorch 1.10 on 3090 GPU, it could change over time & w/ different HW.
# two options for convs are:
- conv2d, depthwise (original)
- s4nd, used if a layer config passed
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
layer (config/dict): config for s4 layer
"""
def __init__(self,
dim,
drop_path=0.,
drop_mlp=0.,
ls_init_value=1e-6,
conv_mlp=False,
mlp_ratio=4,
norm_layer=None,
block_tempor_kernel=3,
layer=None,
factor_3d=False,
):
super().__init__()
assert norm_layer is not None
# if not norm_layer:
# norm_layer = partial(LayerNorm2d, eps=1e-6) if conv_mlp else partial(nn.LayerNorm, eps=1e-6)
mlp_layer = ConvMlp if conv_mlp else Mlp
self.use_conv_mlp = conv_mlp
# Depthwise conv
if layer is None:
tempor_padding = block_tempor_kernel // 2 # or 2
# self.conv_dw = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.conv_dw = Conv3dWrapper(
dim,
dim,
kernel_size=(block_tempor_kernel, 7, 7),
padding=(tempor_padding, 3, 3),
stride=(1, 1, 1),
groups=dim,
factor=factor_3d,
) # depthwise conv
else:
self.conv_dw = utils.instantiate(registry.layer, layer, dim)
self.norm = norm_layer(dim)
self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=nn.GELU, drop=drop_mlp)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
else:
x = x.permute(0, 2, 3, 4, 1)
x = self.norm(x)
x = self.mlp(x)
x = x.permute(0, 4, 1, 2, 3)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1, 1))
x = self.drop_path(x) + shortcut
return x
class ConvNeXtStage3D(nn.Module):
"""
Will create a stage, made up of downsampling and conv blocks.
There are 2 choices for each of these:
downsampling: s4 or strided conv (original)
conv stage: s4 or conv2d (original)
"""
def __init__(
self,
in_chs,
out_chs,
video_size=None, # L, H, W
stride=(2, 2, 2), # Strides for L, H, W
depth=2,
dp_rates=None,
ls_init_value=1.0,
conv_mlp=False,
norm_layer=None,
cl_norm_layer=None,
stage_layer=None, # config
block_tempor_kernel=3,
downsample_type=None,
downsample_act=False,
downsample_glu=False,
factor_3d=False,
drop_mlp=0.,
):
super().__init__()
self.grad_checkpointing = False
# 2 options to downsample
if in_chs != out_chs or np.any(np.array(stride) > 1):
# s4 type copies config from corresponding stage layer
if downsample_type == 's4nd':
print("s4nd downsample")
downsample_layer_copy = copy.deepcopy(stage_layer)
downsample_layer_copy["l_max"] = video_size # always need to update curr l_max
self.norm_layer = norm_layer(in_chs)
# mimics strided conv but w/s4
self.downsample = S4DownSample(
downsample_layer_copy,
in_chs,
out_chs,
stride=stride,
activate=downsample_act,
glu=downsample_glu,
pool3d=True,
)
# self.downsample = nn.Sequential(
# norm_layer(in_chs),
# S4DownSample(
# downsample_layer_copy,
# in_chs,
# out_chs,
# stride=stride,
# activate=downsample_act,
# glu=downsample_glu,
# pool3d=True,
# )
# )
# strided conv
else:
print("strided conv downsample")
self.norm_layer = norm_layer(in_chs)
self.downsample = Conv3dWrapper(in_chs, out_chs, kernel_size=stride, stride=stride, factor=factor_3d)
# self.downsample = nn.Sequential(
# norm_layer(in_chs),
# Conv3d(in_chs, out_chs, kernel_size=stride, stride=stride, factor=factor_3d),
# )
else:
self.norm_layer = nn.Identity()
self.downsample = nn.Identity()
if stage_layer is not None:
stage_layer["l_max"] = [
x // stride if isinstance(stride, int) else x // stride[i]
for i, x in enumerate(video_size)
]
dp_rates = dp_rates or [0.] * depth
self.blocks = nn.Sequential(*[
ConvNeXtBlock3D(
dim=out_chs,
drop_path=dp_rates[j],
drop_mlp=drop_mlp,
ls_init_value=ls_init_value,
conv_mlp=conv_mlp,
norm_layer=norm_layer if conv_mlp else cl_norm_layer,
block_tempor_kernel=block_tempor_kernel,
layer=stage_layer,
factor_3d=factor_3d,
)
for j in range(depth)
])
def forward(self, x):
x = self.norm_layer(x)
x = self.downsample(x)
x = self.blocks(x)
return x
class Stem3d(nn.Module):
def __init__(self,
stem_type='patch', # supports `s4nd` + avg pool
in_chans=3,
spatial_patch_size=4,
tempor_patch_size=4,
stem_channels=8,
dims=(96, 192, 384, 768),
stem_l_max=None, # len of l_max in stem (if using s4)
norm_layer=None,
custom_ln=False,
layer=None, # Shared config dictionary for the core layer
stem_layer=None,
factor_3d=False,
):
super().__init__()
self.stem_type = stem_type
# NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4
if stem_type == 'patch':
print("stem type: ", 'patch')
kernel_3d = [tempor_patch_size, spatial_patch_size, spatial_patch_size]
self.stem = nn.Sequential(
Conv3d(
in_chans,
dims[0],
kernel_size=kernel_3d,
stride=kernel_3d,
factor=factor_3d,
),
norm_layer(dims[0]),
)
elif stem_type == 'new_s4nd_patch':
print("stem type: ", 'new_s4nd_patch')
stem_layer_copy = copy.deepcopy(stem_layer)
assert stem_l_max is not None, "need to provide a stem_l_max to use stem=new_s4nd_patch"
stem_layer_copy["l_max"] = stem_l_max
s4_ds = utils.instantiate(registry.layer, stem_layer_copy, in_chans, out_channels=stem_channels)
kernel_3d = [tempor_patch_size, spatial_patch_size, spatial_patch_size]
self.stem = nn.Sequential(
s4_ds,
nn.AvgPool3d(kernel_size=kernel_3d, stride=kernel_3d),
TransposedLinear(stem_channels, 2*dims[0]),
nn.GLU(dim=1),
norm_layer(dims[0]),
)
else:
raise NotImplementedError("provide a valid stem type!")
    def forward(self, x, resolution=None):
        # if using an s4nd layer, the resolution flag must reach the s4 module; nn.Sequential
        # cannot forward extra arguments, so call the first (s4) module separately
        if self.stem_type in ['new_s4nd_patch']:
            modules = list(self.stem)
            x = modules[0](x, resolution)
            for m in modules[1:]:
                x = m(x)
        else:
            x = self.stem(x)
        return x
class ConvNeXt3D(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_head (float): Head dropout rate
drop_path_rate (float): Stochastic depth rate. Default: 0.
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(
self,
in_chans=3,
num_classes=1000,
# global_pool='avg',
spatial_patch_size=4,
tempor_patch_size=4,
output_spatial_stride=32,
# patch_size=(1, 4, 4),
stem_channels=8,
depths=(3, 3, 9, 3),
dims=(96, 192, 384, 768),
ls_init_value=1e-6,
conv_mlp=False, # whether to transpose channels to last dim inside MLP
stem_type='patch', # supports `s4nd` + avg pool
stem_l_max=None, # len of l_max in stem (if using s4)
downsample_type='patch', # supports `s4nd` + avg pool
downsample_act=False,
downsample_glu=False,
head_init_scale=1.,
head_norm_first=False,
norm_layer=None,
custom_ln=False,
drop_head=0.,
drop_path_rate=0.,
drop_mlp=0.,
layer=None, # Shared config dictionary for the core layer
stem_layer=None,
stage_layers=None,
video_size=None,
block_tempor_kernel=3, # only for non-s4 block
temporal_stage_strides=None,
factor_3d=False,
**kwargs, # catch all
):
super().__init__()
assert output_spatial_stride == 32
if norm_layer is None:
if custom_ln:
norm_layer = TransposedLN
else:
norm_layer = partial(LayerNorm3d, eps=1e-6)
cl_norm_layer = norm_layer if conv_mlp else partial(nn.LayerNorm, eps=1e-6)
else:
            assert conv_mlp, \
                'If a norm_layer is specified, conv MLP must be used so all norms expect rank-5, channels-first input'
cl_norm_layer = norm_layer
self.num_classes = num_classes
self.drop_head = drop_head
self.feature_info = []
# Broadcast dictionaries
if layer is not None:
stage_layers = [OmegaConf.merge(layer, s) for s in stage_layers]
stem_layer = OmegaConf.merge(layer, stem_layer)
# instantiate stem here
self.stem = Stem3d(
stem_type=stem_type, # supports `s4nd` + avg pool
in_chans=in_chans,
spatial_patch_size=spatial_patch_size,
tempor_patch_size=tempor_patch_size,
stem_channels=stem_channels,
dims=dims,
stem_l_max=stem_l_max, # len of l_max in stem (if using s4)
norm_layer=norm_layer,
custom_ln=custom_ln,
layer=layer, # Shared config dictionary for the core layer
stem_layer=stem_layer,
factor_3d=factor_3d,
)
stem_stride = [tempor_patch_size, spatial_patch_size, spatial_patch_size]
prev_chs = dims[0]
# TODO: something else here?
curr_video_size = [
x // stem_stride if isinstance(stem_stride, int) else x // stem_stride[i]
for i, x in enumerate(video_size)
]
self.stages = nn.Sequential()
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
# 4 feature resolution stages, each consisting of multiple residual blocks
for i in range(4):
# if stem downsampled by 4, then in stage 0, we don't downsample
# if stem downsampled by 2, then in stage 0, we downsample by 2
# all other stages we downsample by 2 no matter what
            # the temporal stride can differ from the spatial stride, so it is parsed
            # separately from the optional temporal_stage_strides argument
tempor_stride = temporal_stage_strides[i] if temporal_stage_strides is not None else 2
stride = [1, 1, 1] if i == 0 and np.any(np.array(stem_stride) >= 2) else [tempor_stride, 2, 2] # stride 1 is no downsample (because already ds in stem)
# print("stage {}, before downsampled img size {}, stride {}".format(i, curr_img_size, stride))
out_chs = dims[i]
stages.append(
ConvNeXtStage3D(
prev_chs,
out_chs,
video_size=curr_video_size,
stride=stride,
depth=depths[i],
dp_rates=dp_rates[i],
ls_init_value=ls_init_value,
conv_mlp=conv_mlp,
norm_layer=norm_layer,
cl_norm_layer=cl_norm_layer,
stage_layer=stage_layers[i],
block_tempor_kernel=block_tempor_kernel,
downsample_type=downsample_type,
downsample_act=downsample_act,
downsample_glu=downsample_glu,
factor_3d=factor_3d,
drop_mlp=drop_mlp,
)
)
prev_chs = out_chs
# update image size for next stage
curr_video_size = [
x // stride if isinstance(stride, int) else x // stride[i]
for i, x in enumerate(curr_video_size)
]
# # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
# self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_features = prev_chs
# if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets
# otherwise pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights)
self.norm_pre = norm_layer(self.num_features) if head_norm_first else nn.Identity()
self.head = nn.Sequential(OrderedDict([
('global_pool', nn.AdaptiveAvgPool3d(1)),
('norm', nn.Identity() if head_norm_first else norm_layer(self.num_features)),
('flatten', nn.Flatten(1)),
('drop', nn.Dropout(self.drop_head)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())]))
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.downsample', (0,)), # blocks
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^norm_pre', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
    def reset_classifier(self, num_classes=0, global_pool=None, **kwargs):
        if global_pool is not None:
            self.head.global_pool = nn.AdaptiveAvgPool3d(1)
            self.head.flatten = nn.Flatten(1)
        self.head.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm_pre(x)
return x
def forward_head(self, x, pre_logits: bool = False):
# NOTE nn.Sequential in head broken down since can't call head[:-1](x) in torchscript :(
x = self.head.global_pool(x)
x = self.head.norm(x)
x = self.head.flatten(x)
x = self.head.drop(x)
return x if pre_logits else self.head.fc(x)
def forward(self, x, state=None):
x = self.forward_features(x)
x = self.forward_head(x)
return x, None
def _create_convnext3d(variant, pretrained=False, **kwargs):
model = build_model_with_cfg(
ConvNeXt3D,
variant,
pretrained,
default_cfg=default_cfgs[variant],
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs,
)
return model
@register_model
def convnext3d_tiny(pretrained=False, **kwargs):
model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), **kwargs)
model = _create_convnext3d('convnext_tiny', pretrained=pretrained, **model_args)
return model
def convnext_timm_tiny_2d_to_3d(model, state_dict, ignore_head=True, normalize=True):
"""
inputs:
model: nn.Module, the from 'scratch' model
state_dict: dict, from the pretrained weights
ignore_head: bool, whether to inflate weights in the head (or keep scratch weights).
If number of classes changes (eg, imagenet to hmdb51), then you need to use this.
normalize: bool, if set to True (default), it inflates with a factor of 1, and if
set to False it inflates with a factor of 1/T where T is the temporal length for that kernel
return:
state_dict: dict, update with inflated weights
"""
model_scratch_params_dict = dict(model.named_parameters())
prefix = list(state_dict.keys())[0].split('.')[0] # grab prefix in the keys for state_dict params
old_state_dict = copy.deepcopy(state_dict)
# loop through keys (in either)
# only check `weights`
# compare shapes btw 3d model and 2d model
# if, different, then broadcast
# then set the broadcasted version into the model value
for key in sorted(model_scratch_params_dict.keys()):
scratch_params = model_scratch_params_dict[key]
        # need to add the prefix 'model' in convnext
key_with_prefix = prefix + '.' + key
# make sure key is in the loaded params first, if not, then print it out
loaded_params = state_dict.get(key_with_prefix, None)
if 'time_weight' in key:
print("found time_weight parameter, train from scratch", key)
used_params = scratch_params
elif loaded_params is None:
# This should never happen for 2D -> 3D ConvNext
print("Missing key in pretrained model!", key_with_prefix)
raise Exception
# used_params = scratch_params
elif ignore_head and 'head' in key:
# ignore head weights
print("found head key / parameter, ignore", key)
used_params = scratch_params
elif len(scratch_params.shape) != len(loaded_params.shape):
# same keys, but inflating weights
print('key: shape DOES NOT MATCH', key)
print("scratch:", scratch_params.shape)
print("pretrain:", loaded_params.shape)
# need the index [-3], 3rd from last, the temporal dim
index = -3
temporal_dim = scratch_params.shape[index] # temporal len of kernel
temporal_kernel_factor = 1 if normalize else 1 / temporal_dim
used_params = repeat(temporal_kernel_factor*loaded_params, '... h w -> ... t h w', t=temporal_dim)
# loaded_params = temporal_kernel_factor * loaded_params.unsqueeze(index) # unsqueeze
# used_params = torch.cat(temporal_dim * [loaded_params], axis=index) # stack at this dim
else:
# print('key: shape MATCH', key) # loading matched weights
# used_params = loaded_params
continue
state_dict[key_with_prefix] = used_params
return state_dict
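# Illustrative sketch (assumed toy sizes): the core inflation step used above, repeating a 2D
# conv kernel along a new temporal axis with einops.repeat. With the 1/T-scaled variant the
# temporal sum reproduces the original 2D response.
def _example_inflate_conv_weight():
    import torch
    from einops import repeat
    w2d = torch.randn(96, 3, 4, 4)                        # (out_ch, in_ch, H, W)
    t = 4                                                 # temporal kernel length in the 3D model
    w3d = repeat(w2d, '... h w -> ... t h w', t=t)        # (96, 3, 4, 4, 4)
    assert w3d.shape == (96, 3, t, 4, 4)
    w3d_avg = repeat(w2d / t, '... h w -> ... t h w', t=t)
    assert torch.allclose(w3d_avg.sum(dim=2), w2d)        # summing over time recovers the 2D kernel
    return w3d, w3d_avg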
def convnext_timm_tiny_s4nd_2d_to_3d(model, state_dict, ignore_head=True, jank=False):
"""
inputs:
model: nn.Module, the from 'scratch' model
state_dict: dict, from the pretrained weights
ignore_head: bool, whether to inflate weights in the head (or keep scratch weights).
If number of classes changes (eg, imagenet to hmdb51), then you need to use this.
return:
state_dict: dict, update with inflated weights
"""
# model_scratch_params_dict = dict(model.named_parameters())
model_scratch_params_dict = {**dict(model.named_parameters()), **dict(model.named_buffers())}
prefix = list(state_dict.keys())[0].split('.')[0] # grab prefix in the keys for state_dict params
new_state_dict = copy.deepcopy(state_dict)
# for key in state_dict.keys():
# print(key)
# breakpoint()
for key in sorted(model_scratch_params_dict.keys()):
        # need to add the prefix 'model' in convnext
key_with_prefix = prefix + '.' + key
# HACK
old_key_with_prefix = key_with_prefix.replace("inv_w_real", "log_w_real")
# print(key)
# if '.kernel.L' in key:
# print(key, state_dict[old_key_with_prefix])
if '.kernel.0' in key:
# temporal dim is loaded from scratch
print("found .kernel.0:", key)
new_state_dict[key_with_prefix] = model_scratch_params_dict[key]
elif '.kernel.1' in key:
# This is the 1st kernel --> 0th kernel from pretrained model
print("FOUND .kernel.1, putting kernel 0 into kernel 1", key)
new_state_dict[key_with_prefix] = state_dict[old_key_with_prefix.replace(".kernel.1", ".kernel.0")]
elif '.kernel.2' in key:
print("FOUND .kernel.2, putting kernel 1 into kernel 2", key)
new_state_dict[key_with_prefix] = state_dict[old_key_with_prefix.replace(".kernel.2", ".kernel.1")]
elif ignore_head and 'head' in key:
# ignore head weights
print("found head key / parameter, ignore", key)
new_state_dict[key_with_prefix] = model_scratch_params_dict[key]
# keys match
else:
# check if mismatched shape, if so, need to inflate
# this covers cases where we did not use s4 (eg, optionally use conv2d in downsample or the stem)
try:
if model_scratch_params_dict[key].ndim != state_dict[old_key_with_prefix].ndim:
print("matching keys, but shapes mismatched! Need to inflate!", key)
# need the index [-3], 3rd from last, the temporal dim
index = -3
dim_len = model_scratch_params_dict[key].shape[index]
# loaded_params = state_dict[key_with_prefix].unsqueeze(index) # unsqueeze
# new_state_dict[key_with_prefix] = torch.cat(dim_len * [loaded_params], axis=index) # stack at this dim
new_state_dict[key_with_prefix] = repeat(state_dict[old_key_with_prefix], '... h w -> ... t h w', t=dim_len) # torch.cat(dim_len * [loaded_params], axis=index) # stack at this dim
else:
# matching case, shapes, match, load into new_state_dict as is
new_state_dict[key_with_prefix] = state_dict[old_key_with_prefix]
# something went wrong, the keys don't actually match (and they should)!
except:
print("unmatched key", key)
breakpoint()
# continue
return new_state_dict
if __name__ == '__main__':
model = convnext_tiny(
stem_type='new_s4nd_patch',
stem_channels=32,
stem_l_max=[16, 16],
downsample_type='s4nd',
downsample_glu=True,
stage_layers=[dict(dt_min=0.1, dt_max=1.0)] * 4,
stem_layer=dict(dt_min=0.1, dt_max=1.0, init='fourier'),
layer=dict(
_name_='s4nd',
bidirectional=True,
init='fourier',
dt_min=0.01,
dt_max=1.0,
n_ssm=1,
return_state=False,
),
img_size=[224, 224],
)
# model = convnext_tiny(
# stem_type='patch',
# downsample_type=None,
# stage_layers=[None] * 4,
# img_size=[224, 224],
# )
vmodel = convnext3d_tiny(
stem_type='new_s4nd_patch',
stem_channels=32,
stem_l_max=[100, 16, 16],
downsample_type='s4nd',
downsample_glu=True,
stage_layers=[dict(dt_min=0.1, dt_max=1.0)] * 4,
stem_layer=dict(dt_min=0.1, dt_max=1.0, init='fourier'),
layer=dict(
_name_='s4nd',
bidirectional=True,
init='fourier',
dt_min=0.01,
dt_max=1.0,
n_ssm=1,
contract_version=1,
return_state=False,
),
video_size=[100, 224, 224],
)
# vmodel = convnext3d_tiny(
# stem_type='patch',
# downsample_type=None,
# stage_layers=[None] * 4,
# video_size=[100, 224, 224],
# )
model.cuda()
x = torch.rand(1, 3, 224, 224).cuda()
y = model(x)[0]
print(y)
breakpoint()
vmodel.cuda()
x = torch.rand(1, 3, 50, 224, 224).cuda()
y = vmodel(x)[0]
print(y)
print(y.shape)
breakpoint()
# 3D Stem Conv options
# 1, 4, 4 kernel and stride
# 7, 4, 4 kernel and stride 2, 4, 4
| state-spaces-main | src/models/baselines/convnext_timm.py |
"""Adapted from LipschitzRNN https://github.com/erichson/LipschitzRNN.
Original code left as comments
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from src.models.sequence.base import SequenceModule
from copy import deepcopy
from torchdiffeq import odeint as odeint
def gaussian_init_(n_units, std=1):
sampler = torch.distributions.Normal(torch.Tensor([0]),
torch.Tensor([std / n_units]))
A_init = sampler.sample((n_units, n_units))[..., 0]
return A_init
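# Illustrative sketch: gaussian_init_ returns an (n, n) matrix of i.i.d. Gaussian entries with
# standard deviation std / n, so the typical entry magnitude shrinks as the hidden size grows.
def _example_gaussian_init():
    A = gaussian_init_(4, std=1)
    assert A.shape == (4, 4)
    return A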
class LipschitzRNN_ODE(nn.Module):
"""The derivative of the continuous-time RNN, to plug into an integrator."""
def __init__(self, d_model, beta, gamma, init_std):
super().__init__()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.gamma = gamma
self.beta = beta
self.tanh = nn.Tanh()
self.z = torch.zeros(d_model)
self.C = nn.Parameter(gaussian_init_(d_model, std=init_std))
self.B = nn.Parameter(gaussian_init_(d_model, std=init_std))
self.I = torch.eye(d_model).to(self.device)
self.i = 0
def forward(self, t, h):
"""dh/dt as a function of time and h(t)."""
if self.i == 0:
self.A = self.beta * (self.B - self.B.transpose(1, 0)) + (
1 - self.beta) * (self.B +
self.B.transpose(1, 0)) - self.gamma * self.I
self.W = self.beta * (self.C - self.C.transpose(1, 0)) + (
1 - self.beta) * (self.C +
self.C.transpose(1, 0)) - self.gamma * self.I
return torch.matmul(
h, self.A) + self.tanh(torch.matmul(h, self.W) + self.z)
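# Illustrative sketch (toy sizes; not used by the model): the A and W matrices above are built
# as beta * (B - B^T) + (1 - beta) * (B + B^T) - gamma * I. With beta = 1 only the
# skew-symmetric part survives, so every eigenvalue of A has real part -gamma, which is what
# keeps the recurrence well-behaved.
def _example_lipschitz_matrix_construction():
    import torch
    torch.manual_seed(0)
    d, beta, gamma = 8, 1.0, 0.01
    B = torch.randn(d, d)
    I = torch.eye(d)
    A = beta * (B - B.T) + (1 - beta) * (B + B.T) - gamma * I
    skew_part = A + gamma * I
    assert torch.allclose(skew_part, -skew_part.T)        # skew-symmetric when beta == 1
    real_parts = torch.linalg.eigvals(A).real
    assert torch.all(real_parts <= 1e-5)                  # all real parts sit at -gamma
    return A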
class RnnModels(SequenceModule): #(nn.Module):
"""Generator of multiple possible general RNN forms."""
@property
def d_output(self): #TODO: check
return self.d_model
def __init__(self,
# d_input,
# d_output,
d_model=128,
chunk=1,
eps=0.01,
beta=0.8,
gamma=0.01,
gated=False,
init_std=1,
alpha=1,
model='LipschitzRNN',
solver='euler',
l_output=0,
l_max=-1,
):
super().__init__()
# self.d_input = d_input
self.d_model = d_model
# self.chunk = chunk
self.eps = eps
self.model = model
self.solver = solver
self.gamma = gamma
self.beta = beta
self.alpha = alpha
self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
# self.E = nn.Linear(d_input*self.chunk, d_model)
# self.D = nn.Linear(d_model, d_output)
self.register_buffer('I', torch.eye(d_model))
if self.model == 'simpleRNN':
self.W = nn.Linear(d_model, d_model, bias=False)
self.W.weight.data = gaussian_init_(d_model, std=init_std)
elif self.model == 'resRNN':
self.W = nn.Linear(d_model, d_model, bias=False)
self.W.weight.data = gaussian_init_(d_model, std=init_std)
elif self.model == 'asymRNN':
self.C = nn.Parameter(gaussian_init_(d_model, std=init_std))
elif self.model == 'calRNN':
U, _, V = torch.svd(gaussian_init_(d_model, std=init_std))
self.C = nn.Parameter(torch.mm(U, V.t()).float())
elif self.model == 'LipschitzRNN':
self.C = nn.Parameter(gaussian_init_(d_model, std=init_std))
self.B = nn.Parameter(gaussian_init_(d_model, std=init_std))
elif self.model == 'LipschitzRNN_gated':
self.C = nn.Parameter(gaussian_init_(d_model, std=init_std))
self.B = nn.Parameter(gaussian_init_(d_model, std=init_std))
# self.E_gate = nn.Linear(d_input, d_model)
elif self.model == 'LipschitzRNN_ODE':
self.func = LipschitzRNN_ODE(d_model, beta, gamma, init_std)
else:
print("Unexpected model!")
raise NotImplementedError
    def step(self, x, state):
        # THIS CODE IS UNTESTED
        if self.model == 'LipschitzRNN':
            # A and W are cheap to rebuild, so recompute them on every step;
            # a fresh zero state is created on the first call
            A = self.beta * (self.B - self.B.transpose(1, 0)) + (
                1 - self.beta) * (self.B + self.B.transpose(
                    1, 0)) - self.gamma * self.I
            W = self.beta * (self.C - self.C.transpose(1, 0)) + (
                1 - self.beta) * (self.C + self.C.transpose(
                    1, 0)) - self.gamma * self.I
            if state is None:
                state = torch.zeros(x.shape[0], self.d_model, device=x.device)
            state = state + self.eps * self.alpha * torch.matmul(state, A) + \
                self.eps * self.tanh(torch.matmul(state, W) + x)
        return x, state
def forward(self, x, *args, **kwargs):
# x = x.reshape(x.shape[0], -1, self.d_input*self.chunk)
T = x.shape[1]
h = torch.zeros(x.shape[0], self.d_model, device=x.device)
for i in range(T):
# z = self.E(x[:, i, :])
z = x[:, i, :]
if self.model == 'simpleRNN':
h = self.tanh(self.W(h) + z)
elif self.model == 'resRNN':
h = h + self.eps * self.tanh(self.W(h) + z)
elif self.model == 'asymRNN':
if i == 0:
W = self.C - self.C.transpose(1, 0) - self.gamma * self.I
h = h + self.eps * self.tanh(torch.matmul(h, W) + z)
elif self.model == 'calRNN':
if i == 0:
C = self.C - self.C.transpose(1, 0)
W = torch.matmul(torch.inverse(self.I + C), self.I - C)
h = self.tanh(torch.matmul(h, W) + z)
elif self.model == 'LipschitzRNN':
if i == 0:
A = self.beta * (self.B - self.B.transpose(1, 0)) + (
1 - self.beta) * (self.B + self.B.transpose(
1, 0)) - self.gamma * self.I
W = self.beta * (self.C - self.C.transpose(1, 0)) + (
1 - self.beta) * (self.C + self.C.transpose(
1, 0)) - self.gamma * self.I
h = h + self.eps * self.alpha * torch.matmul(
h, A) + self.eps * self.tanh(torch.matmul(h, W) + z)
elif self.model == 'LipschitzRNN_gated':
if i == 0:
A = self.beta * (self.B - self.B.transpose(1, 0)) + (
1 - self.beta) * (self.B + self.B.transpose(
1, 0)) - self.gamma * self.I
W = self.beta * (self.C - self.C.transpose(1, 0)) + (
1 - self.beta) * (self.C + self.C.transpose(
1, 0)) - self.gamma * self.I
z_gate = self.E_gate(x[:, i, :])
Wh = torch.matmul(h, W)
Ah = torch.matmul(h, A)
q1 = self.alpha * Ah + self.tanh(Wh + z)
q2 = self.sigmoid(Wh + z_gate)
h = h + self.eps * q1 * q2
elif self.model == 'LipschitzRNN_ODE':
self.func.z = z
self.func.i = i
h = odeint(self.func,
h,
torch.tensor([0, self.eps]).float(),
method=self.solver)[-1, :, :]
# Decoder
#----------
# out = self.D(h)
# return out
return h.unsqueeze(1), None
| state-spaces-main | src/models/baselines/lipschitzrnn.py |
"""Implementation of SampleRNN model.
Paper: https://arxiv.org/abs/1612.07837
"""
import torch
import torch.nn.functional as F
from torch.nn import init
import math
import numpy as np
from src.models.baselines.lstm import TorchLSTM
from src.models.baselines.gru import TorchGRU
from src.models.sequence.base import SequenceModule
from src.models.sequence.modules.s4block import S4Block
from src.dataloaders.audio import mu_law_decode, linear_decode, q_zero
class StackedRNN(SequenceModule):
"""
StackedRNN with skip connections:
Input (d_model) -> RNN_1 (d_hidden) -> Linear (d_hidden, d_hidden) -> Output
[Input, RNN_1] (d_model + d_hidden) -> RNN_2 (d_hidden) -> Linear (d_hidden, d_hidden) -> += Output
[Input, RNN_2] (d_model + d_hidden) -> RNN_3 (d_hidden) -> Linear (d_hidden, d_hidden) -> += Output
...
"""
@property
def d_output(self):
return self.d_model if self.output_linear else self.d_hidden
def __init__(
self,
d_model,
d_hidden,
n_layers,
learn_h0=False,
rnn_type='gru',
skip_connections=False,
weight_norm=False,
dropout=0.0,
output_linear=False,
):
super().__init__()
self.d_model = d_model
self.d_hidden = d_hidden
self.n_layers = n_layers
self.learn_h0 = learn_h0
self.skip_connections = skip_connections
self.weight_norm = torch.nn.utils.weight_norm if weight_norm else lambda x: x
self.output_linear = output_linear
self.rnn_layers = torch.nn.ModuleList()
self.lin_layers = torch.nn.ModuleList()
self.dropout_layers = torch.nn.ModuleList()
self.rnn_type = rnn_type
if rnn_type == 'lstm':
RNN = TorchLSTM
elif rnn_type == 'gru':
RNN = TorchGRU
else:
raise ValueError('rnn_type must be lstm or gru')
for i in range(n_layers):
if i == 0:
self.rnn_layers.append(
RNN(d_model=d_model, d_hidden=d_hidden, n_layers=1, learn_h0=learn_h0),
)
else:
if skip_connections:
self.rnn_layers.append(
RNN(d_model=d_model + d_hidden, d_hidden=d_hidden, n_layers=1, learn_h0=learn_h0),
)
else:
self.rnn_layers.append(
RNN(d_model=d_hidden, d_hidden=d_hidden, n_layers=1, learn_h0=learn_h0),
)
if skip_connections:
self.lin_layers.append(self.weight_norm(torch.nn.Linear(d_hidden, d_hidden)))
else:
self.lin_layers.append(torch.nn.Identity())
if dropout > 0.0 and i < n_layers - 1:
self.dropout_layers.append(torch.nn.Dropout(dropout))
else:
self.dropout_layers.append(torch.nn.Identity())
if output_linear:
self.output_layer = self.weight_norm(torch.nn.Linear(d_hidden, d_model))
else:
self.output_layer = torch.nn.Identity()
# Apply weight norm to all the RNN layers
for rnn in self.rnn_layers:
# Find all Linear layers in the RNN
for name, module in rnn.named_modules():
if isinstance(module, torch.nn.Linear):
setattr(rnn, name, self.weight_norm(module))
# Use orthogonal initialization for W_hn if using GRU (weight_hh_l[0])
if rnn_type == 'gru':
for rnn in self.rnn_layers:
torch.nn.init.orthogonal_(rnn.weight_hh_l0[2 * d_hidden:].data)
def default_state(self, *batch_shape, device=None):
return [
rnn.default_state(*batch_shape, device=device)
for rnn in self.rnn_layers
]
def forward(self, inputs, *args, state=None, **kwargs):
outputs = inputs
prev_states = [None] * len(self.rnn_layers) if state is None else state
next_states = []
out = 0.
for rnn, prev_state, lin, dropout in zip(self.rnn_layers, prev_states, self.lin_layers, self.dropout_layers):
# Run RNN on inputs
outputs, state = rnn(outputs, prev_state)
next_states.append(state)
# If dropout, only apply to the outputs of RNNs that are not the last one (like torch's LSTM)
outputs = dropout(outputs)
z = lin(outputs)
if self.skip_connections:
# If skip connections, add the outputs of all the RNNs to the outputs
out += z
# Feed in the outputs of the previous RNN, and the original inputs to the next RNN
outputs = torch.cat([outputs, inputs], dim=-1)
else:
out = z
outputs = z
out = self.output_layer(out)
return out, next_states
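# Illustrative sketch (assumed toy sizes; relies on TorchGRU following the repo's batch-first
# (B, L, D) convention): with skip_connections=True each layer after the first consumes
# [hidden, input] and the per-layer linear outputs are summed, so the output width is d_hidden.
def _example_stacked_rnn_shapes():
    import torch
    rnn = StackedRNN(d_model=16, d_hidden=32, n_layers=3, rnn_type='gru', skip_connections=True)
    x = torch.randn(4, 10, 16)          # (batch, length, d_model)
    y, states = rnn(x)                  # state=None lets each layer build its default state
    # y is expected to be (4, 10, 32), i.e. (batch, length, d_hidden)
    return y, states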
class StackedRNNBaseline(SequenceModule):
"""Standard stacked RNN baseline in SampleRNN paper.
Marked as the "one_tier" model in the codebase.
https://github.com/soroushmehr/sampleRNN_ICLR2017/blob/master/models/one_tier/one_tier.py
Discrete Input (Q_LEVELS) -->
Embedding (EMB_SIZE) -->
----------- (start) this module implements the RNN + Linear Layers backbone -----------
StackedRNN (N_RNN \in [5], FRAME_SIZE, DIM, LEARNED_H0, WEIGHT_NORM, SKIP_CONNECTIONS) -->
Linear (DIM, DIM) + ReLU -->
Linear (DIM, DIM) + ReLU -->
Linear (DIM, DIM) + ReLU -->
----------- (end) this module implements the RNN + Linear Layers backbone -----------
Linear (DIM, Q_LEVELS)
"""
@property
def d_output(self):
return self.d_hidden
def __init__(
self,
d_model,
d_hidden,
n_layers,
learn_h0=False,
rnn_type='gru',
weight_norm=False,
skip_connections=True,
dropout=0.0,
):
super().__init__()
self.d_model = d_model
self.d_hidden = d_hidden
self.n_layers = n_layers
self.learn_h0 = learn_h0
self.weight_norm = weight_norm
self.skip_connections = skip_connections
self.rnn_type = rnn_type
self.rnn = StackedRNN(
d_model=d_model,
d_hidden=d_hidden,
n_layers=n_layers,
rnn_type=rnn_type,
skip_connections=skip_connections,
weight_norm=weight_norm,
dropout=dropout,
output_linear=False,
)
self.lin1 = torch.nn.Linear(d_hidden, d_hidden)
self.lin2 = torch.nn.Linear(d_hidden, d_hidden)
self.lin3 = torch.nn.Linear(d_hidden, d_hidden)
if weight_norm:
self.lin1 = torch.nn.utils.weight_norm(self.lin1)
self.lin2 = torch.nn.utils.weight_norm(self.lin2)
self.lin3 = torch.nn.utils.weight_norm(self.lin3)
def default_state(self, *batch_shape, device=None):
return self.rnn.default_state(*batch_shape, device=device)
def forward(self, inputs, *args, state=None, **kwargs):
outputs = inputs
outputs, state = self.rnn(outputs, state)
outputs = F.relu(self.lin1(outputs))
outputs = F.relu(self.lin2(outputs))
outputs = F.relu(self.lin3(outputs))
return outputs, state
class LearnedUpsampling1d(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True):
super().__init__()
self.conv_t = torch.nn.ConvTranspose1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=kernel_size,
bias=False,
)
if bias:
self.bias = torch.nn.Parameter(
torch.FloatTensor(out_channels, kernel_size)
)
else:
self.register_parameter('bias', None)
self.reset_parameters()
    def reset_parameters(self):
        self.conv_t.reset_parameters()
        if self.bias is not None:
            torch.nn.init.constant_(self.bias, 0)
def forward(self, input):
(batch_size, _, length) = input.size()
(kernel_size,) = self.conv_t.kernel_size
bias = self.bias.unsqueeze(0).unsqueeze(2).expand(
batch_size, self.conv_t.out_channels, length, kernel_size
).contiguous().view(
batch_size, self.conv_t.out_channels,
length * kernel_size
)
return self.conv_t(input) + bias
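# Illustrative sketch (assumed toy sizes): LearnedUpsampling1d uses a transposed conv whose
# stride equals its kernel size, so a length-L input becomes length L * kernel_size, with a
# learned per-offset bias added to every upsampled block.
def _example_learned_upsampling_shapes():
    import torch
    up = LearnedUpsampling1d(in_channels=8, out_channels=8, kernel_size=4)
    y = up(torch.randn(2, 8, 10))       # (batch, channels, length)
    assert y.shape == (2, 8, 40)        # 10 * 4 upsampled positions
    return y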
class SampleRNN(SequenceModule):
"""SampleRNN model.
Implementation adapted from https://github.com/deepsound-project/samplernn-pytorch.
"""
@property
def d_output(self):
return self.d_hidden
def __init__(
self,
frame_sizes=(16, 4),
n_rnn=2,
d_hidden=1024,
bits=8,
learn_h0=True,
d_model=256,
weight_norm=True,
reproduce=True,
quantization='linear',
layer='gru',
):
super().__init__()
self.d_hidden = d_hidden
self.d_model = d_model
self.reproduce = reproduce
self.bits = bits
self.quantization = quantization
self.layer = layer
if self.quantization == 'linear':
self.dequantizer = linear_decode
elif self.quantization == 'mu-law':
self.dequantizer = mu_law_decode
else:
raise ValueError(f"Unknown quantization type: {self.quantization}")
if not self.reproduce:
self.encoder = torch.nn.Embedding(1 << bits, d_model)
ns_frame_samples = map(int, np.cumprod(frame_sizes)) # e.g. (16, 4) -> (16, 64)
self.frame_level_rnns = torch.nn.ModuleList([
FrameLevelRNN(
frame_size=frame_size,
n_frame_samples=n_frame_samples,
d_model=d_model,
n_rnn=n_rnn,
d_hidden=d_hidden,
learn_h0=learn_h0,
weight_norm=weight_norm,
reproduce=reproduce,
layer=layer,
)
for (frame_size, n_frame_samples) in zip(frame_sizes, ns_frame_samples)
])
self.sample_level_mlp = SampleLevelMLP(
frame_size=frame_sizes[0],
d_hidden=d_hidden,
bits=bits,
d_model=d_model,
weight_norm=weight_norm,
reproduce=reproduce,
)
def default_state(self, batch_size, device=None):
self._reset_state=True # Special hacks for SampleRNN
return [rnn.default_state(batch_size, device=device) for rnn in self.frame_level_rnns]
def step(self, x, state=None, *args, **kwargs):
if len(x.shape) == 1:
x = x.unsqueeze(1)
batch_size = x.shape[0]
assert state is not None, "SampleRNN: State should be constructed with default_state before forward pass"
if self._reset_state: # Hacks for SampleRNN
self._reset_state = False
# state = self.default_state(batch_size, device=x.device)
self._frame_level_outputs = [None for _ in self.frame_level_rnns]
self._window = torch.zeros(
batch_size,
self.lookback,
x.shape[1] if len(x.shape) == 2 else x.shape[2],
dtype=x.dtype,
device=x.device,
) + q_zero(bits=self.bits)
self._step_idx = self.lookback
if len(x.shape) == 3:
assert x.shape[1] == self.lookback
self._window = x
if self._step_idx > self.lookback:
            # Update the rolling window (skipped on the very first step)
self._window[:, :-1] = self._window[:, 1:].clone()
self._window[:, -1] = x
new_states = []
for (i, rnn), state_ in zip(reversed(list(enumerate(self.frame_level_rnns))), reversed(state)):
if self._step_idx % rnn.n_frame_samples != 0:
# Don't need to process this rnn
new_states.append(state_)
continue
# prev_samples shape: (B, CHUNK_SIZE, D) e.g. (16, 16384, 1)
prev_samples = self._window[:, -rnn.n_frame_samples:]
if self.reproduce:
# SampleRNN dequantizes to recover the raw audio signal before passing this to the RNN
prev_samples = self.dequantizer(prev_samples, bits=self.bits)
prev_samples = 2 * prev_samples.contiguous()
# Below, reshape from (B, CHUNK_SIZE, D) -> (B, -1, rnn.n_frame_samples) = (B, M_i, F_i)
# e.g. (16, 16384, 1) -> (16, 256, 64) [first rnn] | (16, 1024, 16) [second rnn]
prev_samples = prev_samples.view(batch_size, -1, rnn.n_frame_samples)
else:
raise NotImplementedError
# More generally, we can use an Embedding encoder instead
prev_samples = self.encoder(prev_samples)
prev_samples = prev_samples.contiguous()
prev_samples = prev_samples.view(batch_size, -1, rnn.n_frame_samples, self.d_model)
# upper_tier_conditioning shape: None -> (B, M, D_HIDDEN) [first rnn]
# (B, M_{i-1}, D_HIDDEN) -> (B, M_i, D_HIDDEN) [second rnn]
if i == len(self.frame_level_rnns) - 1:
upper_tier_conditioning = None
else:
frame_index = (self._step_idx // rnn.n_frame_samples) % self.frame_level_rnns[i + 1].frame_size
upper_tier_conditioning = self._frame_level_outputs[i + 1][:, frame_index, :].unsqueeze(1)
upper_tier_conditioning, new_state = rnn(prev_samples, upper_tier_conditioning, state_)
self._frame_level_outputs[i] = upper_tier_conditioning
new_states.append(new_state)
# Make sure new states are in the right order
new_states = list(reversed(new_states))
bottom_frame_size = self.frame_level_rnns[0].frame_size
mlp_input_sequences = self._window[:, -bottom_frame_size:]
# Upper tier conditioning for the bottom
upper_tier_conditioning = self._frame_level_outputs[0][:, self._step_idx % bottom_frame_size, :].unsqueeze(1)
y = self.sample_level_mlp(mlp_input_sequences, upper_tier_conditioning)
# Update window and step
self._step_idx += 1
        # mlp_input_sequences shape: (B, FRAME_SIZE, D)
        # upper_tier_conditioning shape: (B, 1, D_HIDDEN)
return y.squeeze(1), new_states # (B, D)
@property
def lookback(self):
return self.frame_level_rnns[-1].n_frame_samples
def forward(self, inputs, *args, state=None, **kwargs):
"""
inputs shape: (B, L, D) e.g. (16, 16447, 1)
For SampleRNN, inputs contains quantized audio samples (e.g. B elements of length L)
"""
batch_size = inputs.shape[0]
assert state is not None, "SampleRNN: State should be constructed with default_state before forward pass"
upper_tier_conditioning = None
new_states = []
for rnn, state_ in zip(reversed(self.frame_level_rnns), reversed(state)):
# TODO: explain this
from_index = self.lookback - rnn.n_frame_samples
to_index = -rnn.n_frame_samples + 1
# prev_samples shape: (B, CHUNK_SIZE, D) e.g. (16, 16384, 1)
prev_samples = inputs[:, from_index : to_index]
if self.reproduce:
# SampleRNN dequantizes to recover the raw audio signal before passing this to the RNN
prev_samples = self.dequantizer(prev_samples, bits=self.bits)
prev_samples = 2 * prev_samples.contiguous()
# Below, reshape from (B, CHUNK_SIZE, D) -> (B, -1, rnn.n_frame_samples) = (B, M_i, F_i)
# e.g. (16, 16384, 1) -> (16, 256, 64) [first rnn] | (16, 1024, 16) [second rnn]
prev_samples = prev_samples.view(batch_size, -1, rnn.n_frame_samples)
else:
# More generally, we can use an Embedding encoder instead
prev_samples = self.encoder(prev_samples)
prev_samples = prev_samples.contiguous()
prev_samples = prev_samples.view(batch_size, -1, rnn.n_frame_samples, self.d_model)
# upper_tier_conditioning shape: None -> (B, M, D_HIDDEN) [first rnn]
# (B, M_{i-1}, D_HIDDEN) -> (B, M_i, D_HIDDEN) [second rnn]
upper_tier_conditioning, new_state = rnn(prev_samples, upper_tier_conditioning, state_)
new_states.append(new_state)
# Make sure new states are in the right order
new_states = list(reversed(new_states))
bottom_frame_size = self.frame_level_rnns[0].frame_size
mlp_input_sequences = inputs[:, self.lookback - bottom_frame_size : ]
# mlp_input_sequences shape: (B, L - _, D) e.g. (16, 16399, 1)
# upper_tier_conditioning shape: (B, M_{last_rnn}, D_HIDDEN) [last rnn]
return self.sample_level_mlp(mlp_input_sequences, upper_tier_conditioning), new_states
def lecun_uniform(tensor):
fan_in = torch.nn.init._calculate_correct_fan(tensor, 'fan_in')
torch.nn.init.uniform(tensor, -math.sqrt(3 / fan_in), math.sqrt(3 / fan_in))
def concat_init(tensor, inits):
try:
tensor = tensor.data
except AttributeError:
pass
(length, fan_out) = tensor.size()
fan_in = length // len(inits)
chunk = tensor.new(fan_in, fan_out)
for (i, init) in enumerate(inits):
init(chunk)
tensor[i * fan_in : (i + 1) * fan_in, :] = chunk
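# Illustrative sketch (not part of the original training code): concat_init splits a
# stacked weight matrix into len(inits) equal row-chunks and applies one initializer per
# chunk, mirroring how GRU gate weights (reset/update/new) are stacked along dim 0 in
# weight_ih_l{i} / weight_hh_l{i}.
def _example_concat_init():
    w = torch.empty(3 * 4, 5)  # e.g. a GRU weight_ih with hidden size 4 and input size 5
    concat_init(w, [lecun_uniform, lecun_uniform, lecun_uniform])
    return w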
class FrameLevelRNN(torch.nn.Module):
def __init__(
self,
frame_size,
n_frame_samples,
d_model,
n_rnn,
d_hidden,
learn_h0=True,
weight_norm=True,
reproduce=False,
layer='gru',
):
super().__init__()
self.frame_size = frame_size
self.n_frame_samples = n_frame_samples
self.d_model = d_model
self.d_hidden = d_hidden
self.n_rnn = n_rnn
self.learn_h0 = learn_h0
self.weight_norm = weight_norm
self.reproduce = reproduce
self.layer = layer
if self.reproduce:
assert learn_h0, "Original SampleRNN FrameLevelRNN learns h0."
assert weight_norm, "Original SampleRNN FrameLevelRNN uses weight norm."
if reproduce:
self.input_expand = torch.nn.Conv1d(
in_channels=n_frame_samples,
out_channels=d_hidden,
kernel_size=1,
)
torch.nn.init.kaiming_uniform(self.input_expand.weight)
torch.nn.init.constant(self.input_expand.bias, 0)
else:
self.input_expand = torch.nn.Conv1d(
in_channels=d_model,
out_channels=d_hidden,
kernel_size=n_frame_samples,
stride=n_frame_samples,
)
if self.layer == 'gru':
self.rnn = TorchGRU(
d_model=d_hidden,
d_hidden=d_hidden,
n_layers=n_rnn,
learn_h0=learn_h0,
)
elif self.layer == 's4':
self.rnn = S4Block(
H=d_hidden,
d_state=64,
use_state=False,
)
if reproduce:
if self.layer == 'gru':
for i in range(n_rnn):
concat_init(
getattr(self.rnn, 'weight_ih_l{}'.format(i)),
[lecun_uniform, lecun_uniform, lecun_uniform]
)
torch.nn.init.constant(getattr(self.rnn, 'bias_ih_l{}'.format(i)), 0)
concat_init(
getattr(self.rnn, 'weight_hh_l{}'.format(i)),
[lecun_uniform, lecun_uniform, torch.nn.init.orthogonal]
)
torch.nn.init.constant(getattr(self.rnn, 'bias_hh_l{}'.format(i)), 0)
self.upsampling = LearnedUpsampling1d(
in_channels=d_hidden,
out_channels=d_hidden,
kernel_size=frame_size,
)
torch.nn.init.uniform(
self.upsampling.conv_t.weight, -np.sqrt(6 / d_hidden), np.sqrt(6 / d_hidden)
)
torch.nn.init.constant(self.upsampling.bias, 0)
else:
self.upsampling = torch.nn.ConvTranspose1d(
in_channels=d_hidden,
out_channels=d_hidden,
kernel_size=frame_size,
stride=frame_size,
bias=True,
)
if weight_norm and reproduce:
self.input_expand = torch.nn.utils.weight_norm(self.input_expand)
self.upsampling.conv_t = torch.nn.utils.weight_norm(self.upsampling.conv_t)
else:
self.input_expand = torch.nn.utils.weight_norm(self.input_expand)
self.upsampling = torch.nn.utils.weight_norm(self.upsampling)
def default_state(self, batch_size, device=None):
if self.layer == 'gru':
return self.rnn.default_state(batch_size, device=device)
elif self.layer == 's4':
return None
def forward(self, prev_samples, upper_tier_conditioning, state=None):
"""
prev_samples: (B, M_i, D_MODEL) if self.reproduce else (B, M_i, FRAME, D_MODEL)
upper_tier_conditioning: (B, M_i, D_HIDDEN) or None
"""
if not self.reproduce:
# Use strided convolutions to get frame embeddings
# This generalizes the SampleRNN operation to handle non-1D signals
# This reshapes from (B, M_i, FRAME, D_MODEL) -> (B, M_i, D_HIDDEN)
prev_samples = prev_samples.view(prev_samples.shape[0], -1, self.d_model)
input = self.input_expand(prev_samples.permute(0, 2, 1)).permute(0, 2, 1)
else:
# SampleRNN uses an MLP (implemented as 1D Conv) to map (FRAME_SIZE, 1) to D_HIDDEN
# This reshapes from (B, M_i, FRAME) -> (B, M_i, D_HIDDEN)
input = self.input_expand(prev_samples.permute(0, 2, 1)).permute(0, 2, 1)
if upper_tier_conditioning is not None:
input += upper_tier_conditioning
# Run RNN: (B, M_i, D_HIDDEN) -> (B, M_i, D_HIDDEN)
if self.layer == 'gru':
output, state = self.rnn(input, state.contiguous())
elif self.layer == 's4':
# TODO: not working
output, state = self.rnn(input.transpose(1, 2), state)
output = output.transpose(1, 2)
# Run 1D transposed convolution to upsample: (B, M_i, D_HIDDEN) -> (B, M', D_HIDDEN)
# TODO: make M' more precise
output = self.upsampling(output.permute(0, 2, 1)).permute(0, 2, 1)
return output, state
class SampleLevelMLP(torch.nn.Module):
def __init__(
self,
frame_size,
d_hidden,
bits=8,
d_model=256,
weight_norm=True,
embedding=True,
reproduce=False,
):
super().__init__()
self.d_model = d_model
self.reproduce = reproduce
if self.reproduce:
assert embedding, "Original SampleRNN SampleLevelMLP uses an embedding layer."
assert weight_norm, "Original SampleRNN SampleLevelMLP uses weight norm."
if embedding:
self.embedding = torch.nn.Embedding(1 << bits, d_model)
self.input = torch.nn.Conv1d(
in_channels=d_model,
out_channels=d_hidden,
kernel_size=frame_size,
bias=False,
)
if self.reproduce:
self.hidden = torch.nn.Conv1d(
in_channels=d_hidden,
out_channels=d_hidden,
kernel_size=1,
)
else:
self.hidden = torch.nn.Linear(d_hidden, d_hidden)
if self.reproduce:
self.output = torch.nn.Conv1d(
in_channels=d_hidden,
out_channels=256,
kernel_size=1,
)
else:
self.output = torch.nn.Linear(d_hidden, 256)
if self.reproduce:
torch.nn.init.kaiming_uniform(self.input.weight)
torch.nn.init.kaiming_uniform(self.hidden.weight)
torch.nn.init.constant(self.hidden.bias, 0)
lecun_uniform(self.output.weight)
torch.nn.init.constant(self.output.bias, 0)
if weight_norm:
self.input = torch.nn.utils.weight_norm(self.input)
self.hidden = torch.nn.utils.weight_norm(self.hidden)
self.output = torch.nn.utils.weight_norm(self.output)
def forward(self, prev_samples, upper_tier_conditioning):
if self.embedding:
# Embed the input samples (which are quantized)
# This reshapes from (B, L, 1) -> (B, L, D_MODEL)
prev_samples = self.embedding(
prev_samples.contiguous().view(-1)
).view(prev_samples.shape[0], -1, self.d_model)
assert prev_samples.shape[-1] == self.d_model, "`prev_samples` shape should be (B, L', D_MODEL)"
# prev_samples: (B, L', D_MODEL) -> (B, D_MODEL, L')
# upper_tier_conditioning: (B, L, D_HIDDEN) -> (B, D_HIDDEN, L)
prev_samples = prev_samples.permute(0, 2, 1)
upper_tier_conditioning = upper_tier_conditioning.permute(0, 2, 1)
if self.reproduce:
# Take (B, L', D_MODEL), (B, L, D_HIDDEN) -> (B, D_HIDDEN, L)
x = F.relu(self.input(prev_samples) + upper_tier_conditioning)
x = F.relu(self.hidden(x))
x = self.output(x).permute(0, 2, 1)
else:
# Take (B, L', D_MODEL), (B, L, D_HIDDEN) -> (B, D_HIDDEN, L)
x = F.relu(self.input(prev_samples) + upper_tier_conditioning)
# x: (B, D_HIDDEN, L) -> (B, L, D_HIDDEN)
x = x.permute(0, 2, 1)
x = F.relu(self.hidden(x))
x = self.output(x)
return x.contiguous()
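if __name__ == "__main__":
    # Shape-check sketch (illustrative only; the hyperparameters below are made up).
    # The MLP consumes frame_size + T - 1 quantized samples plus T conditioning vectors
    # and emits T distributions over 256 output levels.
    B, T, frame_size, d_hidden, d_model, bits = 2, 8, 16, 64, 32, 8
    mlp = SampleLevelMLP(frame_size=frame_size, d_hidden=d_hidden, bits=bits, d_model=d_model)
    prev = torch.randint(0, 1 << bits, (B, frame_size + T - 1, 1))  # quantized samples
    cond = torch.randn(B, T, d_hidden)                              # upper-tier conditioning
    out = mlp(prev, cond)
    print(out.shape)  # torch.Size([2, 8, 256])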
| state-spaces-main | src/models/baselines/samplernn.py |
"""Wrapper around nn.LSTM to make it compatible with our RNN interface."""
import torch
from torch import nn
from src.models.sequence import SequenceModule, TransposedModule
from einops import rearrange
import src.models.nn.utils as U
@TransposedModule
class TorchLSTM(nn.LSTM, SequenceModule):
""" Wrapper around nn.LSTM to make it compatible with our RNN interface """
def __init__(self, d_model, d_hidden, n_layers=1, learn_h0=False, **kwargs):
# Rename input_size, hidden_size to d_input, d_model
# Set batch_first as default as per this codebase's convention
self.d_model = d_model
self.d_hidden = d_hidden
self.n_layers = n_layers
self.learn_h0 = learn_h0
super().__init__(d_model, d_hidden, num_layers=n_layers, batch_first=True, **kwargs)
self.num_directions = 2 if self.bidirectional else 1
self.real_hidden_size = self.proj_size if self.proj_size > 0 else self.hidden_size
if learn_h0:
self.h0 = nn.Parameter(torch.zeros(self.num_layers * self.num_directions, 1, self.real_hidden_size))
self.c0 = nn.Parameter(torch.zeros(self.num_layers * self.num_directions, 1, self.hidden_size))
# def forward(self, x, state=None, **kwargs):
# # Note that state is (h_n, c_n)
# y = super().forward(x, state)
# return y, state
def step(self, x, state):
raise NotImplementedError("Needs to be implemented.")
def default_state(self, *batch_shape, device=None):
"""
Snippet from nn.LSTM source
# https://pytorch.org/docs/stable/_modules/torch/nn/modules/rnn.html#LSTM
"""
if not self.learn_h0:
h_zeros = torch.zeros(self.num_layers * self.num_directions,
*batch_shape, self.real_hidden_size,
dtype=torch.float, device=device)
c_zeros = torch.zeros(self.num_layers * self.num_directions,
*batch_shape, self.hidden_size,
dtype=torch.float, device=device)
else:
h_zeros = self.h0.expand(self.num_layers * self.num_directions, *batch_shape, self.real_hidden_size)
c_zeros = self.c0.expand(self.num_layers * self.num_directions, *batch_shape, self.hidden_size)
return (h_zeros, c_zeros)
@property
def d_state(self):
return self.n_layers * self.d_model
@property
def d_output(self):
return self.d_hidden
@property
def state_to_tensor(self):
if self.n_layers == 1:
return lambda state: state[0]
else:
return lambda state: rearrange(state[0], 'd b h -> b (d h)')
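if __name__ == "__main__":
    # Illustrative sketch (assumption: the TransposedModule wrapper leaves the constructor
    # signature unchanged): with learn_h0=True, default_state broadcasts the learned
    # (num_layers, 1, H) initial states across the requested batch size.
    lstm = TorchLSTM(d_model=8, d_hidden=16, n_layers=2, learn_h0=True)
    h0, c0 = lstm.default_state(4)
    print(h0.shape, c0.shape)  # torch.Size([2, 4, 16]) torch.Size([2, 4, 16])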
| state-spaces-main | src/models/baselines/lstm.py |
"""Wrapper around nn.GRU to make it compatible with our RNN interface. Similar to lstm.TorchLSTM."""
import torch
from torch import nn
from src.models.sequence import SequenceModule, TransposedModule
from einops import rearrange
import src.models.nn.utils as U
@TransposedModule
class TorchGRU(nn.GRU, SequenceModule):
""" Wrapper around nn.GRU to make it compatible with our RNN interface """
def __init__(self, d_model, d_hidden, n_layers=1, learn_h0=False, **kwargs):
# Rename input_size, hidden_size to d_input, d_model
# Set batch_first as default as per this codebase's convention
self.d_model = d_model
self.d_hidden = d_hidden
self.n_layers = n_layers
self.learn_h0 = learn_h0
super().__init__(d_model, d_hidden, num_layers=n_layers, batch_first=True, **kwargs)
self.num_directions = 2 if self.bidirectional else 1
if self.learn_h0:
self.h0 = nn.Parameter(torch.zeros(self.num_layers * self.num_directions, 1, self.hidden_size))
def step(self, x, state):
raise NotImplementedError
def default_state(self, *batch_shape, device=None):
"""
Snippet from nn.LSTM source
# https://pytorch.org/docs/stable/_modules/torch/nn/modules/rnn.html#LSTM
"""
if not self.learn_h0:
h_zeros = torch.zeros(self.num_layers * self.num_directions,
*batch_shape, self.hidden_size,
dtype=torch.float, device=device)
else:
h_zeros = self.h0.expand(self.num_layers * self.num_directions, *batch_shape, self.hidden_size)
return h_zeros
@property
def d_state(self):
return self.n_layers * self.d_hidden
@property
def d_output(self):
return self.d_hidden
@property
def state_to_tensor(self):
if self.n_layers == 1:
return lambda state: state[0]
else:
return lambda state: rearrange(state[0], 'd b h -> b (d h)')
| state-spaces-main | src/models/baselines/gru.py |
"""Implementation of Continuous Kernel Convolution (CKConv).
Paper: https://arxiv.org/abs/2102.02611
Adapted directly from https://github.com/dwromero/ckconv.
"""
from typing import Tuple, Optional
import numpy as np
import torch
import torch.fft
import torch.nn as nn
import torch.nn.functional as f
from torch.nn.utils import weight_norm
def Linear1d(
in_channels: int,
out_channels: int,
stride: int = 1,
bias: bool = True,
) -> torch.nn.Module:
"""
Implements a Linear Layer in terms of a point-wise convolution.
"""
return nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride, bias=bias)
def Linear2d(
in_channels: int,
out_channels: int,
stride: int = 1,
bias: bool = True,
) -> torch.nn.Module:
"""
Implements a Linear Layer in terms of a point-wise convolution.
"""
return nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=bias)
def Swish():
"""
out = x * sigmoid(x)
"""
return Expression(lambda x: x * torch.sigmoid(x))
def Sine():
"""
out = sin(x)
"""
return Expression(lambda x: torch.sin(x))
class LayerNorm(nn.Module):
def __init__(
self,
num_channels: int,
eps: float = 1e-12,
):
"""Uses GroupNorm implementation with group=1 for speed."""
super().__init__()
# we use GroupNorm to implement this efficiently and fast.
self.layer_norm = torch.nn.GroupNorm(1, num_channels=num_channels, eps=eps)
def forward(self, x):
return self.layer_norm(x)
# From LieConv
class Expression(torch.nn.Module):
def __init__(self, func):
"""
Creates a torch.nn.Module that applies the function func.
:param func: lambda function
"""
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
def Multiply(
omega_0: float,
):
"""
out = omega_0 * x
"""
return Expression(lambda x: omega_0 * x)
def causal_padding(
x: torch.Tensor,
kernel: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
# 1. Pad the input signal & kernel tensors.
# Check if sizes are odd. If not, add a pad of zero to make them odd.
if kernel.shape[-1] % 2 == 0:
kernel = f.pad(kernel, [1, 0], value=0.0)
# x = torch.nn.functional.pad(x, [1, 0], value=0.0)
# 2. Perform padding on the input so that output equals input in length
x = f.pad(x, [kernel.shape[-1] - 1, 0], value=0.0)
return x, kernel
def causal_conv(
x: torch.Tensor,
kernel: torch.Tensor,
bias: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Args:
x: (Tensor) Input tensor to be convolved with the kernel.
kernel: (Tensor) Convolution kernel.
bias: (Optional, Tensor) Bias tensor to add to the output.
Returns:
(Tensor) Convolved tensor
"""
x, kernel = causal_padding(x, kernel)
return torch.nn.functional.conv1d(x, kernel, bias=bias, padding=0)
def causal_fftconv(
x: torch.Tensor,
kernel: torch.Tensor,
bias: Optional[torch.Tensor] = None,
double_precision: bool = False,
) -> torch.Tensor:
"""
Args:
x: (Tensor) Input tensor to be convolved with the kernel.
kernel: (Tensor) Convolution kernel.
bias: (Optional, Tensor) Bias tensor to add to the output.
Returns:
(Tensor) Convolved tensor
"""
x_shape = x.shape
# 1. Handle padding of the input and the kernel to make them odd.
x, kernel = causal_padding(x, kernel)
# 2. Pad the kernel tensor to make them equally big. Required for fft.
kernel = f.pad(kernel, [0, x.size(-1) - kernel.size(-1)])
# 3. Perform fourier transform
if double_precision:
# We can make usage of double precision to make more accurate approximations of the convolution response.
x = x.double()
kernel = kernel.double()
x_fr = torch.fft.rfft(x, dim=-1)
kernel_fr = torch.fft.rfft(kernel, dim=-1)
# 4. Multiply the transformed matrices:
# (Input * Conj(Kernel)) = Correlation(Input, Kernel)
kernel_fr = torch.conj(kernel_fr)
output_fr = (x_fr.unsqueeze(1) * kernel_fr.unsqueeze(0)).sum(
2
) # 'ab..., cb... -> ac...'
# 5. Compute inverse FFT, and remove extra padded values
# Once we are back in the spatial domain, we can go back to float precision, if double used.
out = torch.fft.irfft(output_fr, dim=-1).float()
out = out[:, :, : x_shape[-1]]
# 6. Optionally, add a bias term before returning.
if bias is not None:
out = out + bias.view(1, -1, 1)
return out
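# Sanity-check sketch (assumption: CPU, float32, random data): the direct and FFT-based
# causal convolutions implement the same operator, so they should agree up to numerical error.
def _check_causal_conv_equivalence():
    x = torch.randn(2, 3, 64)    # (batch, in_channels, length)
    k = torch.randn(4, 3, 11)    # (out_channels, in_channels, kernel_length)
    out_direct = causal_conv(x, k)
    out_fft = causal_fftconv(x, k)
    return torch.allclose(out_direct, out_fft, rtol=1e-3, atol=1e-3)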
class KernelNet(torch.nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
hidden_channels: int,
activation_function: str,
norm_type: str,
dim_linear: int,
bias: bool,
omega_0: float,
weight_dropout: float,
):
"""
        Creates a 3-layer MLP, which parameterizes a convolutional kernel as:
relative positions -> hidden_channels -> hidden_channels -> in_channels * out_channels
:param in_channels: Dimensionality of the relative positions (Default: 1).
:param out_channels: input channels * output channels of the resulting convolutional kernel.
:param hidden_channels: Number of hidden units.
:param activation_function: Activation function used.
:param norm_type: Normalization type used.
        :param dim_linear: Spatial dimension of the input, e.g., for audio = 1, images = 2 (only 1 supported).
:param bias: If True, adds a learnable bias to the layers.
:param omega_0: Value of the omega_0 value (only used in Sine networks).
:param weight_dropout: Dropout rate applied to the sampled convolutional kernel.
"""
super().__init__()
is_siren = activation_function == "Sine"
w_dp = weight_dropout != 0.0
Norm = {
"BatchNorm": torch.nn.BatchNorm1d,
"LayerNorm": LayerNorm,
"": torch.nn.Identity,
}[norm_type]
ActivationFunction = {
"ReLU": torch.nn.ReLU,
"LeakyReLU": torch.nn.LeakyReLU,
"Swish": Swish,
"Sine": Sine,
}[activation_function]
Linear = {1: Linear1d, 2: Linear2d}[dim_linear]
self.kernel_net = torch.nn.Sequential(
weight_norm(Linear(in_channels, hidden_channels, bias=bias)),
Multiply(omega_0) if is_siren else torch.nn.Identity(),
Norm(hidden_channels) if not is_siren else torch.nn.Identity(),
ActivationFunction(),
weight_norm(Linear(hidden_channels, hidden_channels, bias=bias)),
Multiply(omega_0) if is_siren else torch.nn.Identity(),
Norm(hidden_channels) if not is_siren else torch.nn.Identity(),
ActivationFunction(),
weight_norm(Linear(hidden_channels, out_channels, bias=bias)),
torch.nn.Dropout(p=weight_dropout) if w_dp else torch.nn.Identity(),
)
# initialize the kernel function
self.initialize(
mean=0.0,
variance=0.01,
bias_value=0.0,
is_siren=(activation_function == "Sine"),
omega_0=omega_0,
)
def forward(self, x):
return self.kernel_net(x)
def initialize(self, mean, variance, bias_value, is_siren, omega_0):
if is_siren:
# Initialization of SIRENs
net_layer = 1
for (i, m) in enumerate(self.modules()):
if (
isinstance(m, torch.nn.Conv1d)
or isinstance(m, torch.nn.Conv2d)
or isinstance(m, torch.nn.Linear)
):
if net_layer == 1:
m.weight.data.uniform_(
-1, 1
) # Normally (-1, 1) / in_dim but we only use 1D inputs.
# Important! Bias is not defined in original SIREN implementation!
net_layer += 1
else:
m.weight.data.uniform_(
-np.sqrt(6.0 / m.weight.shape[1]) / omega_0,
# the in_size is dim 2 in the weights of Linear and Conv layers
np.sqrt(6.0 / m.weight.shape[1]) / omega_0,
)
# Important! Bias is not defined in original SIREN implementation
if m.bias is not None:
m.bias.data.uniform_(-1.0, 1.0)
else:
# Initialization of ReLUs
net_layer = 1
intermediate_response = None
for (i, m) in enumerate(self.modules()):
if (
isinstance(m, torch.nn.Conv1d)
or isinstance(m, torch.nn.Conv2d)
or isinstance(m, torch.nn.Linear)
):
m.weight.data.normal_(
mean,
variance,
)
if m.bias is not None:
if net_layer == 1:
# m.bias.data.fill_(bias_value)
range = torch.linspace(-1.0, 1.0, steps=m.weight.shape[0])
bias = -range * m.weight.data.clone().squeeze()
m.bias = torch.nn.Parameter(bias)
intermediate_response = [
m.weight.data.clone(),
m.bias.data.clone(),
]
net_layer += 1
elif net_layer == 2:
range = torch.linspace(-1.0, 1.0, steps=m.weight.shape[0])
range = range + (range[1] - range[0])
range = (
range * intermediate_response[0].squeeze()
+ intermediate_response[1]
)
bias = -torch.einsum(
"oi, i -> o", m.weight.data.clone().squeeze(), range
)
m.bias = torch.nn.Parameter(bias)
net_layer += 1
else:
m.bias.data.fill_(bias_value)
class CKConv(torch.nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
hidden_channels: int,
activation_function: str,
norm_type: str,
dim_linear: int,
bias: bool,
omega_0: float,
weight_dropout: float,
):
"""
Creates a Continuous Kernel Convolution.
:param in_channels: Number of channels in the input signal
:param out_channels: Number of channels produced by the convolution
:param hidden_channels: Number of hidden units in the network parameterizing the ConvKernel (KernelNet).
:param activation_function: Activation function used in KernelNet.
:param norm_type: Normalization type used in KernelNet. (only for non-Sine KernelNets).
        :param dim_linear: Spatial dimension of the input, e.g., for audio = 1, images = 2 (only 1 supported).
:param bias: If True, adds a learnable bias to the output.
:param omega_0: Value of the omega_0 value of the KernelNet. (only for non-Sine KernelNets).
:param weight_dropout: Dropout rate applied to the sampled convolutional kernels.
"""
super().__init__()
self.Kernel = KernelNet(
dim_linear,
out_channels * in_channels,
hidden_channels,
activation_function,
norm_type,
dim_linear,
bias,
omega_0,
weight_dropout,
)
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
self.bias.data.fill_(value=0.0)
else:
self.bias = None
# Non-persistent values
self.rel_positions = None
self.sigma = None
self.sr_change = 1.0
self.register_buffer("train_length", torch.zeros(1).int(), persistent=True)
self.register_buffer("conv_kernel", torch.zeros(in_channels), persistent=False)
def forward(self, x):
# Construct kernel
x_shape = x.shape
rel_pos = self.handle_rel_positions(x)
conv_kernel = self.Kernel(rel_pos).view(-1, x_shape[1], *x_shape[2:])
        # ---- Different sampling rate --------
        # If freq test > freq train, smooth out high-freq elements.
if self.sigma is not None:
from math import pi, sqrt, exp
n = int(1 / self.sr_change) * 2 + 1
h = n // 2
G = (
lambda x: 1
/ (self.sigma * sqrt(2 * pi))
* exp(-float(x) ** 2 / (2 * self.sigma ** 2))
)
smoothing_ker = [G(x) for x in range(-h, h + 1)]
smoothing_ker = torch.Tensor(smoothing_ker).cuda().unsqueeze(0).unsqueeze(0)
conv_kernel[:, :, h:-h] = torch.conv1d(
conv_kernel.view(-1, 1, *x_shape[2:]), smoothing_ker, padding=0
).view(*conv_kernel.shape[:-1], -1)
# multiply by the sr_train / sr_test
if self.sr_change != 1.0:
conv_kernel *= self.sr_change
# ------------------------------------
# For computation of "weight_decay"
self.conv_kernel = conv_kernel
# We have noticed that the results of fftconv become very noisy when the length of
# the input is very small ( < 50 samples). As this might occur when we use subsampling,
# we replace causal_fftconv by causal_conv in settings where this occurs.
if x_shape[-1] < self.train_length.item():
# Use spatial convolution:
return causal_conv(x, conv_kernel, self.bias)
else:
# Otherwise use fft convolution:
return causal_fftconv(x, conv_kernel, self.bias)
def handle_rel_positions(self, x):
"""
        Handles the vector of relative positions which is given to KernelNet.
"""
if self.rel_positions is None:
if self.train_length[0] == 0:
# The ckconv has not been trained yet. Set maximum length to be 1.
self.train_length[0] = x.shape[-1]
# Calculate the maximum relative position based on the length of the train set,
# and the current length of the input.
max_relative_pos = self.calculate_max(
self.train_length.item(), current_length=x.shape[-1]
)
# Creates the vector of relative positions.
self.rel_positions = (
torch.linspace(-1.0, max_relative_pos, x.shape[-1])
.cuda()
.unsqueeze(0)
.unsqueeze(0)
) # -> With form: [batch_size=1, in_channels=1, x_dimension]
# calculate and save the sr ratio for later
if self.train_length.item() > x.shape[-1]:
self.sr_change = round(self.train_length.item() / x.shape[-1])
else:
self.sr_change = 1 / round(x.shape[-1] / self.train_length.item())
# if new signal has higher frequency
if self.sr_change < 1:
self.sigma = 0.5
return self.rel_positions
@staticmethod
def calculate_max(
train_length: int,
current_length: int,
) -> float:
"""
Calculates the maximum relative position for the current length based on the input length.
This is used to avoid kernel misalignment (see Appx. D.2).
:param train_length: Input length during training.
:param current_length: Current input length.
:return: Returns the max relative position for the calculation of the relative
positions vector. The max. of train is always equal to 1.
"""
# get sampling rate ratio
if train_length > current_length:
sr_change = round(train_length / current_length)
else:
sr_change = 1 / round(current_length / train_length)
# get step sizes (The third parameter of torch.linspace).
train_step = 2.0 / (train_length - 1)
current_step = train_step * sr_change
# Calculate the maximum relative position.
if sr_change > 1:
            subtract = (train_length - 1) % sr_change
            max_relative_pos = 1 - subtract * train_step
else:
add = (current_length - 1) % (1 / sr_change)
max_relative_pos = 1 + add * current_step
return max_relative_pos
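# Illustrative values (computed from the formula above): at the training resolution the
# rightmost relative position stays at 1.0; at half the resolution it is pulled slightly
# below 1.0 so the subsampled grid stays aligned with the positions seen during training.
def _example_calculate_max():
    same = CKConv.calculate_max(train_length=100, current_length=100)  # 1.0
    half = CKConv.calculate_max(train_length=100, current_length=50)   # ~0.98
    return same, half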
class CKBlock(torch.nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernelnet_hidden_channels: int,
kernelnet_activation_function: str,
kernelnet_norm_type: str,
dim_linear: int,
bias: bool,
omega_0: bool,
dropout: float,
weight_dropout: float,
):
"""
Creates a Residual Block with CKConvs as:
        (Follows the Residual Block of Bai et al., 2017)
input
| ---------------|
CKConv |
LayerNorm |
ReLU |
DropOut |
| |
CKConv |
LayerNorm |
ReLU |
DropOut |
+ <--------------|
|
ReLU
|
output
:param in_channels: Number of channels in the input signal
:param out_channels: Number of output (and hidden) channels of the block.
:param kernelnet_hidden_channels: Number of hidden units in the KernelNets of the CKConvs.
:param kernelnet_activation_function: Activation function used in the KernelNets of the CKConvs.
:param kernelnet_norm_type: Normalization type used in the KernelNets of the CKConvs (only for non-Sine KernelNets).
:param dim_linear: Spatial dimension of the input, e.g., for audio = 1, images = 2 (only 1 suported).
:param bias: If True, adds a learnable bias to the output.
:param omega_0: Value of the omega_0 value of the KernelNets. (only for non-Sine KernelNets).
:param dropout: Dropout rate of the block
:param weight_dropout: Dropout rate applied to the sampled convolutional kernels.
"""
super().__init__()
# CKConv layers
self.cconv1 = CKConv(
in_channels,
out_channels,
kernelnet_hidden_channels,
kernelnet_activation_function,
kernelnet_norm_type,
dim_linear,
bias,
omega_0,
weight_dropout,
)
self.cconv2 = CKConv(
out_channels,
out_channels,
kernelnet_hidden_channels,
kernelnet_activation_function,
kernelnet_norm_type,
dim_linear,
bias,
omega_0,
weight_dropout,
)
# Norm layers
self.norm1 = LayerNorm(out_channels)
self.norm2 = LayerNorm(out_channels)
# Dropout
self.dp = torch.nn.Dropout(dropout)
shortcut = []
if in_channels != out_channels:
shortcut.append(Linear1d(in_channels, out_channels))
self.shortcut = torch.nn.Sequential(*shortcut)
def forward(self, x):
shortcut = self.shortcut(x)
out = self.dp(torch.relu(self.norm1(self.cconv1(x))))
out = torch.relu(self.dp(torch.relu(self.norm2(self.cconv2(out)))) + shortcut)
return out
class CKCNN(torch.nn.Module):
def __init__(
self,
hidden_channels: int,
num_blocks: int, # 2
kernelnet_hidden_channels: int,
kernelnet_activation_function: str,
kernelnet_norm_type: str,
dim_linear: int,
bias: bool,
omega_0: bool, # sensitive to this param: good values <= 70
dropout: float,
weight_dropout: float,
pool: bool, # Always False in our experiments.
):
super(CKCNN, self).__init__()
blocks = []
for i in range(num_blocks):
blocks.append(
CKBlock(
# block_in_channels,
hidden_channels,
hidden_channels,
kernelnet_hidden_channels,
kernelnet_activation_function,
kernelnet_norm_type,
dim_linear,
bias,
omega_0,
dropout,
weight_dropout,
)
)
if pool:
blocks.append(torch.nn.MaxPool1d(kernel_size=2))
self.backbone = torch.nn.Sequential(*blocks)
def forward(self, x, *args, **kwargs):
# Change from (B, L, H) -> (B, H, L)
x = x.transpose(1, 2)
x = self.backbone(x)
x = x.transpose(1, 2)
return x
class CopyMemory_CKCNN(CKCNN):
def __init__(
self,
in_channels: int,
hidden_channels: int,
num_blocks: int,
kernelnet_hidden_channels: int,
kernelnet_activation_function: str,
kernelnet_norm_type: str,
dim_linear: int,
bias: bool,
omega_0: bool,
dropout: float,
weight_dropout: float,
pool: bool,
):
super().__init__(
in_channels,
hidden_channels,
num_blocks,
kernelnet_hidden_channels,
kernelnet_activation_function,
kernelnet_norm_type,
dim_linear,
bias,
omega_0,
dropout,
weight_dropout,
pool,
)
self.finallyr = torch.nn.Linear(in_features=hidden_channels, out_features=10)
# Initialize finallyr
self.finallyr.weight.data.normal_(
mean=0.0,
std=0.01,
)
self.finallyr.bias.data.fill_(value=0.0)
def forward(self, x, *args, **kwargs):
# Change from (B, S, C) -> (B, C, S)
x = x.transpose(1, 2)
out = self.backbone(x)
out = self.finallyr(out.transpose(1, 2))
return out
class AddProblem_CKCNN(CKCNN):
def __init__(
self,
in_channels: int,
hidden_channels: int,
num_blocks: int,
kernelnet_hidden_channels: int,
kernelnet_activation_function: str,
kernelnet_norm_type: str,
dim_linear: int,
bias: bool,
omega_0: bool,
dropout: float,
weight_dropout: float,
pool: bool,
):
super().__init__(
in_channels,
hidden_channels,
num_blocks,
kernelnet_hidden_channels,
kernelnet_activation_function,
kernelnet_norm_type,
dim_linear,
bias,
omega_0,
dropout,
weight_dropout,
pool,
)
self.finallyr = torch.nn.Linear(in_features=hidden_channels, out_features=1)
# Initialize finallyr
self.finallyr.weight.data.normal_(
mean=0.0,
std=0.01,
)
self.finallyr.bias.data.fill_(value=0.0)
def forward(self, x, *args, **kwargs):
# Change from (B, S, C) -> (B, C, S)
x = x.transpose(1, 2)
out = self.backbone(x)
out = self.finallyr(out[:, :, -1])
return out
class ClassificationCKCNN(CKCNN):
def __init__(
self,
# d_input: int,
# d_output: int,
d_model: int,
num_blocks: int,
kernelnet_hidden_channels: int,
kernelnet_activation_function: str,
kernelnet_norm_type: str,
dim_linear: int,
bias: bool,
omega_0: bool,
dropout: float,
weight_dropout: float,
pool: bool,
wd: float,
# **kwargs,
):
super().__init__(
# d_input,
d_model,
num_blocks,
kernelnet_hidden_channels,
kernelnet_activation_function,
kernelnet_norm_type,
dim_linear,
bias,
omega_0,
dropout,
weight_dropout,
pool,
)
self.d_model = d_model
self.d_output = d_model
self.wd = LnLoss(wd, 2)
def forward(self, x, *args, **kwargs):
# Change from (B, S, C) -> (B, C, S)
x = x.transpose(1, 2)
x = self.backbone(x)
x = x.transpose(1, 2)
return x, None # Have to return a state
def loss(self):
return self.wd.forward(model=self)
class LnLoss(torch.nn.Module):
def __init__(
self,
weight_loss: float,
norm_type: int,
):
"""
Computes the Ln loss on the CKConv kernels in a CKCNN.
:param weight_loss: Specifies the weight with which the loss will be summed to the total loss.
:param norm_type: Type of norm, e.g., 1 = L1 loss, 2 = L2 loss, ...
"""
super(LnLoss, self).__init__()
self.weight_loss = weight_loss
self.norm_type = norm_type
def forward(
self,
model: CKConv,
):
loss = 0.0
# Go through modules that are instances of CKConvs and gather the sampled filters
for m in model.modules():
if not isinstance(m, CKConv):
continue
loss = loss + m.conv_kernel.norm(self.norm_type)
loss = loss + m.bias.norm(self.norm_type)
loss = self.weight_loss * loss
return loss
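if __name__ == "__main__" and torch.cuda.is_available():
    # Usage sketch (assumptions: a CUDA device, since handle_rel_positions builds the
    # relative-position grid with .cuda(), and made-up hyperparameters). The Ln penalty
    # is added to the task loss after a forward pass, once every CKConv has cached its
    # sampled kernel in `conv_kernel`.
    model = ClassificationCKCNN(
        d_model=16,
        num_blocks=1,
        kernelnet_hidden_channels=16,
        kernelnet_activation_function="Sine",
        kernelnet_norm_type="",
        dim_linear=1,
        bias=True,
        omega_0=30.0,
        dropout=0.1,
        weight_dropout=0.0,
        pool=False,
        wd=1e-4,
    ).cuda()
    x = torch.randn(2, 128, 16, device="cuda")   # (B, L, d_model)
    y, _ = model(x)
    total_loss = y.pow(2).mean() + model.loss()  # dummy task loss + Ln penalty
    print(y.shape, float(total_loss))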
| state-spaces-main | src/models/baselines/ckconv.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Optional, Collection
##############################################################################################################################################
# utility functions
def listify(p=None, q=None):
# https://github.com/fastai/fastai1/blob/master/fastai/core.py#L129
"Make `p` listy and the same length as `q`."
if p is None:
p = []
elif isinstance(p, str):
p = [p]
elif not isinstance(p, list):
p = [p]
# Rank 0 tensors in PyTorch are Iterable but don't have a length.
else:
try:
a = len(p)
except:
p = [p]
n = q if type(q) == int else len(p) if q is None else len(q)
if len(p) == 1:
p = p * n
assert len(p) == n, f"List len mismatch ({len(p)} vs {n})"
return list(p)
def bn_drop_lin(
n_in: int,
n_out: int,
bn: bool = True,
p: float = 0.0,
actn: Optional[nn.Module] = None,
):
# https://github.com/fastai/fastai_old/blob/master/fastai_do_not_use/layers.py
"`n_in`->bn->dropout->linear(`n_in`,`n_out`)->`actn`"
layers = [nn.BatchNorm1d(n_in)] if bn else []
if p != 0:
layers.append(nn.Dropout(p))
layers.append(nn.Linear(n_in, n_out))
if actn is not None:
layers.append(actn)
return layers
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
def _conv1d(
in_planes,
out_planes,
kernel_size=3,
stride=1,
dilation=1,
act="relu",
bn=True,
drop_p=0,
):
lst = []
if drop_p > 0:
lst.append(nn.Dropout(drop_p))
lst.append(
nn.Conv1d(
in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=(kernel_size - 1) // 2,
dilation=dilation,
bias=not (bn),
)
)
if bn:
lst.append(nn.BatchNorm1d(out_planes))
if act == "relu":
lst.append(nn.ReLU(True))
if act == "elu":
lst.append(nn.ELU(True))
if act == "prelu":
lst.append(nn.PReLU(True))
return nn.Sequential(*lst)
def _fc(in_planes, out_planes, act="relu", bn=True):
lst = [nn.Linear(in_planes, out_planes, bias=not (bn))]
if bn:
lst.append(nn.BatchNorm1d(out_planes))
if act == "relu":
lst.append(nn.ReLU(True))
if act == "elu":
lst.append(nn.ELU(True))
if act == "prelu":
lst.append(nn.PReLU(True))
return nn.Sequential(*lst)
def cd_adaptiveconcatpool(relevant, irrelevant, module):
mpr, mpi = module.mp.attrib(relevant, irrelevant)
apr, api = module.ap.attrib(relevant, irrelevant)
return torch.cat([mpr, apr], 1), torch.cat([mpi, api], 1)
def attrib_adaptiveconcatpool(self, relevant, irrelevant):
return cd_adaptiveconcatpool(relevant, irrelevant, self)
class AdaptiveConcatPool1d(nn.Module):
"Layer that concats `AdaptiveAvgPool1d` and `AdaptiveMaxPool1d`."
def __init__(self, sz: Optional[int] = None):
"Output will be 2*sz or 2 if sz is None"
super().__init__()
sz = sz or 1
self.ap, self.mp = nn.AdaptiveAvgPool1d(sz), nn.AdaptiveMaxPool1d(sz)
def forward(self, x):
return torch.cat([self.mp(x), self.ap(x)], 1)
def attrib(self, relevant, irrelevant):
return attrib_adaptiveconcatpool(self, relevant, irrelevant)
class SqueezeExcite1d(nn.Module):
"""squeeze excite block as used for example in LSTM FCN"""
def __init__(self, channels, reduction=16):
super().__init__()
channels_reduced = channels // reduction
self.w1 = torch.nn.Parameter(
torch.randn(channels_reduced, channels).unsqueeze(0)
)
self.w2 = torch.nn.Parameter(
torch.randn(channels, channels_reduced).unsqueeze(0)
)
def forward(self, x):
# input is bs,ch,seq
z = torch.mean(x, dim=2, keepdim=True) # bs,ch
intermed = F.relu(
torch.matmul(self.w1, z)
) # (1,ch_red,ch * bs,ch,1) = (bs, ch_red, 1)
s = F.sigmoid(
torch.matmul(self.w2, intermed)
) # (1,ch,ch_red * bs, ch_red, 1=bs, ch, 1
return s * x # bs,ch,seq * bs, ch,1 = bs,ch,seq
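# Illustrative shape sketch: the block computes per-channel gates from the temporal mean
# and rescales the input, so the (B, C, L) shape is preserved.
def _example_squeeze_excite():
    se = SqueezeExcite1d(channels=32, reduction=16)
    x = torch.randn(4, 32, 100)
    return se(x).shape  # torch.Size([4, 32, 100])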
def weight_init(m):
"""call weight initialization for model n via n.appy(weight_init)"""
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
if isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if isinstance(m, SqueezeExcite1d):
        # Tensor.size is a method, so it must be called (subscripting it raises a TypeError)
        stdv1 = math.sqrt(2.0 / m.w1.size(0))
        nn.init.normal_(m.w1, 0.0, stdv1)
        stdv2 = math.sqrt(1.0 / m.w2.size(1))
nn.init.normal_(m.w2, 0.0, stdv2)
def create_head1d(
nf: int,
nc: int,
lin_ftrs: Optional[Collection[int]] = None,
ps=0.5,
bn_final: bool = False,
bn: bool = True,
act="relu",
concat_pooling=True,
):
"Model head that takes `nf` features, runs through `lin_ftrs`, and about `nc` classes; added bn and act here"
lin_ftrs = (
[2 * nf if concat_pooling else nf, nc]
if lin_ftrs is None
else [2 * nf if concat_pooling else nf] + lin_ftrs + [nc]
) # was [nf, 512,nc]
ps = listify(ps)
if len(ps) == 1:
ps = [ps[0] / 2] * (len(lin_ftrs) - 2) + ps
actns = [nn.ReLU(inplace=True) if act == "relu" else nn.ELU(inplace=True)] * (
len(lin_ftrs) - 2
) + [None]
layers = [AdaptiveConcatPool1d() if concat_pooling else nn.MaxPool1d(2), Flatten()]
for ni, no, p, actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += bn_drop_lin(ni, no, bn, p, actn)
if bn_final:
layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
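# Illustrative sketch (hypothetical sizes): a head for 128 backbone channels and 5 classes;
# with concat_pooling the first linear layer sees 2 * nf inputs (avg pool + max pool).
def _example_head():
    head = create_head1d(nf=128, nc=5, lin_ftrs=[256], ps=0.25)
    x = torch.randn(4, 128, 100)  # (B, C, L) feature maps
    return head(x).shape          # torch.Size([4, 5])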
##############################################################################################################################################
# basic convolutional architecture
class basic_conv1d(nn.Sequential):
"""basic conv1d"""
def __init__(
self,
filters=[128, 128, 128, 128],
kernel_size=3,
stride=2,
dilation=1,
pool=0,
pool_stride=1,
squeeze_excite_reduction=0,
num_classes=2,
input_channels=8,
act="relu",
bn=True,
headless=False,
split_first_layer=False,
drop_p=0.0,
lin_ftrs_head=None,
ps_head=0.5,
bn_final_head=False,
bn_head=True,
act_head="relu",
concat_pooling=True,
):
layers = []
if isinstance(kernel_size, int):
kernel_size = [kernel_size] * len(filters)
for i in range(len(filters)):
layers_tmp = []
layers_tmp.append(
_conv1d(
input_channels if i == 0 else filters[i - 1],
filters[i],
kernel_size=kernel_size[i],
stride=(1 if (split_first_layer is True and i == 0) else stride),
dilation=dilation,
act="none"
if (
(headless is True and i == len(filters) - 1)
or (split_first_layer is True and i == 0)
)
else act,
bn=False if (headless is True and i == len(filters) - 1) else bn,
drop_p=(0.0 if i == 0 else drop_p),
)
)
if split_first_layer is True and i == 0:
layers_tmp.append(
_conv1d(
filters[0],
filters[0],
kernel_size=1,
stride=1,
act=act,
bn=bn,
drop_p=0.0,
)
)
# layers_tmp.append(nn.Linear(filters[0],filters[0],bias=not(bn)))
# layers_tmp.append(_fc(filters[0],filters[0],act=act,bn=bn))
if pool > 0 and i < len(filters) - 1:
layers_tmp.append(
nn.MaxPool1d(pool, stride=pool_stride, padding=(pool - 1) // 2)
)
if squeeze_excite_reduction > 0:
layers_tmp.append(SqueezeExcite1d(filters[i], squeeze_excite_reduction))
layers.append(nn.Sequential(*layers_tmp))
# head
# layers.append(nn.AdaptiveAvgPool1d(1))
# layers.append(nn.Linear(filters[-1],num_classes))
# head #inplace=True leads to a runtime error see ReLU+ dropout https://discuss.pytorch.org/t/relu-dropout-inplace/13467/5
self.headless = headless
if headless is True:
head = nn.Sequential(nn.AdaptiveAvgPool1d(1), Flatten())
else:
head = create_head1d(
filters[-1],
nc=num_classes,
lin_ftrs=lin_ftrs_head,
ps=ps_head,
bn_final=bn_final_head,
bn=bn_head,
act=act_head,
concat_pooling=concat_pooling,
)
layers.append(head)
super().__init__(*layers)
def get_layer_groups(self):
return (self[2], self[-1])
def get_output_layer(self):
if self.headless is False:
return self[-1][-1]
else:
return None
def set_output_layer(self, x):
if self.headless is False:
self[-1][-1] = x
############################################################################################
# convenience functions for basic convolutional architectures
def fcn(filters=[128] * 5, num_classes=2, input_channels=8):
filters_in = filters + [num_classes]
return basic_conv1d(
filters=filters_in,
kernel_size=3,
stride=1,
pool=2,
pool_stride=2,
input_channels=input_channels,
act="relu",
bn=True,
headless=True,
)
def fcn_wang(
num_classes=2,
input_channels=8,
lin_ftrs_head=None,
ps_head=0.5,
bn_final_head=False,
bn_head=True,
act_head="relu",
concat_pooling=True,
):
return basic_conv1d(
filters=[128, 256, 128],
kernel_size=[8, 5, 3],
stride=1,
pool=0,
pool_stride=2,
num_classes=num_classes,
input_channels=input_channels,
act="relu",
bn=True,
lin_ftrs_head=lin_ftrs_head,
ps_head=ps_head,
bn_final_head=bn_final_head,
bn_head=bn_head,
act_head=act_head,
concat_pooling=concat_pooling,
)
def schirrmeister(
num_classes=2,
input_channels=8,
lin_ftrs_head=None,
ps_head=0.5,
bn_final_head=False,
bn_head=True,
act_head="relu",
concat_pooling=True,
):
return basic_conv1d(
filters=[25, 50, 100, 200],
kernel_size=10,
stride=3,
pool=3,
pool_stride=1,
num_classes=num_classes,
input_channels=input_channels,
act="relu",
bn=True,
headless=False,
split_first_layer=True,
drop_p=0.5,
lin_ftrs_head=lin_ftrs_head,
ps_head=ps_head,
bn_final_head=bn_final_head,
bn_head=bn_head,
act_head=act_head,
concat_pooling=concat_pooling,
)
def sen(
filters=[128] * 5,
num_classes=2,
input_channels=8,
squeeze_excite_reduction=16,
drop_p=0.0,
lin_ftrs_head=None,
ps_head=0.5,
bn_final_head=False,
bn_head=True,
act_head="relu",
concat_pooling=True,
):
return basic_conv1d(
filters=filters,
kernel_size=3,
stride=2,
pool=0,
pool_stride=0,
input_channels=input_channels,
act="relu",
bn=True,
num_classes=num_classes,
squeeze_excite_reduction=squeeze_excite_reduction,
drop_p=drop_p,
lin_ftrs_head=lin_ftrs_head,
ps_head=ps_head,
bn_final_head=bn_final_head,
bn_head=bn_head,
act_head=act_head,
concat_pooling=concat_pooling,
)
def basic1d(
filters=[128] * 5,
kernel_size=3,
stride=2,
dilation=1,
pool=0,
pool_stride=1,
squeeze_excite_reduction=0,
num_classes=2,
input_channels=8,
act="relu",
bn=True,
headless=False,
drop_p=0.0,
lin_ftrs_head=None,
ps_head=0.5,
bn_final_head=False,
bn_head=True,
act_head="relu",
concat_pooling=True,
):
return basic_conv1d(
filters=filters,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
pool=pool,
pool_stride=pool_stride,
squeeze_excite_reduction=squeeze_excite_reduction,
num_classes=num_classes,
input_channels=input_channels,
act=act,
bn=bn,
headless=headless,
drop_p=drop_p,
lin_ftrs_head=lin_ftrs_head,
ps_head=ps_head,
bn_final_head=bn_final_head,
bn_head=bn_head,
act_head=act_head,
concat_pooling=concat_pooling,
)
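if __name__ == "__main__":
    # Illustrative sketch (hypothetical 12-lead, 1000-sample input): these convenience
    # constructors are plain nn.Sequential models that expect channel-first input
    # (B, input_channels, L) and return (B, num_classes) logits.
    model = fcn_wang(num_classes=5, input_channels=12)
    x = torch.randn(4, 12, 1000)
    print(model(x).shape)  # torch.Size([4, 5])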
| state-spaces-main | src/models/baselines/nonaka/basic_conv1d.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.baselines.nonaka.basic_conv1d import create_head1d, Flatten
from enum import Enum
import re
# delegates
import inspect
def delegates(to=None, keep=False):
"Decorator: replace `**kwargs` in signature with params from `to`"
def _f(f):
if to is None:
to_f, from_f = f.__base__.__init__, f.__init__
else:
to_f, from_f = to, f
sig = inspect.signature(from_f)
sigd = dict(sig.parameters)
k = sigd.pop("kwargs")
s2 = {
k: v
for k, v in inspect.signature(to_f).parameters.items()
if v.default != inspect.Parameter.empty and k not in sigd
}
sigd.update(s2)
if keep:
sigd["kwargs"] = k
from_f.__signature__ = sig.replace(parameters=sigd.values())
return f
return _f
def store_attr(self, nms):
"Store params named in comma-separated `nms` from calling context into attrs in `self`"
mod = inspect.currentframe().f_back.f_locals
for n in re.split(", *", nms):
setattr(self, n, mod[n])
NormType = Enum("NormType", "Batch BatchZero Weight Spectral Instance InstanceZero")
def _conv_func(ndim=2, transpose=False):
"Return the proper conv `ndim` function, potentially `transposed`."
assert 1 <= ndim <= 3
return getattr(nn, f'Conv{"Transpose" if transpose else ""}{ndim}d')
def init_default(m, func=nn.init.kaiming_normal_):
"Initialize `m` weights with `func` and set `bias` to 0."
if func and hasattr(m, "weight"):
func(m.weight)
with torch.no_grad():
if getattr(m, "bias", None) is not None:
m.bias.fill_(0.0)
return m
def _get_norm(prefix, nf, ndim=2, zero=False, **kwargs):
"Norm layer with `nf` features and `ndim` initialized depending on `norm_type`."
assert 1 <= ndim <= 3
bn = getattr(nn, f"{prefix}{ndim}d")(nf, **kwargs)
if bn.affine:
bn.bias.data.fill_(1e-3)
bn.weight.data.fill_(0.0 if zero else 1.0)
return bn
def BatchNorm(nf, ndim=2, norm_type=NormType.Batch, **kwargs):
"BatchNorm layer with `nf` features and `ndim` initialized depending on `norm_type`."
return _get_norm(
"BatchNorm", nf, ndim, zero=norm_type == NormType.BatchZero, **kwargs
)
class ConvLayer(nn.Sequential):
"Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and `norm_type` layers."
def __init__(
self,
ni,
nf,
ks=3,
stride=1,
padding=None,
bias=None,
ndim=2,
norm_type=NormType.Batch,
bn_1st=True,
act_cls=nn.ReLU,
transpose=False,
init=nn.init.kaiming_normal_,
xtra=None,
**kwargs,
):
if padding is None:
padding = (ks - 1) // 2 if not transpose else 0
bn = norm_type in (NormType.Batch, NormType.BatchZero)
inn = norm_type in (NormType.Instance, NormType.InstanceZero)
if bias is None:
bias = not (bn or inn)
conv_func = _conv_func(ndim, transpose=transpose)
conv = init_default(
conv_func(
ni,
nf,
kernel_size=ks,
bias=bias,
stride=stride,
padding=padding,
**kwargs,
),
init,
)
if norm_type == NormType.Weight:
conv = torch.nn.utils.weight_norm(conv)
elif norm_type == NormType.Spectral:
conv = torch.nn.utils.spectral_norm(conv)
layers = [conv]
act_bn = []
if act_cls is not None:
act_bn.append(act_cls())
if bn:
act_bn.append(BatchNorm(nf, norm_type=norm_type, ndim=ndim))
if inn:
act_bn.append(InstanceNorm(nf, norm_type=norm_type, ndim=ndim))
if bn_1st:
act_bn.reverse()
layers += act_bn
if xtra:
layers.append(xtra)
super().__init__(*layers)
def AdaptiveAvgPool(sz=1, ndim=2):
"nn.AdaptiveAvgPool layer for `ndim`"
assert 1 <= ndim <= 3
return getattr(nn, f"AdaptiveAvgPool{ndim}d")(sz)
def MaxPool(ks=2, stride=None, padding=0, ndim=2, ceil_mode=False):
"nn.MaxPool layer for `ndim`"
assert 1 <= ndim <= 3
return getattr(nn, f"MaxPool{ndim}d")(ks, stride=stride, padding=padding)
def AvgPool(ks=2, stride=None, padding=0, ndim=2, ceil_mode=False):
"nn.AvgPool layer for `ndim`"
assert 1 <= ndim <= 3
return getattr(nn, f"AvgPool{ndim}d")(
ks, stride=stride, padding=padding, ceil_mode=ceil_mode
)
class ResBlock(nn.Module):
"Resnet block from `ni` to `nh` with `stride`"
@delegates(ConvLayer.__init__)
def __init__(
self,
expansion,
ni,
nf,
stride=1,
kernel_size=3,
groups=1,
reduction=None,
nh1=None,
nh2=None,
dw=False,
g2=1,
sa=False,
sym=False,
norm_type=NormType.Batch,
act_cls=nn.ReLU,
ndim=2,
pool=AvgPool,
pool_first=True,
**kwargs,
):
super().__init__()
norm2 = (
NormType.BatchZero
if norm_type == NormType.Batch
else NormType.InstanceZero
if norm_type == NormType.Instance
else norm_type
)
if nh2 is None:
nh2 = nf
if nh1 is None:
nh1 = nh2
nf, ni = nf * expansion, ni * expansion
k0 = dict(norm_type=norm_type, act_cls=act_cls, ndim=ndim, **kwargs)
k1 = dict(norm_type=norm2, act_cls=None, ndim=ndim, **kwargs)
layers = (
[
ConvLayer(
ni,
nh2,
kernel_size,
stride=stride,
groups=ni if dw else groups,
**k0,
),
ConvLayer(nh2, nf, kernel_size, groups=g2, **k1),
]
if expansion == 1
else [
ConvLayer(ni, nh1, 1, **k0),
ConvLayer(
nh1,
nh2,
kernel_size,
stride=stride,
groups=nh1 if dw else groups,
**k0,
),
ConvLayer(nh2, nf, 1, groups=g2, **k1),
]
)
self.convs = nn.Sequential(*layers)
convpath = [self.convs]
if reduction:
convpath.append(SEModule(nf, reduction=reduction, act_cls=act_cls))
if sa:
convpath.append(SimpleSelfAttention(nf, ks=1, sym=sym))
self.convpath = nn.Sequential(*convpath)
idpath = []
if ni != nf:
idpath.append(ConvLayer(ni, nf, 1, act_cls=None, ndim=ndim, **kwargs))
if stride != 1:
idpath.insert((1, 0)[pool_first], pool(2, ndim=ndim, ceil_mode=True))
self.idpath = nn.Sequential(*idpath)
self.act = nn.ReLU(inplace=True) if act_cls is nn.ReLU else act_cls()
def forward(self, x):
return self.act(self.convpath(x) + self.idpath(x))
######################### adapted from vison.models.xresnet
def init_cnn(m):
if getattr(m, "bias", None) is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(m.weight)
for l in m.children():
init_cnn(l)
class XResNet1d(nn.Sequential):
@delegates(ResBlock)
def __init__(
self,
block,
expansion,
layers,
p=0.0,
input_channels=3,
num_classes=1000,
stem_szs=(32, 32, 64),
kernel_size=5,
kernel_size_stem=5,
widen=1.0,
sa=False,
act_cls=nn.ReLU,
lin_ftrs_head=None,
ps_head=0.5,
bn_final_head=False,
bn_head=True,
act_head="relu",
concat_pooling=True,
**kwargs,
):
store_attr(self, "block,expansion,act_cls")
stem_szs = [input_channels, *stem_szs]
stem = [
ConvLayer(
stem_szs[i],
stem_szs[i + 1],
ks=kernel_size_stem,
stride=2 if i == 0 else 1,
act_cls=act_cls,
ndim=1,
)
for i in range(3)
]
# block_szs = [int(o*widen) for o in [64,128,256,512] +[256]*(len(layers)-4)]
block_szs = [
int(o * widen) for o in [64, 64, 64, 64] + [32] * (len(layers) - 4)
]
block_szs = [64 // expansion] + block_szs
blocks = [
self._make_layer(
ni=block_szs[i],
nf=block_szs[i + 1],
blocks=l,
stride=1 if i == 0 else 2,
kernel_size=kernel_size,
sa=sa and i == len(layers) - 4,
ndim=1,
**kwargs,
)
for i, l in enumerate(layers)
]
head = create_head1d(
block_szs[-1] * expansion,
nc=num_classes,
lin_ftrs=lin_ftrs_head,
ps=ps_head,
bn_final=bn_final_head,
bn=bn_head,
act=act_head,
concat_pooling=concat_pooling,
)
super().__init__(
*stem,
nn.MaxPool1d(kernel_size=3, stride=2, padding=1),
*blocks,
head,
)
init_cnn(self)
def _make_layer(self, ni, nf, blocks, stride, kernel_size, sa, **kwargs):
return nn.Sequential(
*[
self.block(
self.expansion,
ni if i == 0 else nf,
nf,
stride=stride if i == 0 else 1,
kernel_size=kernel_size,
sa=sa and i == (blocks - 1),
act_cls=self.act_cls,
**kwargs,
)
for i in range(blocks)
]
)
def get_layer_groups(self):
return (self[3], self[-1])
def get_output_layer(self):
return self[-1][-1]
def set_output_layer(self, x):
self[-1][-1] = x
def forward(self, x, *args, **kwargs):
y = super().forward(x.transpose(-1, -2))
return y, None
# xresnets
def _xresnet1d(expansion, layers, **kwargs):
return XResNet1d(ResBlock, expansion, layers, **kwargs)
def xresnet1d18(**kwargs):
return _xresnet1d(1, [2, 2, 2, 2], **kwargs)
def xresnet1d34(**kwargs):
return _xresnet1d(1, [3, 4, 6, 3], **kwargs)
def xresnet1d50(**kwargs):
return _xresnet1d(4, [3, 4, 6, 3], **kwargs)
def xresnet1d101(**kwargs):
return _xresnet1d(4, [3, 4, 23, 3], **kwargs)
def xresnet1d152(**kwargs):
return _xresnet1d(4, [3, 8, 36, 3], **kwargs)
def xresnet1d18_deep(**kwargs):
return _xresnet1d(1, [2, 2, 2, 2, 1, 1], **kwargs)
def xresnet1d34_deep(**kwargs):
return _xresnet1d(1, [3, 4, 6, 3, 1, 1], **kwargs)
def xresnet1d50_deep(**kwargs):
return _xresnet1d(4, [3, 4, 6, 3, 1, 1], **kwargs)
def xresnet1d18_deeper(**kwargs):
return _xresnet1d(1, [2, 2, 1, 1, 1, 1, 1, 1], **kwargs)
def xresnet1d34_deeper(**kwargs):
return _xresnet1d(1, [3, 4, 6, 3, 1, 1, 1, 1], **kwargs)
def xresnet1d50_deeper(**kwargs):
return _xresnet1d(4, [3, 4, 6, 3, 1, 1, 1, 1], **kwargs)
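if __name__ == "__main__":
    # Illustrative sketch (hypothetical 12-lead, 1000-sample input): unlike the
    # basic_conv1d models, XResNet1d.forward expects channel-last input (B, L, C),
    # transposes internally, and returns a (logits, state) tuple with state=None.
    model = xresnet1d18(input_channels=12, num_classes=5)
    x = torch.randn(4, 1000, 12)
    y, _ = model(x)
    print(y.shape)  # torch.Size([4, 5])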
| state-spaces-main | src/models/baselines/nonaka/xresnet.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from src.models.baselines.nonaka.basic_conv1d import AdaptiveConcatPool1d, create_head1d
########################################################################################################
# Inception time inspired by https://github.com/hfawaz/InceptionTime/blob/master/classifiers/inception.py and https://github.com/tcapelle/TimeSeries_fastai/blob/master/inception.py
def conv(in_planes, out_planes, kernel_size=3, stride=1):
"""Convolution with padding."""
return nn.Conv1d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=(kernel_size-1)//2, bias=False)
def noop(x): return x
class InceptionBlock1d(nn.Module):
def __init__(self, ni, nb_filters, kss, stride=1, act='linear', bottleneck_size=32):
super().__init__()
self.bottleneck = conv(ni, bottleneck_size, 1, stride) if (bottleneck_size>0) else noop
self.convs = nn.ModuleList([conv(bottleneck_size if (bottleneck_size>0) else ni, nb_filters, ks) for ks in kss])
self.conv_bottle = nn.Sequential(nn.MaxPool1d(3, stride, padding=1), conv(ni, nb_filters, 1))
self.bn_relu = nn.Sequential(nn.BatchNorm1d((len(kss)+1)*nb_filters), nn.ReLU())
def forward(self, x):
#print("block in",x.size())
bottled = self.bottleneck(x)
out = self.bn_relu(torch.cat([c(bottled) for c in self.convs]+[self.conv_bottle(x)], dim=1))
return out
class Shortcut1d(nn.Module):
def __init__(self, ni, nf):
super().__init__()
self.act_fn=nn.ReLU(True)
self.conv=conv(ni, nf, 1)
self.bn=nn.BatchNorm1d(nf)
def forward(self, inp, out):
#print("sk",out.size(), inp.size(), self.conv(inp).size(), self.bn(self.conv(inp)).size)
#input()
return self.act_fn(out + self.bn(self.conv(inp)))
class InceptionBackbone(nn.Module):
def __init__(self, input_channels, kss, depth, bottleneck_size, nb_filters, use_residual):
super().__init__()
self.depth = depth
assert((depth % 3) == 0)
self.use_residual = use_residual
n_ks = len(kss) + 1
self.im = nn.ModuleList([InceptionBlock1d(input_channels if d==0 else n_ks*nb_filters,nb_filters=nb_filters,kss=kss, bottleneck_size=bottleneck_size) for d in range(depth)])
self.sk = nn.ModuleList([Shortcut1d(input_channels if d==0 else n_ks*nb_filters, n_ks*nb_filters) for d in range(depth//3)])
def forward(self, x):
input_res = x
for d in range(self.depth):
x = self.im[d](x)
if self.use_residual and d % 3 == 2:
x = (self.sk[d//3])(input_res, x)
input_res = x.clone()
return x
class Inception1d(nn.Module):
"""Inception time architecture."""
def __init__(self, num_classes=2, input_channels=8, kernel_size=40, depth=6, bottleneck_size=32, nb_filters=32, use_residual=True,lin_ftrs_head=None, ps_head=0.5, bn_final_head=False, bn_head=True, act_head="relu", concat_pooling=True):
super().__init__()
assert(kernel_size>=40)
kernel_size = [k-1 if k%2==0 else k for k in [kernel_size,kernel_size//2,kernel_size//4]] #was 39,19,9
layers = [InceptionBackbone(input_channels=input_channels, kss=kernel_size, depth=depth, bottleneck_size=bottleneck_size, nb_filters=nb_filters, use_residual=use_residual)]
n_ks = len(kernel_size) + 1
#head
head = create_head1d(n_ks*nb_filters, nc=num_classes, lin_ftrs=lin_ftrs_head, ps=ps_head, bn_final=bn_final_head, bn=bn_head, act=act_head, concat_pooling=concat_pooling)
layers.append(head)
#layers.append(AdaptiveConcatPool1d())
#layers.append(Flatten())
#layers.append(nn.Linear(2*n_ks*nb_filters, num_classes))
self.layers = nn.Sequential(*layers)
def forward(self, x, *args, **kwargs):
y = self.layers(x.transpose(-1, -2))
return y, None
def get_layer_groups(self):
depth = self.layers[0].depth
if(depth>3):
return ((self.layers[0].im[3:],self.layers[0].sk[1:]),self.layers[-1])
else:
return (self.layers[-1])
def get_output_layer(self):
return self.layers[-1][-1]
def set_output_layer(self,x):
self.layers[-1][-1] = x
def inception1d(**kwargs):
"""Constructs an Inception model."""
return Inception1d(**kwargs)
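if __name__ == "__main__":
    # Illustrative sketch (hypothetical 12-lead, 1000-sample input): Inception1d also
    # takes channel-last input (B, L, C) and returns (logits, None); kernel_size must
    # be >= 40, since it is split into the (39, 19, 9) branch kernels above.
    model = inception1d(num_classes=5, input_channels=12)
    x = torch.randn(4, 1000, 12)
    y, _ = model(x)
    print(y.shape)  # torch.Size([4, 5])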
| state-spaces-main | src/models/baselines/nonaka/inception.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from src.models.baselines.nonaka.basic_conv1d import create_head1d, Flatten
###############################################################################################
# Standard resnet
def conv(in_planes, out_planes, stride=1, kernel_size=3):
"convolution with padding"
return nn.Conv1d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=(kernel_size-1)//2, bias=False)
class BasicBlock1d(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, kernel_size=[3,3], downsample=None):
super().__init__()
# if(isinstance(kernel_size,int)): kernel_size = [kernel_size,kernel_size//2+1]
if(isinstance(kernel_size,int)): kernel_size = [kernel_size, kernel_size]
self.conv1 = conv(inplanes, planes, stride=stride, kernel_size=kernel_size[0])
self.bn1 = nn.BatchNorm1d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv(planes, planes,kernel_size=kernel_size[1])
self.bn2 = nn.BatchNorm1d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x if self.downsample is None else self.downsample(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x += residual
x = self.relu(x)
return x
class Bottleneck1d(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, kernel_size=3, downsample=None):
super().__init__()
self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(planes)
self.conv2 = nn.Conv1d(planes, planes, kernel_size=kernel_size, stride=stride,
padding=(kernel_size-1)//2, bias=False)
self.bn2 = nn.BatchNorm1d(planes)
self.conv3 = nn.Conv1d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm1d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet1d(nn.Sequential):
'''1d adaptation of the torchvision resnet'''
def __init__(self, block, layers, kernel_size=3, num_classes=2, input_channels=3, inplanes=64, fix_feature_dim=True, kernel_size_stem = None, stride_stem=2, pooling_stem=True, stride=2, lin_ftrs_head=None, ps_head=0.5, bn_final_head=False, bn_head=True, act_head="relu", concat_pooling=True):
self.inplanes = inplanes
layers_tmp = []
if(kernel_size_stem is None):
kernel_size_stem = kernel_size[0] if isinstance(kernel_size,list) else kernel_size
#stem
layers_tmp.append(nn.Conv1d(input_channels, inplanes, kernel_size=kernel_size_stem, stride=stride_stem, padding=(kernel_size_stem-1)//2,bias=False))
layers_tmp.append(nn.BatchNorm1d(inplanes))
layers_tmp.append(nn.ReLU(inplace=True))
if(pooling_stem is True):
layers_tmp.append(nn.MaxPool1d(kernel_size=3, stride=2, padding=1))
#backbone
for i,l in enumerate(layers):
if(i==0):
layers_tmp.append(self._make_layer(block, inplanes, layers[0],kernel_size=kernel_size))
else:
layers_tmp.append(self._make_layer(block, inplanes if fix_feature_dim else (2**i)*inplanes, layers[i], stride=stride,kernel_size=kernel_size))
#head
#layers_tmp.append(nn.AdaptiveAvgPool1d(1))
#layers_tmp.append(Flatten())
#layers_tmp.append(nn.Linear((inplanes if fix_feature_dim else (2**len(layers)*inplanes)) * block.expansion, num_classes))
head = create_head1d((inplanes if fix_feature_dim else (2**len(layers)*inplanes)) * block.expansion, nc=num_classes, lin_ftrs=lin_ftrs_head, ps=ps_head, bn_final=bn_final_head, bn=bn_head, act=act_head, concat_pooling=concat_pooling)
layers_tmp.append(head)
super().__init__(*layers_tmp)
def _make_layer(self, block, planes, blocks, stride=1,kernel_size=3):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv1d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm1d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, kernel_size, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def get_layer_groups(self):
return (self[6],self[-1])
def get_output_layer(self):
return self[-1][-1]
def set_output_layer(self,x):
self[-1][-1]=x
def forward(self, x, *args, **kwargs):
y = super().forward(x.transpose(-1, -2))
return y, None
def resnet1d18(**kwargs):
"""Constructs a ResNet-18 model.
"""
return ResNet1d(BasicBlock1d, [2, 2, 2, 2], **kwargs)
def resnet1d34(**kwargs):
"""Constructs a ResNet-34 model.
"""
return ResNet1d(BasicBlock1d, [3, 4, 6, 3], **kwargs)
def resnet1d50(**kwargs):
"""Constructs a ResNet-50 model.
"""
return ResNet1d(Bottleneck1d, [3, 4, 6, 3], **kwargs)
def resnet1d101(**kwargs):
"""Constructs a ResNet-101 model.
"""
return ResNet1d(Bottleneck1d, [3, 4, 23, 3], **kwargs)
def resnet1d152(**kwargs):
"""Constructs a ResNet-152 model.
"""
return ResNet1d(Bottleneck1d, [3, 8, 36, 3], **kwargs)
#original used kernel_size_stem = 8
def resnet1d_wang(**kwargs):
if(not("kernel_size" in kwargs.keys())):
kwargs["kernel_size"]=[5,3]
if(not("kernel_size_stem" in kwargs.keys())):
kwargs["kernel_size_stem"]=7
if(not("stride_stem" in kwargs.keys())):
kwargs["stride_stem"]=1
if(not("pooling_stem" in kwargs.keys())):
kwargs["pooling_stem"]=False
if(not("inplanes" in kwargs.keys())):
kwargs["inplanes"]=128
return ResNet1d(BasicBlock1d, [1, 1, 1], **kwargs)
def resnet1d(**kwargs):
"""Constructs a custom ResNet model.
"""
return ResNet1d(BasicBlock1d, **kwargs)
###############################################################################################
# wide resnet adopted from fastai wrn
def noop(x): return x
def conv1d(ni:int, nf:int, ks:int=3, stride:int=1, padding:int=None, bias=False) -> nn.Conv1d:
"Create `nn.Conv1d` layer: `ni` inputs, `nf` outputs, `ks` kernel size. `padding` defaults to `k//2`."
if padding is None: padding = ks//2
return nn.Conv1d(ni, nf, kernel_size=ks, stride=stride, padding=padding, bias=bias)
def _bn1d(ni, init_zero=False):
"Batchnorm layer with 0 initialization"
m = nn.BatchNorm1d(ni)
m.weight.data.fill_(0 if init_zero else 1)
m.bias.data.zero_()
return m
def bn_relu_conv1d(ni, nf, ks, stride, init_zero=False):
bn_initzero = _bn1d(ni, init_zero=init_zero)
return nn.Sequential(bn_initzero, nn.ReLU(inplace=True), conv1d(ni, nf, ks, stride))
class BasicBlock1dwrn(nn.Module):
def __init__(self, ni, nf, stride, drop_p=0.0, ks=3):
super().__init__()
if(isinstance(ks,int)):
ks = [ks,ks//2+1]
self.bn = nn.BatchNorm1d(ni)
self.conv1 = conv1d(ni, nf, ks[0], stride)
self.conv2 = bn_relu_conv1d(nf, nf, ks[0], 1)
self.drop = nn.Dropout(drop_p, inplace=True) if drop_p else None
self.shortcut = conv1d(ni, nf, ks[1], stride) if (ni != nf or stride>1) else noop #adapted to make it work for fix_feature_dim=True
def forward(self, x):
x2 = F.relu(self.bn(x), inplace=True)
r = self.shortcut(x2)
x = self.conv1(x2)
if self.drop: x = self.drop(x)
x = self.conv2(x) * 0.2
return x.add_(r)
def _make_group(N, ni, nf, block, stride, drop_p,ks=3):
return [block(ni if i == 0 else nf, nf, stride if i == 0 else 1, drop_p,ks=ks) for i in range(N)]
class WideResNet1d(nn.Sequential):
def __init__(self, input_channels:int, num_groups:int, N:int, num_classes:int, k:int=1, drop_p:float=0.0, start_nf:int=16,fix_feature_dim=True,kernel_size=5,lin_ftrs_head=None, ps_head=0.5, bn_final_head=False, bn_head=True, act_head="relu", concat_pooling=True):
super().__init__()
n_channels = [start_nf]
for i in range(num_groups): n_channels.append(start_nf if fix_feature_dim else start_nf*(2**i)*k)
layers = [conv1d(input_channels, n_channels[0], 3, 1)] # conv1 stem
for i in range(num_groups):
layers += _make_group(N, n_channels[i], n_channels[i+1], BasicBlock1dwrn, (1 if i==0 else 2), drop_p,ks=kernel_size)
#layers += [nn.BatchNorm1d(n_channels[-1]), nn.ReLU(inplace=True), nn.AdaptiveAvgPool1d(1),
# Flatten(), nn.Linear(n_channels[-1], num_classes)]
head = create_head1d(n_channels[-1], nc=num_classes, lin_ftrs=lin_ftrs_head, ps=ps_head, bn_final=bn_final_head, bn=bn_head, act=act_head, concat_pooling=concat_pooling)
layers.append(head)
super().__init__(*layers)
def get_layer_groups(self):
return (self[6],self[-1])
def get_output_layer(self):
return self[-1][-1]
def set_output_layer(self,x):
self[-1][-1] = x
def wrn1d_22(**kwargs): return WideResNet1d(num_groups=3, N=3, k=6, drop_p=0.,**kwargs)
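if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original file).
    # Like the other 1d models here, forward() expects (batch, length, channels) and
    # transposes internally; the output shape is an assumption based on create_head1d.
    model = resnet1d_wang(num_classes=5, input_channels=12)
    x = torch.randn(4, 1000, 12)
    y, _ = model(x)   # forward() returns (output, None)
    print(y.shape)    # expected: torch.Size([4, 5])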
| state-spaces-main | src/models/baselines/nonaka/resnet.py |
"""pykeops implementations of the Vandermonde matrix multiplication kernel used in the S4D kernel."""
import torch
from einops import rearrange, repeat
contract = torch.einsum
try:
import pykeops
from pykeops.torch import LazyTensor, Genred
except:
pass
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)
def _broadcast_dims(*tensors):
max_dim = max([len(tensor.shape) for tensor in tensors])
tensors = [tensor.view((1,)*(max_dim-len(tensor.shape))+tensor.shape) for tensor in tensors]
return tensors
def _c2r(x): return torch.view_as_real(x)
def _r2c(x): return torch.view_as_complex(x)
def vandermonde_naive(v, x, L, conj=True):
"""
v: (..., N)
x: (..., N)
returns: (..., L) \sum v x^l
"""
if conj:
x = _conj(x)
v = _conj(v)
vandermonde_matrix = x.unsqueeze(-1) ** torch.arange(L).to(x) # (... N L)
vandermonde_prod = torch.sum(v.unsqueeze(-1) * vandermonde_matrix, dim=-2) # (... L)
return vandermonde_prod
def log_vandermonde_naive(v, x, L, conj=True):
"""
v: (..., N)
x: (..., N)
    returns: (..., L) \sum v exp(x l)   (doubled real part when conj=True)
"""
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
vandermonde_prod = contract('... n, ... n l -> ... l', v, vandermonde_matrix) # (... L)
if conj:
return 2*vandermonde_prod.real
else:
return vandermonde_prod
def log_vandermonde_lazy(v, x, L, conj=True):
if conj:
v = _conj(v)
x = _conj(x)
l = torch.arange(L).to(x)
v, x, l = _broadcast_dims(v, x, l)
v_l = LazyTensor(rearrange(v, '... N -> ... N 1 1'))
x_l = LazyTensor(rearrange(x, '... N -> ... N 1 1'))
l_l = LazyTensor(rearrange(l, '... L -> ... 1 L 1'))
# exp
vand = (x_l * l_l).exp()
s = (v_l*vand).sum(dim=len(v_l.shape)-2)
return s.squeeze(-1)
def log_vandermonde(v, x, L, conj=True):
expr = 'ComplexMult(v, ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'v = Vj(2)',
'x = Vj(2)',
'l = Vi(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
v, x, l = _broadcast_dims(v, x, l)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(v, x, l, backend='GPU')
if conj:
return 2*_r2c(r).real
else:
return _r2c(r)
def log_vandermonde_transpose_naive(u, v, x, L):
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
    vandermonde_prod = contract('... l, ... n, ... n l -> ... n', u.to(x), v.to(x), vandermonde_matrix) # (... N)
return vandermonde_prod
def log_vandermonde_transpose(u, v, x, L):
"""
u: ... H L
v: ... H N
x: ... H N
Returns: ... H N
V = Vandermonde(a, L) : (H N L)
contract_L(V * u * v)
"""
expr = 'ComplexMult(ComplexMult(v, u), ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'u = Vj(2)',
'v = Vi(2)',
'x = Vi(2)',
'l = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
u, v, x, l = _broadcast_dims(u, v, x, l)
u = _c2r(u)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(u, v, x, l, backend='GPU')
return _r2c(r)
def _log_vandermonde_matmul(x, L):
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
return vandermonde_matrix
def log_vandermonde_matmul(v, K):
prod = contract('...n, ...nl -> ...l', v, K)
return 2*prod.real
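if __name__ == "__main__":
    # Sanity-check sketch (added for illustration; not part of the original file):
    # log_vandermonde_naive should agree with explicitly materializing the
    # Vandermonde matrix and contracting against it.
    torch.manual_seed(0)
    v = torch.randn(3, 8, dtype=torch.cfloat)
    x = -torch.rand(3, 8) + 1j * torch.randn(3, 8)
    L = 16
    y_fast = log_vandermonde_naive(v, x, L)
    K = _log_vandermonde_matmul(x, L)   # explicit (..., N, L) matrix
    y_ref = log_vandermonde_matmul(v, K)
    print(torch.allclose(y_fast, y_ref, atol=1e-5))  # expected: True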
| state-spaces-main | src/models/functional/vandermonde.py |
"""Old utilities for parallel scan implementation of Linear RNNs."""
# TODO this file could use much cleanup
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from src.models.functional.toeplitz import triangular_toeplitz_multiply, triangular_toeplitz_multiply_padded
from src.utils.permutations import bitreversal_po2, bitreversal_permutation
### Utilities
def shift_up(a, s=None, drop=True, dim=0):
assert dim == 0
if s is None:
s = torch.zeros_like(a[0, ...])
s = s.unsqueeze(dim)
if drop:
a = a[:-1, ...]
return torch.cat((s, a), dim=dim)
def interleave(a, b, uneven=False, dim=0):
""" Interleave two tensors of same shape """
# assert(a.shape == b.shape)
assert dim == 0 # TODO temporary to make handling uneven case easier
    if dim < 0:
        dim = a.dim() + dim
if uneven:
a_ = a[-1:, ...]
a = a[:-1, ...]
c = torch.stack((a, b), dim+1)
out_shape = list(a.shape)
out_shape[dim] *= 2
c = c.view(out_shape)
if uneven:
c = torch.cat((c, a_), dim=dim)
return c
def batch_mult(A, u, has_batch=None):
""" Matrix mult A @ u with special case to save memory if u has additional batch dim
The batch dimension is assumed to be the second dimension
A : (L, ..., N, N)
u : (L, [B], ..., N)
has_batch: True, False, or None. If None, determined automatically
Output:
x : (L, [B], ..., N)
A @ u broadcasted appropriately
"""
if has_batch is None:
has_batch = len(u.shape) >= len(A.shape)
if has_batch:
u = u.permute([0] + list(range(2, len(u.shape))) + [1])
else:
u = u.unsqueeze(-1)
v = (A @ u)
if has_batch:
v = v.permute([0] + [len(u.shape)-1] + list(range(1, len(u.shape)-1)))
else:
v = v[..., 0]
return v
### Main unrolling functions
def unroll(A, u):
"""
A : (..., N, N) # TODO I think this can't take batch dimension?
u : (L, ..., N)
    output : x (L, ..., N)
x[i, ...] = A^{i} @ u[0, ...] + ... + A @ u[i-1, ...] + u[i, ...]
"""
m = u.new_zeros(u.shape[1:])
outputs = []
for u_ in torch.unbind(u, dim=0):
m = F.linear(m, A) + u_
outputs.append(m)
output = torch.stack(outputs, dim=0)
return output
def parallel_unroll_recursive(A, u):
""" Bottom-up divide-and-conquer version of unroll. """
# Main recursive function
def parallel_unroll_recursive_(A, u):
if u.shape[0] == 1:
return u
u_evens = u[0::2, ...]
u_odds = u[1::2, ...]
# u2 = F.linear(u_evens, A) + u_odds
u2 = (A @ u_evens.unsqueeze(-1)).squeeze(-1) + u_odds
A2 = A @ A
x_odds = parallel_unroll_recursive_(A2, u2)
# x_evens = F.linear(shift_up(x_odds), A) + u_evens
x_evens = (A @ shift_up(x_odds).unsqueeze(-1)).squeeze(-1) + u_evens
x = interleave(x_evens, x_odds, dim=0)
return x
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
return parallel_unroll_recursive_(A, u)[:n, ...]
def parallel_unroll_recursive_br(A, u):
""" Same as parallel_unroll_recursive but uses bit reversal for locality. """
# Main recursive function
def parallel_unroll_recursive_br_(A, u):
n = u.shape[0]
if n == 1:
return u
m = n//2
u_0 = u[:m, ...]
u_1 = u[m:, ...]
u2 = F.linear(u_0, A) + u_1
A2 = A @ A
x_1 = parallel_unroll_recursive_br_(A2, u2)
x_0 = F.linear(shift_up(x_1), A) + u_0
# x = torch.cat((x_0, x_1), dim=0) # is there a way to do this with cat?
x = interleave(x_0, x_1, dim=0)
return x
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
# Apply bit reversal
br = bitreversal_po2(N)
u = u[br, ...]
x = parallel_unroll_recursive_br_(A, u)
return x[:n, ...]
def parallel_unroll_iterative(A, u):
""" Bottom-up divide-and-conquer version of unroll, implemented iteratively """
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
# Apply bit reversal
br = bitreversal_po2(N)
u = u[br, ...]
# Main recursive loop, flattened
us = [] # stores the u_0 terms in the recursive version
N_ = N
As = [] # stores the A matrices
for l in range(m):
N_ = N_ // 2
As.append(A)
u_0 = u[:N_, ...]
us.append(u_0)
u = F.linear(u_0, A) + u[N_:, ...]
A = A @ A
x_0 = []
x = u # x_1
for l in range(m-1, -1, -1):
x_0 = F.linear(shift_up(x), As[l]) + us[l]
x = interleave(x_0, x, dim=0)
return x[:n, ...]
def variable_unroll_sequential(A, u, s=None, variable=True):
""" Unroll with variable (in time/length) transitions A.
A : ([L], ..., N, N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
    output : x (L, [B], ..., N)
x[i, ...] = A[i]..A[0] @ s + A[i..1] @ u[0] + ... + A[i] @ u[i-1] + u[i]
"""
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
has_batch = len(u.shape) >= len(A.shape)
outputs = []
for (A_, u_) in zip(torch.unbind(A, dim=0), torch.unbind(u, dim=0)):
# s = F.linear(s, A_) + u_
s = batch_mult(A_.unsqueeze(0), s.unsqueeze(0), has_batch)[0]
s = s + u_
outputs.append(s)
output = torch.stack(outputs, dim=0)
return output
def variable_unroll(A, u, s=None, variable=True, recurse_limit=16):
""" Bottom-up divide-and-conquer version of variable_unroll. """
if u.shape[0] <= recurse_limit:
return variable_unroll_sequential(A, u, s, variable)
if s is None:
s = torch.zeros_like(u[0])
uneven = u.shape[0] % 2 == 1
has_batch = len(u.shape) >= len(A.shape)
u_0 = u[0::2, ...]
u_1 = u[1::2, ...]
if variable:
A_0 = A[0::2, ...]
A_1 = A[1::2, ...]
else:
A_0 = A
A_1 = A
u_0_ = u_0
A_0_ = A_0
if uneven:
u_0_ = u_0[:-1, ...]
if variable:
A_0_ = A_0[:-1, ...]
u_10 = batch_mult(A_1, u_0_, has_batch)
u_10 = u_10 + u_1
A_10 = A_1 @ A_0_
# Recursive call
x_1 = variable_unroll(A_10, u_10, s, variable, recurse_limit)
x_0 = shift_up(x_1, s, drop=not uneven)
x_0 = batch_mult(A_0, x_0, has_batch)
x_0 = x_0 + u_0
x = interleave(x_0, x_1, uneven, dim=0) # For some reason this interleave is slower than in the (non-multi) unroll_recursive
return x
def variable_unroll_general_sequential(A, u, s, op, variable=True):
""" Unroll with variable (in time/length) transitions A with general associative operation
A : ([L], ..., N, N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
    output : x (L, [B], ..., N)
x[i, ...] = A[i]..A[0] s + A[i..1] u[0] + ... + A[i] u[i-1] + u[i]
"""
if not variable:
A = A.expand((u.shape[0],) + A.shape)
outputs = []
for (A_, u_) in zip(torch.unbind(A, dim=0), torch.unbind(u, dim=0)):
s = op(A_, s)
s = s + u_
outputs.append(s)
output = torch.stack(outputs, dim=0)
return output
def variable_unroll_matrix_sequential(A, u, s=None, variable=True):
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
# has_batch = len(u.shape) >= len(A.shape)
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0))[0]
return variable_unroll_general_sequential(A, u, s, op, variable=True)
def variable_unroll_toeplitz_sequential(A, u, s=None, variable=True, pad=False):
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
# has_batch = len(u.shape) >= len(A.shape)
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0))[0]
if pad:
n = A.shape[-1]
A = F.pad(A, (0, n))
u = F.pad(u, (0, n))
s = F.pad(s, (0, n))
ret = variable_unroll_general_sequential(A, u, s, triangular_toeplitz_multiply_padded, variable=True)
ret = ret[..., :n]
return ret
return variable_unroll_general_sequential(A, u, s, triangular_toeplitz_multiply, variable=True)
### General parallel scan functions with generic binary composition operators
def variable_unroll_general(A, u, s, op, compose_op=None, sequential_op=None, variable=True, recurse_limit=16):
""" Bottom-up divide-and-conquer version of variable_unroll.
compose is an optional function that defines how to compose A without multiplying by a leaf u
"""
if u.shape[0] <= recurse_limit:
if sequential_op is None:
sequential_op = op
return variable_unroll_general_sequential(A, u, s, sequential_op, variable)
if compose_op is None:
compose_op = op
uneven = u.shape[0] % 2 == 1
# has_batch = len(u.shape) >= len(A.shape)
u_0 = u[0::2, ...]
u_1 = u[1::2, ...]
if variable:
A_0 = A[0::2, ...]
A_1 = A[1::2, ...]
else:
A_0 = A
A_1 = A
u_0_ = u_0
A_0_ = A_0
if uneven:
u_0_ = u_0[:-1, ...]
if variable:
A_0_ = A_0[:-1, ...]
u_10 = op(A_1, u_0_) # batch_mult(A_1, u_0_, has_batch)
u_10 = u_10 + u_1
A_10 = compose_op(A_1, A_0_)
# Recursive call
x_1 = variable_unroll_general(A_10, u_10, s, op, compose_op, sequential_op, variable=variable, recurse_limit=recurse_limit)
x_0 = shift_up(x_1, s, drop=not uneven)
x_0 = op(A_0, x_0) # batch_mult(A_0, x_0, has_batch)
x_0 = x_0 + u_0
x = interleave(x_0, x_1, uneven, dim=0) # For some reason this interleave is slower than in the (non-multi) unroll_recursive
return x
def variable_unroll_matrix(A, u, s=None, variable=True, recurse_limit=16):
if s is None:
s = torch.zeros_like(u[0])
has_batch = len(u.shape) >= len(A.shape)
op = lambda x, y: batch_mult(x, y, has_batch)
sequential_op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
matmul = lambda x, y: x @ y
return variable_unroll_general(A, u, s, op, compose_op=matmul, sequential_op=sequential_op, variable=variable, recurse_limit=recurse_limit)
def variable_unroll_toeplitz(A, u, s=None, variable=True, recurse_limit=8, pad=False):
""" Unroll with variable (in time/length) transitions A with general associative operation
A : ([L], ..., N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
output : x (L, [B], ..., N) same shape as u
x[i, ...] = A[i]..A[0] s + A[i..1] u[0] + ... + A[i] u[i-1] + u[i]
"""
# Add the batch dimension to A if necessary
A_batch_dims = len(A.shape) - int(variable)
u_batch_dims = len(u.shape)-1
if u_batch_dims > A_batch_dims:
# assert u_batch_dims == A_batch_dims + 1
if variable:
while len(A.shape) < len(u.shape):
A = A.unsqueeze(1)
# else:
# A = A.unsqueeze(0)
if s is None:
s = torch.zeros_like(u[0])
if pad:
n = A.shape[-1]
A = F.pad(A, (0, n))
u = F.pad(u, (0, n))
s = F.pad(s, (0, n))
op = triangular_toeplitz_multiply_padded
ret = variable_unroll_general(A, u, s, op, compose_op=op, variable=variable, recurse_limit=recurse_limit)
ret = ret[..., :n]
return ret
op = triangular_toeplitz_multiply
ret = variable_unroll_general(A, u, s, op, compose_op=op, variable=variable, recurse_limit=recurse_limit)
return ret
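if __name__ == "__main__":
    # Sanity-check sketch (added for illustration; not part of the original file):
    # the sequential recurrence and the divide-and-conquer unroll should agree.
    torch.manual_seed(0)
    N, L = 4, 10
    A = 0.5 * torch.randn(N, N)   # scaled down so powers of A stay well behaved
    u = torch.randn(L, N)
    x_seq = unroll(A, u)
    x_par = parallel_unroll_recursive(A, u)
    print(torch.allclose(x_seq, x_par, atol=1e-5))  # expected: True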
| state-spaces-main | src/models/functional/unroll.py |
"""pykeops implementations of the core Cauchy kernel used in the S4 algorithm.
The interface of the Cauchy multiplication is:
Inputs:
        v: (N)
        z: (L)
        w: (N)
    Returns: y (L)
      y_k = \sum_i v_i / (z_k - w_i)
"""
import torch
from einops import rearrange
try:
import pykeops
from pykeops.torch import LazyTensor, Genred
except:
pass
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)
def _broadcast_dims(*tensors):
max_dim = max([len(tensor.shape) for tensor in tensors])
tensors = [tensor.view((1,)*(max_dim-len(tensor.shape))+tensor.shape) for tensor in tensors]
return tensors
def _c2r(x): return torch.view_as_real(x)
def _r2c(x): return torch.view_as_complex(x)
def cauchy_naive(v, z, w, conj=True):
"""
v: (..., N)
z: (..., L)
w: (..., N)
returns: (..., L) \sum v/(z-w)
"""
if conj:
v = _conj(v)
w = _conj(w)
cauchy_matrix = v.unsqueeze(-1) / (z.unsqueeze(-2) - w.unsqueeze(-1)) # (... N L)
return torch.sum(cauchy_matrix, dim=-2)
def cauchy_lazy(v, z, w, conj=True):
if conj:
v = _conj(v)
w = _conj(w)
v, z, w = _broadcast_dims(v, z, w)
v_l = LazyTensor(rearrange(v, '... N -> ... N 1 1'))
w_l = LazyTensor(rearrange(w, '... N -> ... N 1 1'))
z_l = LazyTensor(rearrange(z, '... L -> ... 1 L 1'))
sub = z_l - w_l # (b N L 1), for some reason it doesn't display the last dimension
div = v_l / sub
s = div.sum(dim=len(v_l.shape)-2)
return s.squeeze(-1)
def cauchy(v, z, w, conj=False):
expr = 'ComplexDivide(v, z-w)'
cauchy_mult = Genred(
expr,
[
'v = Vj(2)',
'z = Vi(2)',
'w = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
if conj:
v = _conj(v)
w = _conj(w)
v, z, w = _broadcast_dims(v, z, w)
v = _c2r(v)
z = _c2r(z)
w = _c2r(w)
r = cauchy_mult(v, z, w, backend='GPU')
return _r2c(r)
def cauchy_real(v, z, w):
expr = 'v / (z - w)'
cauchy_mult = Genred(
expr,
[
'v = Vj(1)',
'z = Vi(1)',
'w = Vj(1)',
],
reduction_op='Sum',
axis=1,
)
v, z, w = _broadcast_dims(v, z, w)
v = v.unsqueeze(-1)
z = z.unsqueeze(-1)
w = w.unsqueeze(-1)
r = cauchy_mult(v, z, w, backend='GPU')
return r
def cauchy_conj(v, z, w, num=2, denom=2):
if num == 1:
expr_num = 'z * ComplexReal(v) - Real2Complex(ComplexReal(v)*ComplexReal(w) + ComplexImag(v)*ComplexImag(w))'
elif num == 2:
expr_num = 'z * ComplexReal(v) - Real2Complex(Sum(v * w))'
else: raise NotImplementedError
if denom == 1:
expr_denom = 'ComplexMult(z-Real2Complex(ComplexReal(w)), z-Real2Complex(ComplexReal(w))) + Real2Complex(Square(ComplexImag(w)))'
elif denom == 2:
expr_denom = 'ComplexMult(z-w, z-Conj(w))'
else: raise NotImplementedError
cauchy_mult = Genred(
f'ComplexDivide({expr_num}, {expr_denom})',
[
'v = Vj(2)',
'z = Vi(2)',
'w = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
v, z, w = _broadcast_dims(v, z, w)
v = _c2r(v)
z = _c2r(z)
w = _c2r(w)
r = 2*cauchy_mult(v, z, w, backend='GPU')
return _r2c(r)
def cauchy_conj_components(v, z, w):
""" Assumes z is pure imaginary (as in S4 with bilinear) """
expr_num = 'Imag2Complex(zi*vr) - Real2Complex(vr*wr + vi*wi)'
expr_denom = 'Real2Complex(Square(wr)+Square(wi)-Square(zi)) - Imag2Complex(IntCst(2)*zi*wr)'
cauchy_mult = Genred(
f'ComplexDivide({expr_num}, {expr_denom})',
[
'vr = Vj(1)',
'vi = Vj(1)',
'wr = Vj(1)',
'wi = Vj(1)',
'zi = Vi(1)',
],
reduction_op='Sum',
axis=1,
)
v, z, w = _broadcast_dims(v, z, w)
v = v.unsqueeze(-1)
z = z.unsqueeze(-1)
w = w.unsqueeze(-1)
v_r, v_i = v.real.contiguous(), v.imag.contiguous()
w_r, w_i = w.real.contiguous(), w.imag.contiguous()
z_i = z.imag.contiguous()
r = 2*cauchy_mult(v_r, v_i, w_r, w_i, z_i, backend='GPU')
return _r2c(r)
def cauchy_conj_components_lazy(v, z, w, type=1):
v, z, w = _broadcast_dims(v, z, w)
v_r, v_i = v.real.contiguous(), v.imag.contiguous()
w_r, w_i = w.real.contiguous(), w.imag.contiguous()
z_i = z.imag.contiguous()
v_r = LazyTensor(rearrange(v_r, '... N -> ... 1 N 1'))
v_i = LazyTensor(rearrange(v_i, '... N -> ... 1 N 1'))
w_r = LazyTensor(rearrange(w_r, '... N -> ... 1 N 1'))
w_i = LazyTensor(rearrange(w_i, '... N -> ... 1 N 1'))
z_i = LazyTensor(rearrange(z_i, '... L -> ... L 1 1'))
if type == 1:
num = -v_r*w_r-v_i*w_i + 1j* z_i*v_r
denom = w_r**2+w_i**2-z_i**2 - 2j*w_r*z_i
else:
# z = torch.complex(-w_r, z_i) # Not supported
z = -w_r + 1j* z_i
num = v_r * z - v_i*w_i
denom = z*z + w_i**2 # z**2 is bugged for complex
r = num / denom
r = 2*r.sum(dim=len(z_i.shape)-1)
return r.squeeze(-1)
def cauchy_conj2(v, z, w):
expr = 'ComplexDivide(v, z-w) + ComplexDivide(Conj(v), z-Conj(w))'
cauchy_mult = Genred(
expr,
[
'v = Vj(2)',
'z = Vi(2)',
'w = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
v, z, w = _broadcast_dims(v, z, w)
    # View the complex tensors as real pairs for the pykeops kernel
    # (the original guard `if complex:` tested the always-truthy builtin type).
    v = _c2r(v)
    z = _c2r(z)
    w = _c2r(w)
r = cauchy_mult(v, z, w, backend='GPU')
return _r2c(r)
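if __name__ == "__main__":
    # Sanity-check sketch (added for illustration; not part of the original file):
    # compare cauchy_naive against a direct evaluation of y_k = sum_i v_i / (z_k - w_i),
    # with the conjugate pairs appended to match conj=True.
    torch.manual_seed(0)
    N, L = 4, 6
    v = torch.randn(N, dtype=torch.cfloat)
    w = -torch.rand(N) + 1j * torch.randn(N)
    z = 1j * torch.linspace(0.1, 1.0, L)
    y = cauchy_naive(v, z, w)
    v2, w2 = _conj(v), _conj(w)
    y_ref = torch.stack([(v2 / (zk - w2)).sum() for zk in z])
    print(torch.allclose(y, y_ref, atol=1e-5))  # expected: True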
| state-spaces-main | src/models/functional/cauchy.py |
"""Compute a Krylov function efficiently.
Note that LSSL called this a Krylov function for lack of better terminology,
while S4 renames the Krylov function to a "state space kernel".
An existing term in the literature is "Markov parameters" of an SSM.
The interface for this function is:
Inputs:
A : (N, N)
B : (N,)
C : (N,)
Returns:
[C^T A^i B for i in [L]]
"""
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from src.models.functional.toeplitz import causal_convolution
def krylov_sequential(L, A, b, c=None):
"""Compute the krylov function naively by sequential powering.
A : (..., N, N)
b : (..., N)
c : (..., N)
Returns
if c:
x : (..., L)
x[i, l] = c[i] @ A^l @ b[i]
else:
x : (..., N, L)
x[i, l] = A^l @ b[i]
"""
# Check which of dim b and c is smaller to save memory
if c is not None and c.numel() < b.numel():
return krylov_sequential(L, A.transpose(-1, -2), c, b)
b_ = b
x = []
for _ in range(L):
if c is not None:
x_ = torch.sum(c*b_, dim=-1) # (...) # could be faster with matmul or einsum?
else:
x_ = b_
x.append(x_)
b_ = (A @ b_.unsqueeze(-1)).squeeze(-1)
x = torch.stack(x, dim=-1)
return x
def krylov(L, A, b, c=None, return_power=False):
"""Compute the Krylov matrix (b, Ab, A^2b, ...) using the squaring trick.
If return_power=True, return A^{L-1} as well
"""
# TODO There is an edge case if L=1 where output doesn't get broadcasted, which might be an issue if caller is expecting broadcasting semantics... can deal with it if it arises
x = b.unsqueeze(-1) # (..., N, 1)
A_ = A
AL = None
if return_power:
AL = torch.eye(A.shape[-1], dtype=A.dtype, device=A.device)
_L = L-1
done = L == 1
# loop invariant: _L represents how many indices left to compute
while not done:
if return_power:
if _L % 2 == 1: AL = A_ @ AL
_L //= 2
# Save memory on last iteration
l = x.shape[-1]
if L - l <= l:
done = True
_x = x[..., :L-l]
else: _x = x
_x = A_ @ _x
x = torch.cat([x, _x], dim=-1) # there might be a more efficient way of ordering axes
if not done: A_ = A_ @ A_
assert x.shape[-1] == L
if c is not None:
x = torch.einsum('...nl, ...n -> ...l', x, c)
x = x.contiguous() # WOW!!
if return_power:
return x, AL
else:
return x
@torch.no_grad()
def power(L, A, v=None):
"""Compute A^L and the scan sum_i A^i v_i.
A: (..., N, N)
v: (..., N, L)
"""
I = torch.eye(A.shape[-1]).to(A) # , dtype=A.dtype, device=A.device)
powers = [A]
l = 1
while True:
if L % 2 == 1: I = powers[-1] @ I
L //= 2
if L == 0: break
l *= 2
if v is None:
powers = [powers[-1] @ powers[-1]]
else:
powers.append(powers[-1] @ powers[-1])
if v is None: return I
# Invariants:
# powers[-1] := A^l
# l := largest po2 at most L
# Note that an alternative divide and conquer to compute the reduction is possible and can be embedded into the above loop without caching intermediate powers of A
# We do this reverse divide-and-conquer for efficiency reasons:
# 1) it involves fewer padding steps for non-po2 L
# 2) it involves more contiguous arrays
# Take care of edge case for non-po2 arrays
# Note that this initial step is a no-op for the case of power of 2 (l == L)
k = v.size(-1) - l
v_ = powers.pop() @ v[..., l:]
v = v[..., :l]
v[..., :k] = v[..., :k] + v_
# Handle reduction for power of 2
while v.size(-1) > 1:
v = rearrange(v, '... (z l) -> ... z l', z=2)
v = v[..., 0, :] + powers.pop() @ v[..., 1, :]
return I, v.squeeze(-1)
def krylov_toeplitz(L, A, b, c=None):
"""Specializes to lower triangular Toeplitz matrix A represented by its diagonals.
A : (..., N)
b : (..., N)
c : (..., N)
Returns
x : (..., N, L)
x[i, l] = A^l @ b[i]
"""
x = b.unsqueeze(0) # (1, ..., N)
A_ = A
while x.shape[0] < L:
xx = causal_convolution(A_, x)
x = torch.cat([x, xx], dim=0) # there might be a more efficient way of ordering axes
A_ = causal_convolution(A_, A_)
x = x[:L, ...] # (L, ..., N)
if c is not None:
x = torch.einsum('l...n, ...n -> ...l', x, c)
else:
x = rearrange(x, 'l ... n -> ... n l')
x = x.contiguous()
return x
def krylov_toeplitz_(L, A, b, c=None):
""" Padded version of krylov_toeplitz that saves some fft's
TODO currently not faster than original version, not sure why
"""
N = A.shape[-1]
x = b.unsqueeze(0) # (1, ..., N)
x = F.pad(x, (0, N))
A = F.pad(A, (0, N))
done = L == 1
while not done:
l = x.shape[0]
# Save memory on last iteration
if L - l <= l:
done = True
_x = x[:L-l]
else: _x = x
Af = torch.fft.rfft(A, n=2*N, dim=-1)
xf = torch.fft.rfft(_x, n=2*N, dim=-1)
xf_ = Af * xf
x_ = torch.fft.irfft(xf_, n=2*N, dim=-1)
x_[..., N:] = 0
x = torch.cat([x, x_], dim=0) # there might be a more efficient way of ordering axes
if not done:
A = torch.fft.irfft(Af*Af, n=2*N, dim=-1)
A[..., N:] = 0
x = x[:L, ..., :N] # (L, ..., N)
if c is not None:
x = torch.einsum('l...n, ...n -> ...l', x, c)
else:
x = rearrange(x, 'l ... n -> ... n l')
x = x.contiguous()
return x
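if __name__ == "__main__":
    # Sanity-check sketch (added for illustration; not part of the original file):
    # the squaring-trick krylov() should match the sequential reference.
    torch.manual_seed(0)
    N, L = 4, 9
    A = 0.5 * torch.randn(N, N)
    b = torch.randn(N)
    c = torch.randn(N)
    k_fast = krylov(L, A, b, c)            # (L,): [c^T A^l b for l in range(L)]
    k_ref = krylov_sequential(L, A, b, c)  # (L,)
    print(torch.allclose(k_fast, k_ref, atol=1e-4))  # expected: True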
| state-spaces-main | src/models/functional/krylov.py |
"""Utilities for computing convolutions.
There are 3 equivalent views:
1. causal convolution
2. multiplication of (lower) triangular Toeplitz matrices
3. polynomial multiplication (mod x^N)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def construct_toeplitz(v, f=0.0):
"""Explicit construction of Krylov matrix [v A @ v A^2 @ v ... A^{n-1} @ v]
where A = Z_f. This uses vectorized indexing and cumprod so it's much
faster than using the Krylov function.
Parameters:
v: the starting vector of size n or (rank, n).
f: real number
Returns:
K: Krylov matrix of size (n, n) or (rank, n, n).
"""
n = v.shape[-1]
a = torch.arange(n, device=v.device)
b = -a
indices = a[:, None] + b[None]
K = v[..., indices]
K[..., indices < 0] *= f
return K
def triangular_toeplitz_multiply_(u, v, sum=None):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
uv_f = u_f * v_f
if sum is not None:
uv_f = uv_f.sum(dim=sum)
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
def triangular_toeplitz_multiply_padded_(u, v):
""" Same as triangular_toeplitz_multiply but inputs and output assume to be 0-padded already. """
n = u.shape[-1]
assert n % 2 == 0
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
output[..., n:] = 0
return output
class TriangularToeplitzMult(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
return triangular_toeplitz_multiply_(u, v)
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultFast(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad.flip(-1), (0, n))
g_f = torch.fft.rfft(g_expand, n=2*n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=2*n, dim=-1)[..., :n]
d_v = torch.fft.irfft(gu_f, n=2*n, dim=-1)[..., :n]
d_u = d_u.flip(-1)
d_v = d_v.flip(-1)
return d_u, d_v
class TriangularToeplitzMultPadded(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
output = triangular_toeplitz_multiply_(u, v)
return output
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_padded_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_padded_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultPaddedFast(torch.autograd.Function):
""" Trade off speed (20-25% faster) for more memory (20-25%) """
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
output[..., n//2:].zero_()
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad[..., :n//2].flip(-1), (0, n//2))
g_f = torch.fft.rfft(g_expand, n=n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=n, dim=-1)
d_v = torch.fft.irfft(gu_f, n=n, dim=-1)
d_u[..., n//2:].zero_()
d_v[..., n//2:].zero_()
d_u[..., :n//2] = d_u[..., :n//2].flip(-1) # TODO
d_v[..., :n//2] = d_v[..., :n//2].flip(-1) # TODO
return d_u, d_v
# triangular_toeplitz_multiply = triangular_toeplitz_multiply_
triangular_toeplitz_multiply = TriangularToeplitzMult.apply
triangular_toeplitz_multiply_fast = TriangularToeplitzMultFast.apply
triangular_toeplitz_multiply_padded = TriangularToeplitzMultPadded.apply
triangular_toeplitz_multiply_padded_fast = TriangularToeplitzMultPaddedFast.apply
def causal_convolution(u, v, fast=True, pad=False):
if not pad and not fast:
return triangular_toeplitz_multiply(u, v)
if not pad and fast:
return triangular_toeplitz_multiply_fast(u, v)
if pad and not fast:
return triangular_toeplitz_multiply_padded(u, v)
if pad and fast:
return triangular_toeplitz_multiply_padded_fast(u, v)
def _fft(x, N): return torch.fft.rfft(F.pad(x, (0, 2*N-x.shape[-1])), n=2*N, dim=-1)
def _ifft(x, N): return torch.fft.irfft(x, n=2*N, dim=-1)[..., :N]
def causal_convolution_inverse(u):
""" Invert the causal convolution/polynomial/triangular Toeplitz matrix represented by u.
This is easiest in the polynomial view:
https://www.csa.iisc.ac.in/~chandan/courses/CNT/notes/lec5.pdf
The idea is that
h = g^{-1} (mod x^m) => 2h - gh^2 = g^{-1} (mod x^{2m})
# TODO this can be numerically unstable if input is "poorly conditioned",
# for example if u[0] is magnitudes different from the rest of u
"""
N = u.shape[-1]
v = u[..., :1].reciprocal()
while v.shape[-1] < N:
M = v.shape[-1]
v_f = _fft(v, 2*M)
u_f = _fft(u[..., :2*M], 2*M)
_v = -_ifft(u_f * v_f**2, 2*M)
_v[..., :M] = _v[..., :M] + 2*v
v = _v
# TODO contiguous?
v = v[..., :N]
return v
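if __name__ == "__main__":
    # Sanity-check sketch (added for illustration; not part of the original file):
    # causal_convolution should agree with multiplying by the explicit lower
    # triangular Toeplitz matrix built from the same first column.
    torch.manual_seed(0)
    N = 8
    u = torch.randn(N)
    v = torch.randn(N)
    T = construct_toeplitz(u)              # (N, N) lower triangular Toeplitz
    y_ref = T @ v
    y_fft = causal_convolution(u, v)       # FFT-based implementation
    print(torch.allclose(y_ref, y_fft, atol=1e-5))  # expected: True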
| state-spaces-main | src/models/functional/toeplitz.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import csv
from pathlib import Path
import pandas as pd
XX_EN_LANGUAGES = {
"1": ["fr", "de", "nl", "ru", "es", "it", "tr", "fa", "sv-SE", "mn",
"zh-CN"],
"2": ["fr", "de", "es", "ca", "it", "ru", "zh-CN", "pt", "fa", "et",
"mn", "nl", "tr", "ar", "sv-SE", "lv", "sl", "ta", "ja", "id",
"cy"]
}
EN_XX_LANGUAGES = {
"1": [],
"2": ["de", "tr", "fa", "sv-SE", "mn", "zh-CN", "cy", "ca", "sl", "et",
"id", "ar", "ta", "lv", "ja"]
}
SPLITS = ["train", "dev", "test"]
TSV_PREFIX = {"1": "covost", "2": "covost_v2"}
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--version", "-v", type=str, choices=["1", "2"],
required=True, help="CoVoST version")
parser.add_argument("--src-lang", "-s", type=str, required=True,
help="source language code")
parser.add_argument("--tgt-lang", "-t", type=str, required=True,
help="target language code")
parser.add_argument("--root", "-d", type=str, required=True,
help="root path to translation TSV and output TSVs")
parser.add_argument("--cv-tsv", type=str, required=True,
help="path to validated.tsv from Common Voice")
return parser.parse_args()
def load_df_from_tsv(path: Path):
return pd.read_csv(path, sep="\t", header=0, encoding="utf-8",
escapechar="\\", quoting=csv.QUOTE_NONE, na_filter=False)
def save_df_to_tsv(dataframe, path: Path):
dataframe.to_csv(path, sep="\t", header=True, index=False, encoding="utf-8",
escapechar="\\", quoting=csv.QUOTE_NONE)
def get_v1_split(df: pd.DataFrame, split: str):
return df[(df["split"] == split) | (df["split"] == f"{split}_covost")]
def get_v2_split(df: pd.DataFrame, split: str):
if split == "train":
return df[(df["split"] == split) | (df["split"] == f"{split}_covost")]
else:
return df[df["split"] == split]
def main():
args = get_args()
ver, src, tgt = args.version, args.src_lang, args.tgt_lang
assert src != tgt and "en" in {src, tgt}
if src == "en":
assert tgt in EN_XX_LANGUAGES[ver]
else:
assert src in XX_EN_LANGUAGES[ver]
root = Path(args.root)
tsv_prefix = TSV_PREFIX[ver]
cv_tsv = load_df_from_tsv(args.cv_tsv)
covost_tsv = load_df_from_tsv(root / f"{tsv_prefix}.{src}_{tgt}.tsv")
df = pd.merge(
left=cv_tsv[["path", "sentence", "client_id"]],
right=covost_tsv[["path", "translation", "split"]],
how="inner",
on="path",
)
for split in SPLITS:
get_split = {"1": get_v1_split, "2": get_v2_split}.get(ver, None)
assert get_split is not None
cur_df = get_split(df, split)
cur_df = cur_df[["path", "sentence", "translation", "client_id"]]
save_df_to_tsv(cur_df, root / f"{tsv_prefix}.{src}_{tgt}.{split}.tsv")
if __name__ == "__main__":
main()
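# Example invocation (paths are placeholders; adjust to your local layout):
#   python get_covost_splits.py --version 2 --src-lang fr --tgt-lang en \
#       --root /path/to/covost/fr_en --cv-tsv /path/to/common_voice/fr/validated.tsv
# This expects covost_v2.fr_en.tsv under --root and writes
# covost_v2.fr_en.{train,dev,test}.tsv next to it.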
| covost-main | get_covost_splits.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import os.path as op
import urllib.request
from tqdm import tqdm
LANG_CODE_2_TO_3 = {
'fr': 'fra', 'de': 'deu', 'nl': 'nld', 'ru': 'rus', 'en': 'eng', 'es': 'spa'
}
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, default='data/tt/mp3',
help='root path for MP3 files')
return parser.parse_args()
def _download_mp3(root: str, lang: str, s_id: str, overwrite=False):
path = op.join(root, f'{s_id}.mp3')
if not overwrite and op.isfile(path):
return
url = f'https://audio.tatoeba.org/sentences/{lang}/{s_id}.mp3'
try:
urllib.request.urlretrieve(url, path)
except Exception as e:
print(e, url)
return str(e)
def main():
args = get_args()
if not op.isdir(args.root):
os.makedirs(args.root)
for lang in LANG_CODE_2_TO_3:
print(f'Downloading {lang} speeches...')
lang_3 = LANG_CODE_2_TO_3[lang]
with open(f'data/tt/tatoeba20191004.s2t.{lang}_en.tsv') as f:
next(f)
ids = [r.strip().split('\t')[0] for r in f]
for i in tqdm(ids):
_download_mp3(args.root, lang_3, i)
if __name__ == '__main__':
main()
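# Example invocation (default root shown; the TSV files under data/tt/ must already exist):
#   python get_tt_speech.py --root data/tt/mp3
# MP3s are fetched from audio.tatoeba.org into --root, one file per sentence id.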
| covost-main | get_tt_speech.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
### usage ###
# (run w/ gpu): python demoStream.py --tx_cuda 1 --rx_cuda 2 --model libritts_v1 --input_device x --output_device o
# (run w/ cpu): python demoStream.py --tx_cuda -1 --rx_cuda -1 --model libritts_sym --input_device x --output_device o
import torch
import argparse
from utils.audiodec import AudioDec, AudioDecStreamer, assign_model
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="libritts_sym")
parser.add_argument("-i", "--input", type=str, default="input.wav")
parser.add_argument("-o", "--output", type=str, default="output.wav")
parser.add_argument('--tx_cuda', type=int, default=-1 )
parser.add_argument('--rx_cuda', type=int, default=-1 )
parser.add_argument('--input_device', type=int, default=1)
parser.add_argument('--output_device', type=int, default=4)
parser.add_argument('--frame_size', type=int, default=1200)
parser.add_argument('--num_threads', type=int, default=4)
args = parser.parse_args()
# device assignment
if args.tx_cuda < 0:
        tx_device = 'cpu'
else:
tx_device = f'cuda:{args.tx_cuda}'
if args.rx_cuda < 0:
        rx_device = 'cpu'
else:
rx_device = f'cuda:{args.rx_cuda}'
torch.set_num_threads(args.num_threads)
# model assignment
sample_rate, encoder_checkpoint, decoder_checkpoint = assign_model(args.model)
    # AudioDec initialize
    print("AudioDec initializing!")
audiodec = AudioDec(tx_device=tx_device, rx_device=rx_device)
audiodec.load_transmitter(encoder_checkpoint)
audiodec.load_receiver(encoder_checkpoint, decoder_checkpoint)
    # Streamer initialize
    print("Streamer initializing!")
streamer = AudioDecStreamer(
input_device=args.input_device,
output_device=args.output_device,
frame_size=args.frame_size,
sample_rate=sample_rate,
tx_encoder=audiodec.tx_encoder,
tx_device=tx_device,
rx_encoder=audiodec.rx_encoder,
decoder=audiodec.decoder,
rx_device=rx_device,
)
streamer.enable_filedump(
input_stream_file=args.input,
output_stream_file=args.output,
)
# run
print("Ready to run!")
latency="low"
# TODO this is responsible for ~100ms latency, seems to be driver dependent. latency=0 works on Mac but not on Windows
streamer.run(latency)
if __name__ == "__main__":
main()
| AudioDec-main | demoStream.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
### usage ###
# (run w/ gpu): python demoFile.py --model libritts_v1 -i xxx.wav -o ooo.wav
# (run w/ cpu): python demoFile.py --cuda -1 --model libritts_sym -i xxx.wav -o ooo.wav
import os
import torch
import argparse
import numpy as np
import soundfile as sf
from utils.audiodec import AudioDec, assign_model
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="libritts_v1")
parser.add_argument("-i", "--input", type=str, required=True)
parser.add_argument("-o", "--output", type=str, required=True)
parser.add_argument('--cuda', type=int, default=0 )
parser.add_argument('--num_threads', type=int, default=4)
args = parser.parse_args()
# device assignment
if args.cuda < 0:
        tx_device = 'cpu'
        rx_device = 'cpu'
else:
tx_device = f'cuda:{args.cuda}'
rx_device = f'cuda:{args.cuda}'
torch.set_num_threads(args.num_threads)
# model assignment
sample_rate, encoder_checkpoint, decoder_checkpoint = assign_model(args.model)
    # AudioDec initialize
    print("AudioDec initializing!")
audiodec = AudioDec(tx_device=tx_device, rx_device=rx_device)
audiodec.load_transmitter(encoder_checkpoint)
audiodec.load_receiver(encoder_checkpoint, decoder_checkpoint)
with torch.no_grad():
if os.path.exists(args.input):
data, fs = sf.read(args.input, always_2d=True)
else:
raise ValueError(f'Input file {args.input} does not exist!')
assert fs == sample_rate, f"data ({fs}Hz) is not matched to model ({sample_rate}Hz)!"
x = np.expand_dims(data.transpose(1, 0), axis=1) # (T, C) -> (C, 1, T)
x = torch.tensor(x, dtype=torch.float).to(tx_device)
print("Encode/Decode...")
z = audiodec.tx_encoder.encode(x)
idx = audiodec.tx_encoder.quantize(z)
zq = audiodec.rx_encoder.lookup(idx)
y = audiodec.decoder.decode(zq)[:, :, :x.size(-1)]
y = y.squeeze(1).transpose(1, 0).cpu().numpy() # T x C
sf.write(
args.output,
y,
fs,
"PCM_16",
)
print(f"Output {args.output}!")
if __name__ == "__main__":
main()
| AudioDec-main | demoFile.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
import os
import sys
import yaml
import torch
import logging
import argparse
import numpy as np
import soundfile as sf
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
from dataloader import SingleDataset
from models.autoencoder.AudioDec import Generator as generator_audiodec
class StatisticMain(object):
def __init__(self, args,):
# set logger
logging.basicConfig(
level=logging.INFO,
stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
# device
if not torch.cuda.is_available():
self.device = torch.device('cpu')
logging.info(f"device: cpu")
else:
self.device = torch.device('cuda')
logging.info(f"device: gpu")
# initialize config
with open(args.config, 'r') as f:
self.config = yaml.load(f, Loader=yaml.FullLoader)
# initialize attribute
self.stats_path = self.config['stats']
self.analyzer_checkpoint = self.config['analyzer']
self.analyzer_config = self._load_config(self.analyzer_checkpoint)
self.model_type = self.analyzer_config.get('model_type', 'symAudioDec')
os.makedirs(os.path.dirname(self.stats_path), exist_ok=True)
def _load_config(self, checkpoint, config_name='config.yml'):
dirname = os.path.dirname(checkpoint)
config_path = os.path.join(dirname, config_name)
with open(config_path) as f:
config = yaml.load(f, Loader=yaml.Loader)
return config
def load_dataset(self, subset, subset_num):
audio_path = os.path.join(
self.config['data']['path'],
self.config['data']['subset'][subset],
)
assert os.path.exists(audio_path), f"{audio_path} does not exist!"
self.dataset = SingleDataset(
files=audio_path,
query="*.wav",
load_fn=sf.read,
return_utt_id=False,
subset_num=subset_num,
)
logging.info(f"The number of {subset} audio files = {len(self.dataset)}.")
def load_analyzer(self):
if self.model_type in ['symAudioDec', 'symAudioDecUniv']:
analyzer = generator_audiodec
else:
raise NotImplementedError(f"Analyzer {self.model_type} is not supported!")
self.analyzer = analyzer(**self.analyzer_config['generator_params'])
self.analyzer.load_state_dict(
torch.load(self.analyzer_checkpoint, map_location='cpu')['model']['generator'])
self.analyzer = self.analyzer.eval().to(self.device)
logging.info(f"Loaded Analyzer from {self.analyzer_checkpoint}.")
def audio_analysis(self, audio):
x = torch.tensor(audio, dtype=torch.float).to(self.device)
x = x.transpose(1, 0).unsqueeze(0) # (T, C) -> (1, C, T)
x = self.analyzer.encoder(x)
z = self.analyzer.projector(x)
zq, _, _ = self.analyzer.quantizer(z)
return zq.squeeze(0).transpose(1, 0).cpu().numpy() # (T', C)
def run(self):
with torch.no_grad(), tqdm(self.dataset, desc="[statistic]") as pbar:
scaler = StandardScaler()
for idx, x in enumerate(pbar, 1):
zq = self.audio_analysis(x)
scaler.partial_fit(zq)
stats = np.stack([scaler.mean_, scaler.scale_], axis=0)
np.save(
self.stats_path,
stats.astype(np.float32),
allow_pickle=False,
)
logging.info(f"Finished statistical calculation of {idx} utterances.")
def main():
"""Run feature extraction process."""
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, required=True)
parser.add_argument("--subset", type=str, default="train")
parser.add_argument("--subset_num", type=int, default=-1)
args = parser.parse_args()
    # initialize statistic_main
statistic_main = StatisticMain(args=args)
# load dataset
statistic_main.load_dataset(args.subset, args.subset_num)
# load analyzer
statistic_main.load_analyzer()
# run testing
statistic_main.run()
if __name__ == "__main__":
main()
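# Example invocation (config path is a placeholder for your experiment config):
#   python codecStatistic.py -c config/statistic/example.yaml --subset train
# The config is expected to provide the 'stats', 'analyzer', and 'data' entries read above.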
| AudioDec-main | codecStatistic.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
import os
import logging
import argparse
import torch
import soundfile as sf
from torch.utils.data import DataLoader
from dataloader import CollaterAudio, CollaterAudioPair
from dataloader import SingleDataset, MultiDataset
from models.autoencoder.AudioDec import Generator as generator_audiodec
from models.vocoder.HiFiGAN import Generator as generator_hifigan
from models.vocoder.HiFiGAN import Discriminator as discriminator_hifigan
from models.vocoder.UnivNet import Discriminator as discriminator_univnet
from trainer.autoencoder import Trainer as TrainerAutoEncoder
from trainer.vocoder import Trainer as TrainerVocoder
from trainer.denoise import Trainer as TrainerDenoise
from bin.train import TrainGAN
from losses import DiscriminatorAdversarialLoss
from losses import FeatureMatchLoss
from losses import GeneratorAdversarialLoss
from losses import MultiResolutionSTFTLoss
from losses import MultiMelSpectrogramLoss
from losses import MultiWindowShapeLoss
class TrainMain(TrainGAN):
def __init__(self, args,):
super(TrainMain, self).__init__(args=args,)
self.train_mode = self.config.get('train_mode', 'autoencoder')
self.model_type = self.config.get('model_type', 'symAudioDec')
self.data_path = self.config['data']['path']
# DATA LOADER
def initialize_data_loader(self):
logging.info(f"Loading datasets... (batch_lenght: {self.batch_length})")
if self.train_mode in ['autoencoder', 'vocoder']:
train_set = self._audio('train')
valid_set = self._audio('valid')
collater = CollaterAudio(batch_length=self.batch_length)
elif self.train_mode in ['denoise']:
train_set = self._audio_pair('noisy_train', 'clean_train')
valid_set = self._audio_pair('noisy_valid', 'clean_valid')
collater = CollaterAudioPair(batch_length=self.batch_length)
else:
raise NotImplementedError(f"Train mode: {self.train_mode} is not supported!")
logging.info(f"The number of training files = {len(train_set)}.")
logging.info(f"The number of validation files = {len(valid_set)}.")
dataset = {'train': train_set, 'dev': valid_set}
self._data_loader(dataset, collater)
def _data_loader(self, dataset, collater):
self.data_loader = {
'train': DataLoader(
dataset=dataset['train'],
shuffle=True,
collate_fn=collater,
batch_size=self.config['batch_size'],
num_workers=self.config['num_workers'],
pin_memory=self.config['pin_memory'],
),
'dev': DataLoader(
dataset=dataset['dev'],
shuffle=False,
collate_fn=collater,
batch_size=self.config['batch_size'],
num_workers=self.config['num_workers'],
pin_memory=self.config['pin_memory'],
),
}
def _audio(self, subset, subset_num=-1, return_utt_id=False):
audio_dir = os.path.join(
self.data_path, self.config['data']['subset'][subset])
params = {
'files': audio_dir,
'query': "*.wav",
'load_fn': sf.read,
'return_utt_id': return_utt_id,
'subset_num': subset_num,
}
return SingleDataset(**params)
def _audio_pair(self, subset_n, subset_c, subset_num=-1, return_utt_id=False):
audio_n_dir = os.path.join(
self.data_path, self.config['data']['subset'][subset_n])
audio_c_dir = os.path.join(
self.data_path, self.config['data']['subset'][subset_c])
params = {
'multi_files': [audio_c_dir, audio_n_dir], # (main, sub)
'queries': ["*.wav"]*2,
'load_fns': [sf.read]*2,
'return_utt_id': return_utt_id,
'subset_num': subset_num,
}
return MultiDataset(**params)
# MODEL ARCHITECTURE
def define_model(self):
# generator
generator = self._define_generator(self.model_type)
self.model['generator'] = generator.to(self.device)
# discriminator
discriminator = self._define_discriminator(self.model_type)
self.model['discriminator'] = discriminator.to(self.device)
# optimizer
self._define_optimizer_scheduler()
#self._show_setting()
def _define_generator(self, model_type):
if model_type in ['symAudioDec', 'symAudioDecUniv']:
generator = generator_audiodec
elif model_type in ['HiFiGAN', 'UnivNet']:
generator = generator_hifigan
else:
raise NotImplementedError(f"Model type: {model_type} is not supported for the generator!")
return generator(**self.config['generator_params'])
def _define_discriminator(self, model_type):
if model_type in ['symAudioDec', 'HiFiGAN']:
discriminator = discriminator_hifigan
elif model_type in ['symAudioDecUniv', 'UnivNet']:
discriminator = discriminator_univnet
else:
raise NotImplementedError(f"Model type: {model_type} is not supported for the discriminator!")
return discriminator(**self.config['discriminator_params'])
def _define_optimizer_scheduler(self):
generator_optimizer_class = getattr(
torch.optim,
self.config['generator_optimizer_type']
)
discriminator_optimizer_class = getattr(
torch.optim,
self.config['discriminator_optimizer_type']
)
self.optimizer = {
'generator': generator_optimizer_class(
self.model['generator'].parameters(),
**self.config['generator_optimizer_params'],
),
'discriminator': discriminator_optimizer_class(
self.model['discriminator'].parameters(),
**self.config['discriminator_optimizer_params'],
),
}
generator_scheduler_class = getattr(
torch.optim.lr_scheduler,
self.config.get('generator_scheduler_type', "StepLR"),
)
discriminator_scheduler_class = getattr(
torch.optim.lr_scheduler,
self.config.get('discriminator_scheduler_type', "StepLR"),
)
self.scheduler = {
'generator': generator_scheduler_class(
optimizer=self.optimizer['generator'],
**self.config['generator_scheduler_params'],
),
'discriminator': discriminator_scheduler_class(
optimizer=self.optimizer['discriminator'],
**self.config['discriminator_scheduler_params'],
),
}
# CRITERIA
def define_criterion(self):
self.criterion = {
'gen_adv': GeneratorAdversarialLoss(
**self.config['generator_adv_loss_params']).to(self.device),
'dis_adv': DiscriminatorAdversarialLoss(
**self.config['discriminator_adv_loss_params']).to(self.device),
}
if self.config.get('use_feat_match_loss', False):
self.criterion['feat_match'] = FeatureMatchLoss(
**self.config.get('feat_match_loss_params', {}),
).to(self.device)
if self.config.get('use_mel_loss', False):
self.criterion['mel'] = MultiMelSpectrogramLoss(
**self.config['mel_loss_params'],
).to(self.device)
if self.config.get('use_stft_loss', False):
self.criterion['stft'] = MultiResolutionSTFTLoss(
**self.config['stft_loss_params'],
).to(self.device)
if self.config.get('use_shape_loss', False):
self.criterion['shape'] = MultiWindowShapeLoss(
**self.config['shape_loss_params'],
).to(self.device)
# TRAINER
def define_trainer(self):
if self.train_mode in ['autoencoder']:
trainer = TrainerAutoEncoder
elif self.train_mode in ['vocoder']:
trainer = TrainerVocoder
elif self.train_mode in ['denoise']:
trainer = TrainerDenoise
else:
raise NotImplementedError(f"Train mode: {self.train_mode} is not supported for Trainer!")
trainer_parameters = {}
trainer_parameters['steps'] = 0
trainer_parameters['epochs'] = 0
trainer_parameters['data_loader'] = self.data_loader
trainer_parameters['model'] = self.model
trainer_parameters['criterion'] = self.criterion
trainer_parameters['optimizer'] = self.optimizer
trainer_parameters['scheduler'] = self.scheduler
trainer_parameters['config'] = self.config
trainer_parameters['device'] = self.device
self.trainer = trainer(**trainer_parameters)
# MODEL INITIALIZATION
def initialize_model(self):
initial = self.config.get("initial", "")
if os.path.exists(self.resume): # resume from trained model
self.trainer.load_checkpoint(self.resume)
logging.info(f"Successfully resumed from {self.resume}.")
elif os.path.exists(initial): # initial new model with the pre-trained model
self.trainer.load_checkpoint(initial, load_only_params=True)
logging.info(f"Successfully initialize parameters from {initial}.")
else:
logging.info("Train from scrach")
# load the pre-trained encoder for vocoder training
if self.train_mode in ['vocoder']:
analyzer_checkpoint = self.config.get("analyzer", "")
assert os.path.exists(analyzer_checkpoint), f"Analyzer {analyzer_checkpoint} does not exist!"
analyzer_config = self._load_config(analyzer_checkpoint)
self._initialize_analyzer(analyzer_config, analyzer_checkpoint)
def _initialize_analyzer(self, config, checkpoint):
model_type = config.get('model_type', 'symAudioDec')
if model_type in ['symAudioDec', 'symAudioDecUniv']:
analyzer = generator_audiodec
else:
raise NotImplementedError(f"Model type: {model_type} is not supported for the analyzer!")
self.model['analyzer'] = analyzer(**config['generator_params']).to(self.device)
self.model['analyzer'].load_state_dict(
torch.load(checkpoint, map_location='cpu')['model']['generator'])
logging.info(f"Successfully load analyzer from {checkpoint}.")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, required=True)
parser.add_argument("--tag", type=str, required=True)
parser.add_argument("--exp_root", type=str, default="exp")
parser.add_argument("--resume", default="", type=str, nargs="?",
help='checkpoint file path to resume training. (default="")',
)
parser.add_argument('--seed', default=1337, type=int)
parser.add_argument('--disable_cudnn', choices=('True','False'), default='False', help='Disable CUDNN')
args = parser.parse_args()
    # initialize train_main
train_main = TrainMain(args=args)
# get dataset
train_main.initialize_data_loader()
# define models, optimizers, and schedulers
train_main.define_model()
# define criteria
train_main.define_criterion()
# define trainer
train_main.define_trainer()
# model initialization
train_main.initialize_model()
# run training loop
train_main.run()
if __name__ == "__main__":
main()
| AudioDec-main | codecTrain.py |
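The optimizer, scheduler, and loss setup in codecTrain.py above is driven entirely by the loaded config. Below is a minimal, hypothetical sketch (not a config shipped with the repository) of the keys that define_model() and define_criterion() read, written as a Python dict; every value is a placeholder chosen for illustration only.

# Keys read by TrainMain.define_model() and TrainMain.define_criterion();
# all values below are illustrative, not taken from the repository configs.
illustrative_config = {
    "generator_optimizer_type": "Adam",
    "generator_optimizer_params": {"lr": 1.0e-4, "betas": [0.5, 0.9]},
    "discriminator_optimizer_type": "Adam",
    "discriminator_optimizer_params": {"lr": 2.0e-4, "betas": [0.5, 0.9]},
    "generator_scheduler_type": "StepLR",          # StepLR is also the fallback default
    "generator_scheduler_params": {"step_size": 200000, "gamma": 0.5},
    "discriminator_scheduler_type": "StepLR",
    "discriminator_scheduler_params": {"step_size": 200000, "gamma": 0.5},
    "generator_adv_loss_params": {"average_by_discriminators": False},
    "discriminator_adv_loss_params": {"average_by_discriminators": False},
    "use_mel_loss": True,
    "mel_loss_params": {},                         # forwarded to MultiMelSpectrogramLoss
    "use_stft_loss": False,
    "use_feat_match_loss": False,
    "use_shape_loss": False,
}

# Typical invocation of the script (config path and tag are placeholders):
#   python codecTrain.py -c path/to/config.yaml --tag my_experiment --exp_root exp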
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
import os
import torch
import logging
import argparse
import soundfile as sf
from dataloader import SingleDataset
from models.autoencoder.AudioDec import Generator as generator_audiodec
from models.vocoder.HiFiGAN import Generator as generator_hifigan
from bin.test import TestGEN
class TestMain(TestGEN):
def __init__(self, args,):
super(TestMain, self).__init__(args=args,)
self.encoder_type = self.encoder_config.get('model_type', 'symAudioDec')
self.decoder_type = self.decoder_config.get('model_type', 'symAudioDec')
if self.encoder_config['generator_params']['input_channels'] > 1:
self.multi_channel = True
else:
self.multi_channel = False
# LOAD DATASET
def load_dataset(self, subset, subset_num):
data_path = os.path.join(
self.encoder_config['data']['path'],
self.encoder_config['data']['subset'][subset]
)
assert os.path.exists(data_path), f"{data_path} does not exist!"
self.dataset = SingleDataset(
files=data_path,
query="*.wav",
load_fn=sf.read,
return_utt_id=True,
subset_num=subset_num,
)
logging.info(f"The number of utterances = {len(self.dataset)}.")
# LOAD MODEL
def load_encoder(self):
if self.encoder_type in ['symAudioDec', 'symAudioDecUniv']:
encoder = generator_audiodec
else:
raise NotImplementedError(f"Encoder {self.encoder_type} is not supported!")
self.encoder = encoder(**self.encoder_config['generator_params'])
self.encoder.load_state_dict(
torch.load(self.encoder_checkpoint, map_location='cpu')['model']['generator'])
self.encoder = self.encoder.eval().to(self.device)
logging.info(f"Loaded Encoder from {self.encoder_checkpoint}.")
def load_decoder(self):
if self.decoder_type in ['symAudioDec', 'symAudioDecUniv']:
decoder = generator_audiodec
elif self.decoder_type in ['HiFiGAN', 'UnivNet']:
decoder = generator_hifigan
else:
raise NotImplementedError(f"Decoder {self.decoder_type} is not supported!")
self.decoder = decoder(**self.decoder_config['generator_params'])
self.decoder.load_state_dict(
torch.load(self.decoder_checkpoint, map_location='cpu')['model']['generator'])
self.decoder = self.decoder.eval().to(self.device)
logging.info(f"Loaded Decoder from {self.decoder_checkpoint}.")
def encode(self, audio):
x = torch.tensor(audio, dtype=torch.float).to(self.device)
if self.multi_channel:
x = x.transpose(1, 0).unsqueeze(0) # (T, C) -> (1, C, T)
else:
x = x.transpose(1, 0).unsqueeze(1) # (T, C) -> (C, 1, T)
x = self.encoder.encoder(x)
z = self.encoder.projector(x)
zq, _, _ = self.encoder.quantizer(z)
return zq
def decode(self, zq):
if self.decoder_type in ['HiFiGAN', 'UnivNet']:
y = self.decoder(zq)
else:
y = self.decoder.decoder(zq)
return y
# INITIAL FOLDER
def initial_folder(self, subset, output_name):
# model name
encoder = os.path.dirname(self.encoder_checkpoint).split('/')[-1]
decoder = os.path.dirname(self.decoder_checkpoint).split('/')[-1]
# model checkpoint
encoder_checkpoint = os.path.basename(self.encoder_checkpoint).split('steps')[0].split('-')[-1]
decoder_checkpoint = os.path.basename(self.decoder_checkpoint).split('steps')[0].split('-')[-1]
testdir = f"{encoder}-{decoder}_{encoder_checkpoint}-{decoder_checkpoint}"
# testing set
setdir = self.encoder_config['data']['subset'][subset]
self.outdir = os.path.join(output_name, testdir, setdir)
if not os.path.exists(self.outdir):
os.makedirs(self.outdir, exist_ok=True)
def main():
"""Run testing process."""
parser = argparse.ArgumentParser()
parser.add_argument("--subset", type=str, default="clean_test")
parser.add_argument("--subset_num", type=int, default=-1)
parser.add_argument("--encoder", type=str, required=True)
parser.add_argument("--decoder", type=str, required=True)
parser.add_argument("--output_dir", type=str, required=True)
args = parser.parse_args()
    # initialize test_main
test_main = TestMain(args=args)
# load dataset
test_main.load_dataset(args.subset, args.subset_num)
# load model
test_main.load_encoder()
test_main.load_decoder()
# initial folder
test_main.initial_folder(args.subset, args.output_dir)
# run testing
test_main.run()
if __name__ == "__main__":
main()
| AudioDec-main | codecTest.py |
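codecTest.py feeds (T, C) waveforms into an encoder that expects batched (B, C, T) tensors. The standalone sketch below is hypothetical and only illustrates the reshaping performed in TestMain.encode() above, to make the mono and multi-channel layout paths explicit; the 48000-sample length is an arbitrary assumption.

import torch

# Mono path in TestMain.encode(): (T, C) -> (C, 1, T), so channels act as the batch dim.
audio_mono = torch.randn(48000, 1)                    # assumed (T, C) mono waveform
x_mono = audio_mono.transpose(1, 0).unsqueeze(1)      # -> torch.Size([1, 1, 48000])

# Multi-channel path: (T, C) -> (1, C, T), a single batch item with C channels.
audio_stereo = torch.randn(48000, 2)                  # assumed (T, C) stereo waveform
x_stereo = audio_stereo.transpose(1, 0).unsqueeze(0)  # -> torch.Size([1, 2, 48000])

# Typical invocation of the script itself (all paths are placeholders):
#   python codecTest.py --encoder path/to/encoder_checkpoint \
#                       --decoder path/to/decoder_checkpoint \
#                       --output_dir output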
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Adversarial loss modules."""
import torch
import torch.nn.functional as F
class GeneratorAdversarialLoss(torch.nn.Module):
"""Generator adversarial loss module."""
def __init__(
self,
average_by_discriminators=True,
loss_type="mse",
):
"""Initialize GeneratorAversarialLoss module."""
super().__init__()
self.average_by_discriminators = average_by_discriminators
assert loss_type in ["mse", "hinge"], f"{loss_type} is not supported."
if loss_type == "mse":
self.criterion = self._mse_loss
else:
self.criterion = self._hinge_loss
def forward(self, outputs):
"""Calcualate generator adversarial loss.
Args:
outputs (Tensor or list): Discriminator outputs or list of
discriminator outputs.
Returns:
Tensor: Generator adversarial loss value.
"""
if isinstance(outputs, (tuple, list)):
adv_loss = 0.0
for i, outputs_ in enumerate(outputs):
if isinstance(outputs_, (tuple, list)):
# NOTE(kan-bayashi): case including feature maps
outputs_ = outputs_[-1]
adv_loss += self.criterion(outputs_)
if self.average_by_discriminators:
adv_loss /= i + 1
else:
adv_loss = self.criterion(outputs)
return adv_loss
def _mse_loss(self, x):
return F.mse_loss(x, x.new_ones(x.size()))
def _hinge_loss(self, x):
return -x.mean()
class DiscriminatorAdversarialLoss(torch.nn.Module):
"""Discriminator adversarial loss module."""
def __init__(
self,
average_by_discriminators=True,
loss_type="mse",
):
"""Initialize DiscriminatorAversarialLoss module."""
super().__init__()
self.average_by_discriminators = average_by_discriminators
assert loss_type in ["mse", "hinge"], f"{loss_type} is not supported."
if loss_type == "mse":
self.fake_criterion = self._mse_fake_loss
self.real_criterion = self._mse_real_loss
else:
self.fake_criterion = self._hinge_fake_loss
self.real_criterion = self._hinge_real_loss
def forward(self, outputs_hat, outputs):
"""Calcualate discriminator adversarial loss.
Args:
outputs_hat (Tensor or list): Discriminator outputs or list of
discriminator outputs calculated from generator outputs.
outputs (Tensor or list): Discriminator outputs or list of
discriminator outputs calculated from groundtruth.
Returns:
Tensor: Discriminator real loss value.
Tensor: Discriminator fake loss value.
"""
if isinstance(outputs, (tuple, list)):
real_loss = 0.0
fake_loss = 0.0
for i, (outputs_hat_, outputs_) in enumerate(zip(outputs_hat, outputs)):
if isinstance(outputs_hat_, (tuple, list)):
# NOTE(kan-bayashi): case including feature maps
outputs_hat_ = outputs_hat_[-1]
outputs_ = outputs_[-1]
real_loss += self.real_criterion(outputs_)
fake_loss += self.fake_criterion(outputs_hat_)
if self.average_by_discriminators:
fake_loss /= i + 1
real_loss /= i + 1
else:
real_loss = self.real_criterion(outputs)
fake_loss = self.fake_criterion(outputs_hat)
return real_loss, fake_loss
def _mse_real_loss(self, x):
return F.mse_loss(x, x.new_ones(x.size()))
def _mse_fake_loss(self, x):
return F.mse_loss(x, x.new_zeros(x.size()))
def _hinge_real_loss(self, x):
return -torch.mean(torch.min(x - 1, x.new_zeros(x.size())))
def _hinge_fake_loss(self, x):
return -torch.mean(torch.min(-x - 1, x.new_zeros(x.size())))
| AudioDec-main | losses/adversarial_loss.py |
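A minimal usage sketch of the two adversarial loss modules above, assuming three discriminators that each return a raw score tensor; the shapes and the way the terms are combined are illustrative, not taken from the repository's trainers.

import torch
# GeneratorAdversarialLoss and DiscriminatorAdversarialLoss are defined above
# (also importable as `from losses import ...` via the package __init__).

gen_adv = GeneratorAdversarialLoss(average_by_discriminators=True, loss_type="mse")
dis_adv = DiscriminatorAdversarialLoss(average_by_discriminators=True, loss_type="mse")

# One raw-score tensor per discriminator, e.g. shape (batch, 1, frames).
outputs_fake = [torch.randn(4, 1, 100) for _ in range(3)]  # D(G(z))
outputs_real = [torch.randn(4, 1, 100) for _ in range(3)]  # D(x)

adv_loss = gen_adv(outputs_fake)                           # pushes fake scores toward "real"
real_loss, fake_loss = dis_adv(outputs_fake, outputs_real) # (outputs_hat, outputs) order
dis_loss = real_loss + fake_loss                           # illustrative combination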
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
"""STFT-based loss modules."""
import torch
import torch.nn.functional as F
def stft(x, fft_size, hop_size, win_length, window, eps=1e-7):
"""Perform STFT and convert to magnitude spectrogram.
Args:
x (Tensor): Input signal tensor (B, T).
fft_size (int): FFT size.
hop_size (int): Hop size.
win_length (int): Window length.
window (str): Window function type.
Returns:
Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).
"""
x_stft = torch.stft(x, fft_size, hop_size, win_length, window, return_complex=True)
x_power = x_stft.real ** 2 + x_stft.imag ** 2
return torch.sqrt(torch.clamp(x_power, min=eps)).transpose(2, 1)
class SpectralConvergenceLoss(torch.nn.Module):
"""Spectral convergence loss module."""
def __init__(self):
"""Initilize spectral convergence loss module."""
super(SpectralConvergenceLoss, self).__init__()
def forward(self, x_mag, y_mag):
"""Calculate forward propagation.
Args:
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
Tensor: Spectral convergence loss value.
"""
return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro")
class LogSTFTMagnitudeLoss(torch.nn.Module):
"""Log STFT magnitude loss module."""
def __init__(self):
"""Initilize los STFT magnitude loss module."""
super(LogSTFTMagnitudeLoss, self).__init__()
def forward(self, x_mag, y_mag):
"""Calculate forward propagation.
Args:
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
Tensor: Log STFT magnitude loss value.
"""
return F.l1_loss(torch.log(y_mag), torch.log(x_mag))
class STFTLoss(torch.nn.Module):
"""STFT loss module."""
def __init__(
self,
fft_size=1024,
hop_size=120,
win_length=600,
window="hann_window",
):
"""Initialize STFT loss module."""
super(STFTLoss, self).__init__()
self.fft_size = fft_size
self.hop_size = hop_size
self.win_length = win_length
self.spectral_convergence_loss = SpectralConvergenceLoss()
self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()
self.register_buffer("window", getattr(torch, window)(win_length))
def forward(self, x, y):
"""Calculate forward propagation.
Args:
x (Tensor): Predicted signal (B, T).
y (Tensor): Groundtruth signal (B, T).
Returns:
Tensor: Spectral convergence loss value.
Tensor: Log STFT magnitude loss value.
"""
x_mag = stft(x, self.fft_size, self.hop_size, self.win_length, self.window)
y_mag = stft(y, self.fft_size, self.hop_size, self.win_length, self.window)
sc_loss = self.spectral_convergence_loss(x_mag, y_mag)
mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag)
return sc_loss, mag_loss
class MultiResolutionSTFTLoss(torch.nn.Module):
"""Multi resolution STFT loss module."""
def __init__(
self,
fft_sizes=[1024, 2048, 512],
hop_sizes=[120, 240, 50],
win_lengths=[600, 1200, 240],
window="hann_window",
):
"""Initialize Multi resolution STFT loss module.
Args:
fft_sizes (list): List of FFT sizes.
hop_sizes (list): List of hop sizes.
win_lengths (list): List of window lengths.
window (str): Window function type.
"""
super(MultiResolutionSTFTLoss, self).__init__()
assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
self.stft_losses = torch.nn.ModuleList()
for fft_size, hop_size, win_length in zip(fft_sizes, hop_sizes, win_lengths):
self.stft_losses += [STFTLoss(fft_size, hop_size, win_length, window)]
def forward(self, x, y):
"""Calculate forward propagation.
Args:
x (Tensor): Predicted signal (B, T) or (B, #subband, T).
y (Tensor): Groundtruth signal (B, T) or (B, #subband, T).
Returns:
Tensor: Multi resolution spectral convergence loss value.
Tensor: Multi resolution log STFT magnitude loss value.
"""
if len(x.shape) == 3:
x = x.view(-1, x.size(2)) # (B, C, T) -> (B x C, T)
y = y.view(-1, y.size(2)) # (B, C, T) -> (B x C, T)
sc_loss = 0.0
mag_loss = 0.0
for f in self.stft_losses:
sc_l, mag_l = f(x, y)
sc_loss += sc_l
mag_loss += mag_l
sc_loss /= len(self.stft_losses)
mag_loss /= len(self.stft_losses)
return sc_loss, mag_loss
| AudioDec-main | losses/stft_loss.py |
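A minimal usage sketch of MultiResolutionSTFTLoss above, with random tensors standing in for predicted and ground-truth waveforms; the equal weighting of the two terms is illustrative and the real weighting is config-dependent.

import torch
# MultiResolutionSTFTLoss is defined above (also importable via `from losses import ...`).

criterion = MultiResolutionSTFTLoss()      # defaults: fft_sizes=[1024, 2048, 512], ...
y_hat = torch.randn(4, 16000)              # predicted signal (B, T)
y = torch.randn(4, 16000)                  # ground-truth signal (B, T)

sc_loss, mag_loss = criterion(y_hat, y)    # each averaged over the three resolutions
stft_loss = sc_loss + mag_loss             # illustrative equal weighting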
from .adversarial_loss import * # NOQA
from .feat_match_loss import * # NOQA
from .mel_loss import * # NOQA
from .stft_loss import * # NOQA
from .waveform_loss import * # NOQA
| AudioDec-main | losses/__init__.py