python_code (stringlengths 0–992k) | repo_name (stringlengths 8–46) | file_path (stringlengths 5–162)
---|---|---|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pytype: skip-file
# pylint: skip-file
"""EfficientNet models modified with added film layers.
Mostly copied from third_party/py/keras/applications/efficientnet.py
"""
import copy
import math
import os
import warnings
import json
import tensorflow.compat.v2 as tf
from tensorflow.keras import layers
from robotics_transformer.film_efficientnet.film_conditioning_layer import FilmConditioning
BASE_WEIGHTS_PATH = 'efficientnet_checkpoints/efficientnet'
IMAGENET_JSON_PATH = 'efficientnet_checkpoints/imagenet_classes.json'
CLASS_INDEX = None
WEIGHTS_PATHS = {
'efficientnetb3': BASE_WEIGHTS_PATH + 'b3.h5',
'efficientnetb3_notop': BASE_WEIGHTS_PATH + 'b3_notop.h5',
}
DEFAULT_BLOCKS_ARGS = [{
'kernel_size': 3,
'repeats': 1,
'filters_in': 32,
'filters_out': 16,
'expand_ratio': 1,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 2,
'filters_in': 16,
'filters_out': 24,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 2,
'filters_in': 24,
'filters_out': 40,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 3,
'filters_in': 40,
'filters_out': 80,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 3,
'filters_in': 80,
'filters_out': 112,
'expand_ratio': 6,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 4,
'filters_in': 112,
'filters_out': 192,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 1,
'filters_in': 192,
'filters_out': 320,
'expand_ratio': 6,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}]
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
'distribution': 'truncated_normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1. / 3.,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For EfficientNet, input preprocessing is included as part of the model
(as a `Rescaling` layer), and thus
`tf.keras.applications.efficientnet.preprocess_input` is actually a
pass-through function. EfficientNet models expect their inputs to be float
tensors of pixels with values in the [0-255] range.
Args:
include_top: Whether to include the fully-connected
layer at the top of the network. Defaults to True.
weights: One of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded. Defaults to 'imagenet'.
input_tensor: Optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False.
It should have exactly 3 input channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`. Defaults to None.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified. Defaults to 1000 (number of
ImageNet classes).
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Defaults to 'softmax'.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
IMAGENET_STDDEV_RGB = [0.229, 0.224, 0.225]
def validate_activation(classifier_activation, weights):
"""validates that the classifier is compatible with the weights.
Args:
classifier_activation: str or callable activation function
weights: The pretrained weights to load.
Raises:
ValueError: if an activation other than `None` or `softmax` is used with
pretrained weights.
"""
if weights is None:
return
classifier_activation = tf.keras.activations.get(classifier_activation)
if classifier_activation not in {
tf.keras.activations.get('softmax'),
tf.keras.activations.get(None)
}:
raise ValueError('Only `None` and `softmax` activations are allowed '
'for the `classifier_activation` argument when using '
'pretrained weights, with `include_top=True`; Received: '
f'classifier_activation={classifier_activation}')
def correct_pad(inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
Args:
inputs: Input tensor.
kernel_size: An integer or tuple/list of 2 integers.
Returns:
A tuple.
"""
img_dim = 2 if tf.keras.backend.image_data_format() == 'channels_first' else 1
input_size = tf.keras.backend.int_shape(inputs)[img_dim:(img_dim + 2)]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return ((correct[0] - adjust[0], correct[0]), (correct[1] - adjust[1],
correct[1]))
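# Worked example (illustrative): for a 224x224 `channels_last` input and
# kernel_size=3, input_size=(224, 224), adjust=(1, 1) and correct=(1, 1), so the
# function returns ((0, 1), (0, 1)) -- one extra row/column of zeros on the
# bottom/right, which lets the following stride-2 'valid' convolution mimic
# 'same' padding.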
def obtain_input_shape(input_shape,
default_size,
min_size,
data_format,
require_flatten,
weights=None):
"""Internal utility to compute/validate a model's input shape.
Args:
input_shape: Either None (will return the default network input shape), or a
user-provided shape to be validated.
default_size: Default input width/height for the model.
min_size: Minimum input width/height accepted by the model.
data_format: Image data format to use.
require_flatten: Whether the model is expected to be linked to a classifier
via a Flatten layer.
weights: One of `None` (random initialization) or 'imagenet' (pre-training
on ImageNet). If weights='imagenet' input channels must be equal to 3.
Returns:
An integer shape tuple (may include None entries).
Raises:
ValueError: In case of invalid argument values.
"""
if weights != 'imagenet' and input_shape and len(input_shape) == 3:
if data_format == 'channels_first':
if input_shape[0] not in {1, 3}:
warnings.warn(
'This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[0]) + ' input channels.',
stacklevel=2)
default_shape = (input_shape[0], default_size, default_size)
else:
if input_shape[-1] not in {1, 3}:
warnings.warn(
'This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[-1]) + ' input channels.',
stacklevel=2)
default_shape = (default_size, default_size, input_shape[-1])
else:
if data_format == 'channels_first':
default_shape = (3, default_size, default_size)
else:
default_shape = (default_size, default_size, 3)
if weights == 'imagenet' and require_flatten:
if input_shape is not None:
if input_shape != default_shape:
raise ValueError('When setting `include_top=True` '
'and loading `imagenet` weights, '
f'`input_shape` should be {default_shape}. '
f'Received: input_shape={input_shape}')
return default_shape
if input_shape:
if data_format == 'channels_first':
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[0] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; Received '
f'`input_shape={input_shape}`')
if ((input_shape[1] is not None and input_shape[1] < min_size) or
(input_shape[2] is not None and input_shape[2] < min_size)):
raise ValueError(f'Input size must be at least {min_size}'
f'x{min_size}; Received: '
f'input_shape={input_shape}')
else:
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[-1] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; Received '
f'`input_shape={input_shape}`')
if ((input_shape[0] is not None and input_shape[0] < min_size) or
(input_shape[1] is not None and input_shape[1] < min_size)):
raise ValueError('Input size must be at least '
f'{min_size}x{min_size}; Received: '
f'input_shape={input_shape}')
else:
if require_flatten:
input_shape = default_shape
else:
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if require_flatten:
if None in input_shape:
raise ValueError('If `include_top` is True, '
'you should specify a static `input_shape`. '
f'Received: input_shape={input_shape}')
return input_shape
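# Example behaviour (illustrative): with data_format='channels_last',
# default_size=300, require_flatten=True and weights='imagenet', passing
# input_shape=None returns (300, 300, 3), while any other shape raises a
# ValueError because the pretrained classifier head fixes the input size.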
def EfficientNet(width_coefficient,
depth_coefficient,
default_size,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
activation='swish',
blocks_args='default',
model_name='efficientnet',
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
include_film=False):
"""Instantiates the EfficientNet architecture using given scaling coefficients.
Args:
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
default_size: integer, default input image size.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
activation: activation function.
blocks_args: list of dicts, parameters to construct block modules.
model_name: string, model name.
include_top: whether to include the fully-connected layer at the top of the
network.
weights: one of `None` (random initialization), 'imagenet' (pre-training on
ImageNet), or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use
as image input for the model.
input_shape: optional shape tuple, only to be specified if `include_top` is
False. It should have exactly 3 input channels.
pooling: optional pooling mode for feature extraction when `include_top` is
`False`. - `None` means that the output of the model will be the 4D tensor
output of the last convolutional layer. - `avg` means that global average
pooling will be applied to the output of the last convolutional layer, and
thus the output of the model will be a 2D tensor. - `max` means that
global max pooling will be applied.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
include_film: bool, whether or not to insert film conditioning layers.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if blocks_args == 'default':
blocks_args = DEFAULT_BLOCKS_ARGS
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=tf.keras.backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if include_film:
with tf.compat.v1.variable_scope('context_input'):
context_input = layers.Input(shape=512)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not tf.keras.backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1
def round_filters(filters, divisor=depth_divisor):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
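# Worked example (illustrative), using the B3 coefficients passed in from
# EfficientNetB3 below (width_coefficient=1.2, depth_coefficient=1.4,
# depth_divisor=8):
#   round_filters(32) -> 32 * 1.2 = 38.4 -> 40 (nearest multiple of 8 that is
#   not more than 10% below 38.4)
#   round_repeats(3)  -> ceil(1.4 * 3) = 5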
# Build stem
x = img_input
x = layers.Rescaling(1. / 255.)(x)
x = layers.Normalization(axis=bn_axis)(x)
# Note that the normalization layer uses the square of STDDEV as the
# variance for the layer: result = (input - mean) / sqrt(var)
# However, the original implementation uses (input - mean) / var to
# normalize the input, so we divide by another sqrt(var) to match the
# original implementation.
# See https://github.com/tensorflow/tensorflow/issues/49930 for more details
# We always apply this transformation, even when not using imagenet weights,
# because it needs to be in the graph when grafting weights from imagenet
# pretrained models.
x = layers.Rescaling(1. / tf.math.sqrt(IMAGENET_STDDEV_RGB))(x)
x = layers.ZeroPadding2D(padding=correct_pad(x, 3), name='stem_conv_pad')(x)
x = layers.Conv2D(
round_filters(32),
3,
strides=2,
padding='valid',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='stem_conv')(
x)
x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
x = layers.Activation(activation, name='stem_activation')(x)
# Build blocks
blocks_args = copy.deepcopy(blocks_args)
b = 0
blocks = float(sum(round_repeats(args['repeats']) for args in blocks_args))
for (i, args) in enumerate(blocks_args):
assert args['repeats'] > 0
# Update block input and output filters based on depth multiplier.
args['filters_in'] = round_filters(args['filters_in'])
args['filters_out'] = round_filters(args['filters_out'])
for j in range(round_repeats(args.pop('repeats'))):
# The first block needs to take care of stride and filter size increase.
if j > 0:
args['strides'] = 1
args['filters_in'] = args['filters_out']
x = block(
x,
activation,
drop_connect_rate * b / blocks,
name='block{}{}_'.format(i + 1, chr(j + 97)),
**args)
if include_film:
with tf.compat.v1.variable_scope('film_conditioning'):
x = FilmConditioning(num_channels=x.shape[-1])(x, context_input)
b += 1
# Build top
x = layers.Conv2D(
round_filters(1280),
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='top_conv')(
x)
x = layers.BatchNormalization(axis=bn_axis, name='top_bn')(x)
x = layers.Activation(activation, name='top_activation')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate, name='top_dropout')(x)
validate_activation(classifier_activation, weights)
x = layers.Dense(
classes,
activation=classifier_activation,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
name='predictions')(
x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = tf.keras.utils.get_source_inputs(input_tensor)
else:
inputs = img_input
if include_film:
inputs = (img_input, context_input)
# Create model.
model = tf.keras.Model(inputs, x, name=model_name)
# Load weights.
if weights == 'imagenet':
if include_top:
key = model_name
else:
key = model_name + '_notop'
weights_path = os.path.join(os.path.dirname(__file__), WEIGHTS_PATHS[key])
model.load_weights(weights_path, skip_mismatch=False, by_name=False)
elif weights is not None:
model.load_weights(weights, skip_mismatch=False, by_name=False)
return model
def block(inputs,
activation='swish',
drop_rate=0.,
name='',
filters_in=32,
filters_out=16,
kernel_size=3,
strides=1,
expand_ratio=1,
se_ratio=0.,
id_skip=True):
"""An inverted residual block.
Args:
inputs: input tensor.
activation: activation function.
drop_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
id_skip: boolean.
Returns:
output tensor for the block.
"""
bn_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1
# Expansion phase
filters = filters_in * expand_ratio
if expand_ratio != 1:
x = layers.Conv2D(
filters,
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'expand_conv')(
inputs)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn')(x)
x = layers.Activation(activation, name=name + 'expand_activation')(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
x = layers.ZeroPadding2D(
padding=correct_pad(x, kernel_size), name=name + 'dwconv_pad')(
x)
conv_pad = 'valid'
else:
conv_pad = 'same'
x = layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding=conv_pad,
use_bias=False,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'dwconv')(
x)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'bn')(x)
x = layers.Activation(activation, name=name + 'activation')(x)
# Squeeze and Excitation phase
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x)
if bn_axis == 1:
se_shape = (filters, 1, 1)
else:
se_shape = (1, 1, filters)
se = layers.Reshape(se_shape, name=name + 'se_reshape')(se)
se = layers.Conv2D(
filters_se,
1,
padding='same',
activation=activation,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'se_reduce')(
se)
se = layers.Conv2D(
filters,
1,
padding='same',
activation='sigmoid',
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'se_expand')(
se)
x = layers.multiply([x, se], name=name + 'se_excite')
# Output phase
x = layers.Conv2D(
filters_out,
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'project_conv')(
x)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn')(x)
if id_skip and strides == 1 and filters_in == filters_out:
if drop_rate > 0:
x = layers.Dropout(
drop_rate, noise_shape=(None, 1, 1, 1), name=name + 'drop')(
x)
x = layers.add([x, inputs], name=name + 'add')
return x
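# Illustrative shape walk-through (assuming the unscaled DEFAULT_BLOCKS_ARGS,
# i.e. width_coefficient=1, and a 224x224 input). For block2a
# (filters_in=16, filters_out=24, expand_ratio=6, kernel_size=3, strides=2,
# se_ratio=0.25) applied to a (B, 112, 112, 16) tensor:
#   expand:    1x1 conv            -> (B, 112, 112, 96)
#   depthwise: 3x3 conv, stride 2  -> (B, 56, 56, 96)
#   SE:        global pool to (B, 1, 1, 96), squeeze 96 -> 4 -> 96, multiply back
#   project:   1x1 conv            -> (B, 56, 56, 24)
# No residual is added here because strides != 1 and filters_in != filters_out.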
def maybe_restore_with_film(
*args,
weights='imagenet',
include_film=False,
**kwargs,
):
n1 = EfficientNet(*args, weights=weights, include_film=False, **kwargs)
if not include_film:
return n1
# Copy the model weights over to a new model. This is necessary
# in case we have inserted early film layers. In this case,
# the pretrained weights will fail to restore properly
# unless we do this trick.
n2 = EfficientNet(*args, weights=None, include_film=True, **kwargs)
# The layers without the film layers.
l1 = {l.name: l for l in n1.layers}
# The layers with the film layers.
l2 = {l.name: l for l in n2.layers}
for layer_name, layer in l2.items():
if layer_name in l1:
layer.set_weights(l1[layer_name].get_weights())
# Annoyingly, the rescaling and normalization layers get different names
# in each graph.
elif 'rescaling' in layer_name:
_, num = layer_name.split('_')
l1_layer_name = 'rescaling_' + str(int(num) - 2 or '')
l1_layer_name = l1_layer_name.rstrip('_')
layer.set_weights(l1[l1_layer_name].get_weights())
elif 'normalization' in layer_name:
_, num = layer_name.split('_')
l1_layer_name = 'normalization_' + str(int(num) - 1 or '')
l1_layer_name = l1_layer_name.rstrip('_')
layer.set_weights(l1[l1_layer_name].get_weights())
return n2
def EfficientNetB3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
include_film=False,
**kwargs):
return maybe_restore_with_film(
1.2,
1.4,
300,
0.3,
model_name='efficientnetb3',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
include_film=include_film,
**kwargs)
EfficientNetB3.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB3')
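# Illustrative FiLM usage (a sketch; it assumes the `_notop` checkpoint listed in
# WEIGHTS_PATHS is available). With include_film=True the returned model takes
# two inputs -- an image batch and a 512-d context vector (see `context_input`
# in EfficientNet above):
#
#   film_b3 = EfficientNetB3(include_top=False, pooling='avg', include_film=True)
#   images = tf.random.uniform((2, 300, 300, 3), maxval=255)
#   context = tf.random.uniform((2, 512))
#   features = film_b3((images, context))  # (2, 1536) pooled features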
def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
"""A placeholder method for backward compatibility.
The preprocessing logic has been included in the efficientnet model
implementation. Users are no longer required to call this method to normalize
the input data. This method does nothing and is only kept as a placeholder to
align the API surface between the old and new versions of the model.
Args:
x: A floating point `numpy.array` or a `tf.Tensor`.
data_format: Optional data format of the image tensor/array. Defaults to
None, in which case the global setting `tf.keras.image_data_format()` is
used (unless you changed it, it defaults to "channels_last").
Returns:
Unchanged `numpy.array` or `tf.Tensor`.
"""
return x
def decode_predictions(preds, top=5):
global CLASS_INDEX
if CLASS_INDEX is None:
with open(os.path.join(os.path.dirname(__file__), IMAGENET_JSON_PATH)) as f:
CLASS_INDEX = json.load(f)
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
result.sort(key=lambda x: x[2], reverse=True)
results.append(result)
return results
| CyberTron-master | cybertron/models/robotics_transformer/film_efficientnet/film_efficientnet_encoder.py |
from setuptools import setup
setup(name='gato', packages=['gato'])
| CyberTron-master | cybertron/models/gato2/setup.py |
import argparse
import random
import os
import wandb
import torch
from gato.utils.utils import DotDict
from gato.policy.gato_policy import GatoPolicy
from gato.envs.setup_env import load_envs
from gato.training.trainer import Trainer
from gato.tasks.control_task import ControlTask
def main(args):
exp_id = random.randint(int(1e5), int(1e6) - 1)
exp_name = f'gato-control-{exp_id}'
envs, datasets = load_envs(args.datasets) # Load Minari datasets and corresponding Gym environments
tasks = []
for env, dataset in zip(envs, datasets):
task = ControlTask(
env.unwrapped.spec.id,
env,
dataset,
args = args,
context_len = args.sequence_length,
training_prompt_len_proportion=args.prompt_len_proportion,
share_prompt_episodes = not args.unique_prompt_episodes,
top_k_prompting = args.top_k
)
tasks.append(task)
model = GatoPolicy(
device=args.device,
embed_dim=args.embed_dim,
layers=args.layers,
heads=args.heads,
dropout=args.dropout,
mu=args.mu,
M=args.M,
patch_size=args.patch_size,
resid_mid_channels=args.resid_mid_channels,
continuous_tokens=args.continuous_tokens,
discrete_tokens=args.discrete_tokens,
context_len=args.sequence_length,
use_patch_pos_encoding=not args.disable_patch_pos_encoding,
use_pos_encoding=not args.disable_inner_pos_encoding,
activation_fn=args.activation_fn,
pretrained_lm=args.pretrained_lm,
)
if args.init_checkpoint is not None:
print('Loading model from checkpoint:', args.init_checkpoint)
model.load_state_dict(torch.load(args.init_checkpoint, map_location=args.device))
# print trainable parameters
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Trainable Parameters:', '{}M'.format(params / 1e6))
args.trainable_params = params
model = model.to(args.device)
model.device = args.device
optimizer = torch.optim.AdamW(
model.parameters(),
lr=args.learning_rate,
betas=(args.beta_1, args.beta_2),
eps=args.adam_eps,
weight_decay=args.weight_decay,
)
if args.use_wandb:
wandb.init(
name = exp_name,
project=args.wandb_project,
config=args,
)
# Create save dir if does not exist
if args.save_model and not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
trainer = Trainer(
model = model,
optimizer = optimizer,
tasks = tasks,
exp_name = exp_name,
args=args
)
trainer.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, default='cuda') # e.g. cuda:0
# Input & tokenization
parser.add_argument('--sequence_length', '-k', type=int, default=1024) # number of tokens in seq
parser.add_argument('--patch_size', type=int, default=16) # image patch size
parser.add_argument('--resid_mid_channels', type=int, default=128) # number of channels in residual MLP
parser.add_argument('--num_groups', type=int, default=32) # GroupNorm groups in ResNet
parser.add_argument('--patch_position_vocab_size', type=int, default=128)
parser.add_argument('--disable_patch_pos_encoding', action='store_true', default=False)
parser.add_argument('--disable_inner_pos_encoding', action='store_true', default=False)
parser.add_argument('--mu','-mu', type=int, default=100) # mu-law encoding
parser.add_argument('--M', '-M', type=int, default=256)
#parser.add_argument('--vocab_size', type=int, default=32000) # number of tokens from SentencePiece
parser.add_argument('--continuous_tokens', type=int, default=1024) # number of tokens for continuous values (e.g. actions, observations)
parser.add_argument('--discrete_tokens', type=int, default=1024) # number of discrete action tokens
# transformer architecture hyperparameters
parser.add_argument('--pretrained_lm', type=str, default=None) # Init with pretrained LM; overrides embed_dim, layers, heads, activation_fn
parser.add_argument('--init_checkpoint', type=str, default=None) # Will not override architecture, only load weights from Gato checkpoint
parser.add_argument('--embed_dim', type=int, default=768)
parser.add_argument('--layers', type=int, default=8)
parser.add_argument('--heads', type=int, default=24)
parser.add_argument('--activation_fn', type=str, default='gelu')
#parser.add_argument('--activation_fn', type=str, default='geglu')
# training hyperparameters
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--dropout', type=float, default=0.1)
parser.add_argument('--beta_1', type=float, default=0.9)
parser.add_argument('--beta_2', type=float, default=0.95)
parser.add_argument('--adam_eps', type=float, default=1e-8)
parser.add_argument('--weight_decay', type=float, default=0.1)
parser.add_argument('--grad_norm_clip', type=float, default=1.0)
parser.add_argument('--disable_grad_clip', action='store_true', default=False)
parser.add_argument('--warmup_steps', type=int, default=15000)
parser.add_argument('--init_lr', type=float, default=1e-7) # starting LR for warmup
parser.add_argument('--learning_rate', '-lr',type=float, default=1e-4) # the maximum LR after warmup
parser.add_argument('--min_factor', type=float, default=10.0) # the minimum LR factor, e.g. w/ 10, base 1e-4 -> 1e-5 for Cosine Decay
parser.add_argument('--disable_cosine_decay', action='store_true', default=False) # disable cosine decay
parser.add_argument('--training_steps', type=int, default=1_000_000)
parser.add_argument('--log_eval_freq', type=int, default=100_000)
# evaluation
parser.add_argument('--eval_episodes', type=int, default=10)
parser.add_argument('--eval_mode', type=str, default='deterministic', choices=['deterministic', 'stochastic'])
parser.add_argument('--promptless_eval', action='store_true', default=False)
# datasets / envs
parser.add_argument('--datasets', type=str, nargs='+', default=['d4rl_halfcheetah-expert-v2'])
# params for sampling from datasets
parser.add_argument('--prompt_ep_proportion', type=float, default=0.25) # proportion of episodes that are prompted
parser.add_argument('--prompt_len_proportion', type=float, default=0.5) # proportion of context consumed by prompt
parser.add_argument('--unique_prompt_episodes', default=False, action='store_true')
parser.add_argument('--top_k', type=int, default=None) # sample prompts only from top k episodes
# logging
parser.add_argument('--use_wandb', '-w', action='store_true', default=False)
parser.add_argument('--wandb_project', type=str, default='gato-control')
# saving
parser.add_argument('--save_model', action='store_true', default=False)
parser.add_argument('--save_mode', type=str, default='last', choices=['checkpoint', 'last']) # 'checkpoint' saves the model after each log_eval_freq steps
parser.add_argument('--save_dir', type=str, default='models')
args = parser.parse_args()
args = DotDict(vars(args))
# Checks
assert args.training_steps % args.log_eval_freq == 0, 'training_steps must be divisible by log_eval_freq'
assert args.training_steps > args.warmup_steps, 'training_steps must be greater than warmup_steps'
assert args.learning_rate > args.init_lr, 'learning_rate must be greater than init_lr'
# make sure proportions are between 0 and 1
assert 0 <= args.prompt_ep_proportion <= 1, 'prompt_ep_proportion must be between 0 and 1'
assert 0 <= args.prompt_len_proportion <= 1, 'prompt_len_proportion must be between 0 and 1'
main(args) | CyberTron-master | cybertron/models/gato2/train.py |
import argparse
import os
import json
import time
import torch
from gato.utils.utils import DotDict
from gato.policy.gato_policy import GatoPolicy
from gato.envs.setup_env import load_envs
from gato.tasks.control_task import ControlTask
def main(args):
# load checkpoint
gato_checkpoint = torch.load(args.model_path, map_location=args.device)
# load args
if args.args_path is None:
args_path = os.path.join(os.path.dirname(args.model_path), 'args.json')
else:
args_path = args.args_path
training_args = json.load(open(args_path, 'r'))
if 'pretrained_lm' in training_args:
del training_args['pretrained_lm']
# update args with eval_args
for k, v in args.items():
if v is not None:
training_args[k] = v
eval_args = DotDict(training_args)
env_args = {
'render_mode': 'human' if args.render else None,
}
envs, datasets = load_envs(eval_args.datasets, env_args) # Load Minari datasets and corresponding Gym environments
tasks = []
env_names = []
for env, dataset in zip(envs, datasets):
task = ControlTask(
env.unwrapped.spec.id,
env,
dataset,
args = eval_args,
context_len=eval_args.sequence_length,
training_prompt_len_proportion=eval_args.prompt_len_proportion,
share_prompt_episodes = not eval_args.unique_prompt_episodes,
top_k_prompting = args.top_k
)
env_names.append(env.unwrapped.spec.id)
tasks.append(task)
print('Evaluating on envs:', env_names)
model = GatoPolicy(
device=eval_args.device,
embed_dim=eval_args.embed_dim,
layers=eval_args.layers,
heads=eval_args.heads,
dropout=eval_args.dropout,
mu=eval_args.mu,
M=eval_args.M,
patch_size=eval_args.patch_size,
resid_mid_channels=eval_args.resid_mid_channels,
continuous_tokens=eval_args.continuous_tokens,
discrete_tokens=eval_args.discrete_tokens,
context_len=eval_args.sequence_length,
use_patch_pos_encoding=not eval_args.disable_patch_pos_encoding,
use_pos_encoding=not eval_args.disable_inner_pos_encoding,
activation_fn=eval_args.activation_fn,
)
model.load_state_dict(gato_checkpoint)
model = model.to(eval_args.device)
model.device = eval_args.device
logs = {}
model.eval()
eval_start = time.time()
# loop over eval for each env
with torch.no_grad():
for task in tasks:
eval_logs = task.evaluate(model, n_iterations=eval_args.eval_episodes, deterministic=eval_args.eval_mode == 'deterministic', promptless_eval=eval_args.promptless_eval)
for k, v in eval_logs.items():
logs[f'evaluation/{task.name}/{k}'] = v
logs['time/evaluation'] = time.time() - eval_start
print('=' * 80)
print('Evaluation results:')
for k, v in logs.items():
print(f'{k}: {v}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, default=None) # path to model checkpoint
parser.add_argument('--args_path', type=str, default=None) # path to args.json file, will use args from same dir if None
parser.add_argument('--device', type=str, default='cuda') # e.g. cuda:0
# evaluation
parser.add_argument('--eval_episodes', type=int, default=None)
parser.add_argument('--eval_mode', type=str, default='deterministic', choices=['deterministic', 'stochastic'])
parser.add_argument('--promptless_eval', action='store_true', default=None)
parser.add_argument('--top_k', type=int, default=None) # sample prompts only from top k episodes
parser.add_argument('--render', action='store_true', default=None)
# datasets / envs
parser.add_argument('--datasets', type=str, nargs='+', default=None)
args = parser.parse_args()
args = DotDict(vars(args))
main(args) | CyberTron-master | cybertron/models/gato2/eval.py |
CyberTron-master | cybertron/models/gato2/gato/__init__.py |
|
from abc import ABC
class Task(ABC):
def __init__(self):
pass
def sample_batch(self, vanilla_batch_size, prompted_batch_size, device, max_tokens=1024):
pass
def evaluate(self, model, n_iterations):
pass
| CyberTron-master | cybertron/models/gato2/gato/tasks/task.py |
import gymnasium as gym
import numpy as np
import torch
import minari
from minari.dataset.minari_dataset import EpisodeData
from gato.tasks.task import Task
supported_spaces = [
gym.spaces.Box,
gym.spaces.Discrete,
]
def tokens_per_space(space):
if type(space) == gym.spaces.Box:
return space.shape[0]
elif type(space) == gym.spaces.Discrete:
return 1
else:
raise NotImplementedError(f'Unsupported space: {space}')
class ControlTask(Task):
def __init__(
self,
env_name: str,
env: gym.Env,
dataset: minari.MinariDataset,
context_len: int,
args,
training_prompt_len_proportion=0.5,
share_prompt_episodes=True,
top_k_prompting=None
):
super().__init__()
self.name = env_name
self.env = env
self.dataset = dataset
self.args = args
self.action_type = type(self.env.action_space)
self.observation_type = type(self.env.observation_space)
assert self.action_type in supported_spaces, f'Unsupported action space: {self.env.action_space}'
assert self.observation_type in supported_spaces, f'Unsupported observation space: {self.env.observation_space}'
# Determine types of observation and action for the task
if type(self.env.observation_space) == gym.spaces.Box:
if len(self.env.observation_space.shape) == 2 or len(self.env.observation_space.shape) == 3:
obs_str = 'images'
else:
obs_str = 'continuous_obs'
elif type(self.env.observation_space) == gym.spaces.Discrete:
obs_str = 'discrete_obs'
self.obs_str = obs_str
if obs_str == 'images':
self.image_transform = ControlImageTransform(env, args.patch_size)
else:
self.image_transform = None
if type(self.env.action_space) == gym.spaces.Box:
action_str = 'continuous_actions'
elif type(self.env.action_space) == gym.spaces.Discrete:
action_str = 'discrete_actions'
self.action_str = action_str
self.action_tokens = tokens_per_space(self.env.action_space)
if obs_str == 'images':
# Calculate tokens after image transform
image_shape = self.image_transform.transform(torch.tensor(env.observation_space.sample())).shape
self.observation_tokens = image_shape[-1] // args.patch_size * image_shape[-2] // args.patch_size
else:
self.observation_tokens = tokens_per_space(self.env.observation_space)
self.tokens_per_timestep = self.action_tokens + self.observation_tokens + 1 # additional separator token
assert context_len >= self.tokens_per_timestep, f'Context length must be at least {self.tokens_per_timestep} for env {env_name}'
# If sampled episode needs a prompt, this specifies what proportion of tokens should be from the prompt
self.training_prompt_len_proportion = training_prompt_len_proportion
assert self.training_prompt_len_proportion >= 0 and self.training_prompt_len_proportion <= 1
# Specifies if prompt should come from the same episode as the main chunk during training
self.share_prompt_episodes = share_prompt_episodes
# Ways of sampling prompts
self.prompt_types = ['start', 'end','uniform']
# If prompts should be sampled from top k episodes, or uniform during eval
self.top_k_prompting = top_k_prompting
if self.top_k_prompting is not None:
assert self.top_k_prompting > 0 and self.top_k_prompting <= self.dataset.total_episodes, 'top k must be between 0 and total episodes for all datasets'
# calculate top k ep ids
ep_returns = np.array([ep.rewards.sum() for ep in self.dataset])
self.top_ids = np.argsort(ep_returns)[-self.top_k_prompting:]
else:
self.top_ids = None
def evaluate(self, model, n_iterations, deterministic=True, promptless_eval=False):
# serial evaluation
returns = []
ep_lens = []
metrics = {}
context_timesteps = model.context_len // self.tokens_per_timestep # amount of timesteps that fit into context
for i in range(n_iterations):
observation, info = self.env.reset()
# sample prompt
if not promptless_eval:
input_dict = self.sample_batch_configurable(batch_size=1, device=model.device, prompt_proportions=[1.], prompt_types = ['end'], max_tokens = model.context_len, share_prompt_episodes=True,ep_ids=self.top_ids)[0]
else:
input_dict = None
done = False
ep_return = 0
ep_len = 0
while not done:
new_obs = torch.tensor(observation, device=model.device).unsqueeze(0)
if self.image_transform is not None:
new_obs = self.image_transform.transform(new_obs)
# append new observation, and pad actions
if input_dict is not None:
input_dict[self.obs_str] = torch.cat([input_dict[self.obs_str], new_obs], dim=0)
input_dict[self.action_str] = torch.cat([input_dict[self.action_str], torch.zeros(1, self.action_tokens, device=model.device, dtype=input_dict[self.action_str].dtype)], dim=0)
else:
input_dict = {
self.obs_str: new_obs,
self.action_str: torch.zeros(1, self.action_tokens, device=model.device, dtype=torch.float32),
}
# trim to context length
input_dict[self.obs_str] = input_dict[self.obs_str][-context_timesteps:,]
input_dict[self.action_str] = input_dict[self.action_str][-context_timesteps:,]
action = model.predict_control(input_dict, task=self, deterministic=deterministic)
input_dict[self.action_str][-1,] = action
np_action = action.cpu().numpy()
observation, reward, terminated, truncated, info = self.env.step(np_action)
done = terminated or truncated
ep_return += reward
ep_len += 1
returns.append(ep_return)
ep_lens.append(ep_len)
metrics['mean_return'] = np.mean(returns)
metrics['mean_episode_len'] = np.mean(ep_lens)
return metrics
def sample_batch(self, vanilla_batch_size:int , prompted_batch_sizes: dict, device, max_tokens=1024):
episode_dicts = []
# Determine batch sizes
prompted_batch_size = 0
for prompt_type, batch_size in prompted_batch_sizes.items():
assert prompt_type in self.prompt_types
prompted_batch_size += batch_size
batch_size = vanilla_batch_size + prompted_batch_size
prompt_proportions = []
prompt_types = []
for i in range(vanilla_batch_size):
prompt_proportions.append(0)
prompt_types.append(None) # should not be used
for prompt_type, prompt_batch_size in prompted_batch_sizes.items():
prompt_proportions += [self.training_prompt_len_proportion] * prompt_batch_size
prompt_types += [prompt_type] * prompt_batch_size
assert len(prompt_proportions) == batch_size and len(prompt_types) == batch_size, f'Batch size mismatch: {len(prompt_proportions)} != {batch_size} or {len(prompt_types)} != {batch_size}'
episode_dicts = self.sample_batch_configurable(
batch_size,
device,
prompt_proportions,
prompt_types,
max_tokens=max_tokens,
share_prompt_episodes=self.share_prompt_episodes
)
return episode_dicts
def sample_batch_configurable(
self, batch_size: int,
device: str,
prompt_proportions: list,
prompt_types: list,
max_tokens: int = 1024,
share_prompt_episodes=True,
ep_ids = None
):
# Samples a batch of episodes, where each episode has at most max_tokens tokens.
# This returns a list of dictionaries, where each dictionary contains variable-length tensors,
# in contrast to returning single padded tensors that contain all episodes.
# Maximum number of timesteps we can fit in context
num_timesteps = max_tokens // self.tokens_per_timestep
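# Worked example (illustrative): for the default d4rl_halfcheetah-expert-v2 task,
# 17 observation tokens + 6 action tokens + 1 separator = 24 tokens per timestep,
# so a 1024-token context holds 1024 // 24 = 42 timesteps.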
# List of numpy arrays for each episode
episodes_data = {
'actions': [],
'observations': [],
}
# Sample episodes, optionally restricted to the given episode indices
all_episodes = self.sample_episodes(n_episodes=batch_size, episode_indices=ep_ids)
if share_prompt_episodes:
main_episodes = all_episodes
prompt_episodes = all_episodes
else:
main_episodes = all_episodes
# prompts come from different episodes
prompt_episodes = all_episodes[1:] + all_episodes[:1]
# If prompt_proportion is nonzero, then each episode has a proportion of its tokens replaced with a prompt
# sample "non-prompt" chunk from each episode
timesteps_for_mains = []
timesteps_for_prompts = []
for i, episode in enumerate(main_episodes):
timesteps_for_main = round(num_timesteps * (1 - prompt_proportions[i]))
timesteps_for_mains.append(timesteps_for_main) # max main size
timesteps_for_prompts.append(num_timesteps - timesteps_for_main) # max prompt size
ep_len = episode.total_timesteps
if timesteps_for_main >= ep_len:
# sample entire episode
start = 0
end = ep_len - 1
else:
# sample which timestep to start with
start = np.random.randint(0, ep_len - timesteps_for_main)
end = start + timesteps_for_main
observations = episode.observations[start:end,]
actions = episode.actions[start:end,]
episodes_data['observations'].append(observations)
episodes_data['actions'].append(actions)
# add prompt
for i, episode in enumerate(prompt_episodes):
ep_len = episode.total_timesteps
timesteps_for_prompt = timesteps_for_prompts[i]
prompt_type = prompt_types[i]
if timesteps_for_prompt > 0:
assert prompt_type in self.prompt_types, 'Invalid prompt type'
if timesteps_for_prompt >= ep_len:
# sample entire episode
prompt_start = 0
prompt_end = ep_len - 1
if prompt_type == 'start':
prompt_start = 0
prompt_end = timesteps_for_prompt - 1
elif prompt_type == 'end':
prompt_end = ep_len - 1
prompt_start = prompt_end - timesteps_for_prompt + 1
elif prompt_type == 'uniform':
prompt_start = np.random.randint(0, ep_len - timesteps_for_prompt)
prompt_end = prompt_start + timesteps_for_prompt - 1
# Extract prompt and add to main chunk
prompt_obs = episode.observations[prompt_start:(prompt_end + 1),]
prompt_actions = episode.actions[prompt_start:(prompt_end + 1),]
episodes_data['observations'][i] = np.concatenate([prompt_obs, episodes_data['observations'][i]], axis=0)
episodes_data['actions'][i] = np.concatenate([prompt_actions, episodes_data['actions'][i]], axis=0)
# Convert to dictionary for each episode
episode_dicts = []
for i in range(batch_size):
actions = episodes_data['actions'][i]
observations = episodes_data['observations'][i]
# convert observations to tensors
if type(self.env.observation_space) == gym.spaces.Box:
observations = torch.tensor(observations, dtype=torch.float32, device=device)
elif type(self.env.observation_space) == gym.spaces.Discrete:
observations = torch.tensor(observations, dtype=torch.int32, device=device)
# apply image transforms
if self.image_transform is not None:
observations = self.image_transform.transform(observations)
# convert actions to tensors
if type(self.env.action_space) == gym.spaces.Box:
actions = torch.tensor(actions, dtype=torch.float32, device=device)
elif type(self.env.action_space) == gym.spaces.Discrete:
actions = torch.tensor(actions, dtype=torch.int32, device=device)
# make sure actions are 2D
actions = actions.reshape(actions.shape[0], self.action_tokens)
episode_dict = {
self.action_str: actions,
self.obs_str: observations,
}
episode_dicts.append(episode_dict)
return episode_dicts
# Extension of default Minari sample_episodes where custom episode_indices can be passed
def sample_episodes(self, n_episodes: int, episode_indices: list = None):
"""Sample n number of episodes from the dataset.
Args:
n_episodes (Optional[int], optional): number of episodes to sample.
"""
if episode_indices is None:
episode_indices = self.dataset._episode_indices
indices = self.dataset._generator.choice(
episode_indices, size=n_episodes, replace=False
)
episodes = self.dataset._data.get_episodes(indices)
return list(map(lambda data: EpisodeData(**data), episodes))
class ControlImageTransform:
def __init__(self, env, patch_size=16):
self.env = env
self.patch_size = patch_size
assert type(self.env.observation_space) == gym.spaces.Box, 'Only supports Box observation space'
assert len(self.env.observation_space.shape) == 3 or len(self.env.observation_space.shape) == 2, 'Only supports 2D or 3D observation space'
self.channel_first = None
self.grayscale = False
# Check if grayscale or RGB
if len(self.env.observation_space.shape) == 3:
# Check if channel first or channel last
assert self.env.observation_space.shape[0] == 3 or self.env.observation_space.shape[-1] == 3, '3 channel first or channel last'
self.channel_first = self.env.observation_space.shape[0] == 3
if self.channel_first:
self.height = self.env.observation_space.shape[1]
self.width = self.env.observation_space.shape[2]
else:
self.height = self.env.observation_space.shape[0]
self.width = self.env.observation_space.shape[1]
else:
self.grayscale = True
self.height = self.env.observation_space.shape[0]
self.width = self.env.observation_space.shape[1]
# check how much padding is needed
self.padding_h = 0
self.padding_w = 0
if self.height % self.patch_size != 0:
self.padding_h = self.patch_size - (self.height % self.patch_size)
if self.width % self.patch_size != 0:
self.padding_w = self.patch_size - (self.width % self.patch_size)
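# Worked example (illustrative): a 210x160 RGB frame with patch_size=16 gives
# padding_h = 16 - (210 % 16) = 14 and padding_w = 0 (160 is already a multiple
# of 16), so observations are zero-padded to 224x160 before patch extraction.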
def transform(self, images: torch.Tensor):
if self.grayscale:
images = images.reshape(-1, 1, self.height, self.width)
images = images.repeat(1, 3, 1, 1)
else:
if not self.channel_first:
images = images.permute(0, 3, 1, 2)
# all images now B X 3 X H X W, add padding:
images = torch.nn.functional.pad(images, (0, self.padding_w, 0, self.padding_h), value=0) # left, right, top, bottom padding
return images | CyberTron-master | cybertron/models/gato2/gato/tasks/control_task.py |
CyberTron-master | cybertron/models/gato2/gato/tasks/__init__.py |
|
CyberTron-master | cybertron/models/gato2/gato/training/__init__.py |
|
import time
import os
import wandb
import numpy as np
import torch
from gato.utils.utils import save_model
class Trainer:
def __init__(
self,
model,
optimizer,
tasks,
exp_name,
args
):
self.model = model
self.optimizer = optimizer
self.tasks = tasks
self.args = args
self.print_logs = True # args.print_logs
self.device = args.device
self.min_lr = self.args.learning_rate / self.args.min_factor
self.deterministic = self.args.eval_mode == 'deterministic'
self.exp_name = exp_name
self.exp_dir = os.path.join(self.args.save_dir, self.exp_name)
self.steps = 0
self.start_time = None
def train(self):
self.start_time = time.time()
iters = self.args.training_steps // self.args.log_eval_freq
for i in range(iters):
logs = self.train_iteration(self.args.log_eval_freq, i)
if self.args.use_wandb:
wandb.log(logs)
## Save model at end of training only if not saving checkpoints
if self.args.save_model and self.args.save_mode == 'last':
save_model(self.model, self.exp_dir, f'checkpoint_{self.steps}', self.args)
def train_iteration(self, num_steps, iter):
logs = {}
train_start = time.time()
train_losses = []
self.model.train()
for i in range(num_steps):
self.steps += 1
train_loss, step_logs = self.train_step()
train_losses.append(train_loss)
# add logs from last train_step as well
for log in step_logs:
logs[log] = step_logs[log]
logs['time/training'] = time.time() - train_start
eval_start = time.time()
self.model.eval()
# loop over eval for each env
with torch.no_grad():
for task in self.tasks:
eval_logs = task.evaluate(self.model, n_iterations=self.args.eval_episodes, deterministic=self.deterministic, promptless_eval=self.args.promptless_eval)
for k, v in eval_logs.items():
logs[f'evaluation/{task.name}/{k}'] = v
logs['time/total'] = time.time() - self.start_time
logs['time/evaluation'] = time.time() - eval_start
logs['training/train_loss_mean'] = np.mean(train_losses)
logs['training/train_loss_std'] = np.std(train_losses)
if self.print_logs:
print('=' * 80)
print(f'Iteration {iter}')
for k, v in logs.items():
print(f'{k}: {v}')
## Save model
if self.args.save_model and self.args.save_mode == 'checkpoint':
save_model(self.model, self.exp_dir, f'checkpoint_{self.steps}', self.args)
return logs
def train_step(self):
logs = {}
base_lr = self.args.learning_rate
min_lr = self.min_lr
init_lr = self.args.init_lr
# Calculate learning rate relative to current step
lr = linear_warmup_cosine_decay(self.steps, self.args.warmup_steps, self.args.training_steps, base_lr, init_lr, min_lr, disable_cosine_decay=self.args.disable_cosine_decay)
logs['training/learning_rate'] = lr
# Apply
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
# Build training batch
batch_dicts = self.sample_control_batch(self.args.batch_size)
# Compute loss and update model
# if self.steps >= 100:
# logits, loss = self.model.forward(inputs = batch_dicts, compute_loss=True, pdb=True)
# else:
logits, loss = self.model.forward(inputs = batch_dicts, compute_loss=True)
self.optimizer.zero_grad()
loss.backward()
if not self.args.disable_grad_clip:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.grad_norm_clip)
self.optimizer.step()
return loss.detach().cpu().item(), logs
def sample_control_batch(self, batch_size):
batch_dicts = []
sampled_task_indices = []
n_tasks = len(self.tasks)
while len(sampled_task_indices) < batch_size:
max_n = min(n_tasks, batch_size - len(sampled_task_indices))
new_tasks = np.random.choice(np.arange(n_tasks), size=max_n, replace=False).tolist()
sampled_task_indices.extend(new_tasks)
n_prompted_episodes = round(batch_size * self.args.prompt_ep_proportion)
# determine prompted episodes and their prompting type (end or uniform)
prompt_indices = np.random.choice(batch_size, size=n_prompted_episodes, replace=False).tolist()
end_indices = np.random.choice(prompt_indices, size=round(len(prompt_indices) / 2), replace=False).tolist()
uniform_indices = [i for i in prompt_indices if i not in end_indices]
# aggregate across tasks sampled multiple times
for i, task in enumerate(self.tasks):
total_task_batch_size = 0
task_vanilla_batch_size = 0
task_prompted_batch_sizes = {}
for type_index, task_index in enumerate(sampled_task_indices):
if task_index == i:
total_task_batch_size += 1
if type_index in end_indices:
task_prompted_batch_sizes['end'] = task_prompted_batch_sizes.get('end', 0) + 1
elif type_index in uniform_indices:
task_prompted_batch_sizes['uniform'] = task_prompted_batch_sizes.get('uniform', 0) + 1
else:
task_vanilla_batch_size += 1
# sample episodes from dataset
if total_task_batch_size > 0:
task_episode_dicts = task.sample_batch(task_vanilla_batch_size, task_prompted_batch_sizes, self.device, max_tokens=self.args.sequence_length)
batch_dicts.extend(task_episode_dicts)
return batch_dicts
def linear_warmup_cosine_decay(current_step, warmup_steps, max_steps, base_lr, init_lr, min_lr, disable_cosine_decay=False):
# Linear Warmup from init_lr to base_lr over warmup_steps
if current_step <= warmup_steps:
lr = init_lr + (base_lr - init_lr) * current_step / warmup_steps
elif not disable_cosine_decay:
# cosine decay from base_lr to min_lr over remaining steps
progress = (current_step - warmup_steps) / float(max(1, max_steps - warmup_steps))
lr = min_lr + 0.5 * (base_lr - min_lr) * (1 + np.cos(np.pi * progress))
else:
lr = base_lr
return lr
if __name__ == '__main__':
# Test LR schedule
import matplotlib.pyplot as plt
init_lr = 1e-7
base_lr = 1e-4
min_lr = base_lr / 10
warmup_steps = 15_000
max_steps = 1_015_000
current_steps = np.arange(1, max_steps + 1)
lr = np.zeros_like(current_steps, dtype=np.float32)
for step in current_steps:
lr[step - 1] = linear_warmup_cosine_decay(step, warmup_steps, max_steps, base_lr, init_lr, min_lr)
plt.plot(current_steps, lr)
plt.show() | CyberTron-master | cybertron/models/gato2/gato/training/trainer.py |
import os
import json
import torch
class DotDict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def save_model(model, save_dir, save_name, args):
# create save dir if not exists
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# save args for loading model (if not already saved)
args_path = os.path.join(save_dir, 'args.json')
if not os.path.exists(args_path):
with open(args_path, 'w') as f:
json.dump(args, f)
# save model
state_dict = model.state_dict()
torch.save(state_dict, os.path.join(save_dir, save_name + '.pt'))
| CyberTron-master | cybertron/models/gato2/gato/utils/utils.py |
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
import os
from dataclasses import dataclass
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
)
from transformers.modeling_utils import (
Conv1D,
PreTrainedModel,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "GPT2Config"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"gpt2",
"gpt2-medium",
"gpt2-large",
"gpt2-xl",
"distilgpt2",
# See all GPT-2 models at https://huggingface.co/models?filter=gpt2
]
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
"""Load tf checkpoints in a pytorch model"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
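# Illustrative note on the loop above (an assumption drawn from the usual GPT-2 TF
# checkpoint layout, not verified against this repo): a variable such as
# "model/h0/attn/c_attn/w" is stripped of its "model/" prefix and split on "/";
# "h0" is split into ("h", 0) to index model.h[0], and the trailing "w"/"b"
# selects that module's .weight / .bias tensor.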
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False, is_cross_attention=False):
super().__init__()
self.config = config
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer(
"bias", torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.is_cross_attention = is_cross_attention
if self.is_cross_attention:
self.c_attn = Conv1D(2 * n_state, nx)
self.q_attn = Conv1D(n_state, nx)
else:
self.c_attn = Conv1D(3 * n_state, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
w = torch.matmul(q, k)
if self.scale:
w = w / (float(v.size(-1)) ** 0.5)
nd, ns = w.size(-2), w.size(-1)
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
mask = self.bias[:, :, ns - nd: ns, :ns]
w = torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
if encoder_hidden_states is not None:
assert hasattr(
self, "q_attn"
), "If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`."
query = self.q_attn(hidden_states)
key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
attention_mask = encoder_attention_mask
else:
query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking
else:
present = (None,)
self.flash = False
if not self.flash:
attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
a = attn_outputs[0]
else:
assert head_mask is None, "head_mask not implemented for flash"
assert not output_attentions, "output_attentions not implemented for flash"
key = key.permute(0, 1, 3, 2) # (batch, head, seq_length, head_features)
nd = query.size(-2)
ns = key.size(-2)
causal_mask = self.bias[:, :, ns - nd: ns, :ns]
attention_mask = (-(causal_mask.to(attention_mask.dtype) - 1) * self.masked_bias) + attention_mask
a = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=self.config.attn_pdrop if self.training else 0)
attn_outputs = [a]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
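# Minimal shape sketch (illustrative only; the helper and names below are hypothetical
# and never called by the model): how a (batch, seq, embd) tensor flows through
# split_heads -> scaled dot-product attention -> merge_heads in the class above.
def _demo_attention_shapes(B=2, L=5, E=768, H=12):
    head_dim = E // H
    x = torch.randn(B, L, E)
    q = x.view(B, L, H, head_dim).permute(0, 2, 1, 3)   # (B, H, L, head_dim)
    k = x.view(B, L, H, head_dim).permute(0, 2, 3, 1)   # (B, H, head_dim, L), as split_heads(k=True)
    v = x.view(B, L, H, head_dim).permute(0, 2, 1, 3)   # (B, H, L, head_dim)
    w = torch.matmul(q, k) / float(head_dim) ** 0.5     # (B, H, L, L) attention scores
    a = torch.matmul(torch.softmax(w, dim=-1), v)       # (B, H, L, head_dim)
    return a.permute(0, 2, 1, 3).reshape(B, L, E)       # merge heads back to (B, L, E)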
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class AdapterMLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
hidden_size = config.n_embd
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = Attention(hidden_size, n_ctx, config, scale)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
# self.adapter_ln = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = Attention(hidden_size, n_ctx, config, scale, is_cross_attention=True)
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = MLP(inner_dim, config)
# self.adapter_mlp = AdapterMLP(512, config) # ADAPTER
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
attn_outputs = self.attn(
self.ln_1(hidden_states),
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + hidden_states
if encoder_hidden_states is not None:
# add one self-attention block for cross-attention
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
cross_attn_outputs = self.crossattention(
self.ln_cross_attn(hidden_states),
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
attn_output = cross_attn_outputs[0]
# residual connection
hidden_states = hidden_states + attn_output
outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))
# residual connection
hidden_states = hidden_states + feed_forward_hidden_states
# hidden_states = hidden_states + self.adapter_ln(self.adapter_mlp(hidden_states))
outputs = [hidden_states] + outputs
return outputs # hidden_states, present, (attentions, cross_attentions)
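# Note on the ordering above: Block uses the pre-LayerNorm arrangement. Each sublayer
# receives ln(hidden_states) as input and its output is added back onto the raw
# residual stream, matching the standard GPT-2 block.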
class GPT2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = GPT2Config
load_tf_weights = load_tf_weights_in_gpt2
base_model_prefix = "transformer"
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
# module.weight.data.fill_(.01) # KL: Adapter change
@dataclass
class GPT2DoubleHeadsModelOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Language modeling loss.
mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):
Multiple choice classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,
batch_size, num_heads, sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
mc_loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
mc_logits: torch.FloatTensor = None
past_key_values: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
GPT2_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
GPT2_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):
:obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else
``past_key_values[0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be
passed as ``input_ids``.
Indices can be obtained using :class:`~transformers.GPT2Tokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
past_key_values (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
:obj:`past_key_values` output below). Can be used to speed up sequential decoding. The ``input_ids`` which
have their past given to this model should not be passed as ``input_ids`` as they have already been
computed.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see
:obj:`past_key_values`).
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
PARALLELIZE_DOCSTRING = r"""
Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
it will evenly distribute blocks across all devices.
Args:
device_map (:obj:`Dict[int, list]`, optional, defaults to None):
A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
automatically mapped to the first device (for esoteric reasons). That means that the first device should
have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the
following number of attention modules:
- gpt2: 12
- gpt2-medium: 24
- gpt2-large: 36
- gpt2-xl: 48
Example::
# Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:
model = GPT2LMHeadModel.from_pretrained('gpt2-xl')
device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],
3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]}
model.parallelize(device_map)
"""
DEPARALLELIZE_DOCSTRING = r"""
Moves the model to cpu from a model parallel state.
Example::
# On a 4 GPU machine with gpt2-large:
model = GPT2LMHeadModel.from_pretrained('gpt2-large')
device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7],
1: [8, 9, 10, 11, 12, 13, 14, 15],
2: [16, 17, 18, 19, 20, 21, 22, 23],
3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]}
model.parallelize(device_map) # Splits the model across several devices
model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
"""
@add_start_docstrings(
"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
GPT2_START_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
# self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
self.use_layers = None
def set_layers(self, num_layers):
assert 1 <= num_layers <= len(self.h)
if num_layers is not None:
num_layers -= 1
self.use_layers = num_layers
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
# Check validity of device_map
self.device_map = (
get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
)
assert_device_map(self.device_map, len(self.h))
self.model_parallel = True
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
self.last_device = "cuda:" + str(max(self.device_map.keys()))
self.wte = self.wte.to(self.first_device)
        if hasattr(self, "wpe"):  # wpe is removed in this trajectory variant
            self.wpe = self.wpe.to(self.first_device)
# Load onto devices
for k, v in self.device_map.items():
for block in v:
cuda_device = "cuda:" + str(k)
self.h[block] = self.h[block].to(cuda_device)
# ln_f to last
self.ln_f = self.ln_f.to(self.last_device)
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
self.wte = self.wte.to("cpu")
self.wpe = self.wpe.to("cpu")
for index in range(len(self.h)):
self.h[index] = self.h[index].to("cpu")
self.ln_f = self.ln_f.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
# @add_code_sample_docstrings(
# tokenizer_class=_TOKENIZER_FOR_DOC,
# checkpoint="gpt2",
# output_type=BaseModelOutputWithPastAndCrossAttentions,
# config_class=_CONFIG_FOR_DOC,
# )
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = [None] * len(self.h)
else:
past_length = past_key_values[0][0].size(-2)
        # resolve the device up front so it is also available for the cross-attention mask below
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
assert batch_size > 0, "batch_size has to be defined and > 0"
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
# position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds # + position_embeds
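        # Note: this trajectory variant skips GPT-2's learned absolute position embeddings
        # (self.wpe is commented out in __init__); position information is expected to be
        # added upstream, e.g. the Gato policy adds its own per-timestep encoding to the
        # observation embeddings before they reach this model.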
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
if self.use_layers is not None and i >= self.use_layers:
break
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure layer_past is on same device as hidden_states (might not be correct)
if layer_past is not None:
layer_past = layer_past.to(hidden_states.device)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if isinstance(head_mask, torch.Tensor):
head_mask = head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False):
def create_custom_forward(module):
def custom_forward(*inputs):
# checkpointing only works with tuple returns, not with lists
return tuple(output for output in module(*inputs, use_cache, output_attentions))
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
layer_past,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present = outputs[:2]
if use_cache is True:
presents = presents + (present,)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[3],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
) | CyberTron-master | cybertron/models/gato2/gato/transformers/trajectory_gpt2.py |
CyberTron-master | cybertron/models/gato2/gato/transformers/__init__.py |
|
from __future__ import annotations
from typing import Optional, Union, Tuple
import math
import torch
import torch.nn as nn
from transformers.models.openai import OpenAIGPTConfig, OpenAIGPTPreTrainedModel
from transformers.models.openai.modeling_openai import (
Attention as _Attention,
BaseModelOutput,
Conv1D,
ACT_FNS,
)
class HFGPT(nn.Module):
def __init__(
self,
*,
vocab_size=40478,
n_positions=512,
n_embd=768,
n_layer=12,
n_head=12,
dropout: float = 0.1,
activation_fn: str = "geglu",
):
super().__init__()
        # "geglu" is not a stock OpenAI-GPT activation; the custom MLP below handles it
        kwargs = {"afn": activation_fn}
cfg = OpenAIGPTConfig(
vocab_size=vocab_size,
n_positions=n_positions,
n_embd=n_embd,
n_layer=n_layer,
n_head=n_head,
resid_pdrop=dropout,
embd_pdrop=dropout,
attn_pdrop=dropout,
summary_first_dropout=dropout,
**kwargs,
)
self.lm = OpenAIGPTModel(cfg)
def forward(
self,
x: torch.Tensor,
*,
custom_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
batch_first: bool = False,
):
"""
x: (L, B, E) if batch_first == False else (B, L, E)
        custom_mask: (B, L_tgt) or (B, 1, L_tgt); applied together with the causal mask.
            Because this is self-attention, L_tgt == L.
"""
if batch_first:
B, L, E = x.shape
else:
L, B, E = x.shape
x = x.transpose(0, 1)
attention_mask = None
if custom_mask is not None:
if custom_mask.dim() == 3:
custom_mask = custom_mask.squeeze(dim=1)
attention_mask = custom_mask.float().contiguous()
out = self.lm(
inputs_embeds=x.contiguous(),
attention_mask=attention_mask,
position_ids=position_ids,
).last_hidden_state
assert out.shape == (B, L, E)
if not batch_first:
out = out.transpose(0, 1)
return out
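# Hedged usage sketch for HFGPT (the hyperparameters and shapes below are arbitrary
# examples, not values used elsewhere in this repo):
#   gpt = HFGPT(n_embd=64, n_layer=2, n_head=4, n_positions=32)
#   x = torch.randn(10, 3, 64)        # (L, B, E) since batch_first defaults to False
#   mask = torch.ones(3, 10)          # (B, L); 1 = attend, 0 = padding
#   out = gpt(x, custom_mask=mask)    # -> (L, B, E)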
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList(
[
Block(config.n_positions, config, scale=True)
for _ in range(config.n_layer)
]
)
self.register_buffer("position_ids", torch.arange(config.n_positions))
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.tokens_embed
def set_input_embeddings(self, new_embeddings):
self.tokens_embed = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], BaseModelOutput]:
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
# Code is different from when we had a single embedding matrix from position and token embeddings
position_ids = self.position_ids[None, : input_shape[-1]]
# Attention mask.
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
else:
hidden_states = inputs_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(
hidden_states,
attention_mask,
head_mask[i],
output_attentions=output_attentions,
)
hidden_states = outputs[0]
if output_attentions:
all_attentions = all_attentions + (outputs[1],)
hidden_states = hidden_states.view(*output_shape)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, all_hidden_states, all_attentions]
if v is not None
)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
class Block(nn.Module):
def __init__(self, n_positions, config, scale=False):
super().__init__()
nx = config.n_embd
self.attn = Attention(nx, n_positions, config, scale)
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
attn_outputs = self.attn(
x,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
)
a = attn_outputs[0]
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
outputs = [h] + attn_outputs[1:]
return outputs
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
if config.afn == "geglu":
self.act = nn.GELU()
self.gated_layer = nn.Linear(config.n_embd, n_state, bias=False)
else:
self.act = ACT_FNS[config.afn]
self.gated_layer = None
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
if self.gated_layer is not None:
h = h * self.gated_layer(x)
h2 = self.c_proj(h)
return self.dropout(h2)
class Attention(_Attention):
def _attn(
self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False
):
q = q.to(torch.float32)
k = k.to(torch.float32)
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.bias + -1e9 * (1 - self.bias) # TF implementation method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
b = b.to(w.dtype)
w = w * b + -1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.functional.softmax(w, dim=-1)
w = w.to(v.dtype)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if output_attentions:
outputs.append(w)
return outputs | CyberTron-master | cybertron/models/gato2/gato/transformers/gpt.py |
import gymnasium as gym
from gymnasium.wrappers import AtariPreprocessing, TransformReward
import numpy as np
def load_atari_env(env_name: str, load_kwargs: dict):
assert 'v5' in env_name
repeat_action_probability = 0 # 0.25
clip_rewards = True
repeat_action_probability = load_kwargs.get('repeat_action_probability', repeat_action_probability)
clip_rewards = load_kwargs.get('clip_rewards', clip_rewards)
render_mode = load_kwargs.get('render_mode', None)
env = gym.make(env_name, frameskip=1, repeat_action_probability=repeat_action_probability, render_mode=render_mode) # e.g. 'ALE/Breakout-v5'
env = AtariPreprocessing(env, frame_skip=4, noop_max=0)
if clip_rewards:
env = TransformReward(env, lambda r: np.clip(r, -1.0, 1.0))
return env
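# Hedged usage sketch (the env id and kwargs are examples, not pinned by this file):
#   env = load_atari_env('ALE/Breakout-v5', {'clip_rewards': True})
#   obs, info = env.reset()
# gym.make is called with frameskip=1 above because AtariPreprocessing(frame_skip=4)
# expects the underlying ALE environment to do no frame skipping of its own.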
| CyberTron-master | cybertron/models/gato2/gato/envs/atari.py |
import minari
import gymnasium as gym
from gato.envs.atari import load_atari_env
custom_env_loaders = {
'ALE/': load_atari_env
}
def load_envs(dataset_names: list, load_kwargs: dict = {}):
envs = []
datasets = []
for dataset_name in dataset_names:
env, dataset = load_env_dataset(dataset_name, load_kwargs)
envs.append(env)
datasets.append(dataset)
return envs, datasets
def load_env_dataset(dataset_name: str, load_kwargs: dict = {}):
# load dataset
dataset = minari.load_dataset(dataset_name)
env_name = dataset._data.env_spec.id
env = None
    # build the environment with a custom loader if one matches the env id
for prefix, loader in custom_env_loaders.items():
if prefix in env_name:
env = loader(env_name, load_kwargs)
break
# Default to recovering dataset from Minari
if env is None:
env = gym.make(dataset._data.env_spec, **load_kwargs)
return env, dataset
if __name__ == '__main__':
# load MuJoCo locomotion dataset, env
mujoco_env, mujoco_dataset = load_env_dataset('d4rl_halfcheetah-expert-v2')
# load atari
atari_env, atari_dataset = load_env_dataset('Breakout-expert_s0-v0')
import pdb; pdb.set_trace() | CyberTron-master | cybertron/models/gato2/gato/envs/setup_env.py |
CyberTron-master | cybertron/models/gato2/gato/data/__init__.py |
|
import os
import gdown
datasets = {
'd4rl_halfcheetah-expert-v2': 'https://drive.google.com/drive/folders/1GqE2c3oqutBYLOvP-l6cSZ1F7mqs7DOS?usp=drive_link',
'd4rl_hopper-expert-v2': 'https://drive.google.com/drive/folders/1vl4GsvHDE6Pm7UAzDE1YxC8AIaGknMrp?usp=drive_link',
'd4rl_walker2d-expert-v2': 'https://drive.google.com/drive/folders/1HugHUSU_7qZEKg23cY2pSiN4sakkAtmH?usp=drive_link',
'Breakout-expert_s0-v0': 'https://drive.google.com/drive/folders/1j_BWhVuk-WJ67hrXfrN9beaGzxuDF1NN?usp=drive_link'
}
if __name__ == '__main__':
minari_dir = os.path.join(os.path.expanduser('~'), '.minari')
    # create directories if they do not exist
if not os.path.exists(minari_dir):
os.mkdir(minari_dir)
datasets_dir = os.path.join(minari_dir, 'datasets')
if not os.path.exists(datasets_dir):
os.mkdir(datasets_dir)
# download datasets, if they do not exist already
for dataset_name, url in datasets.items():
target_path = os.path.join(datasets_dir, dataset_name)
if os.path.exists(target_path):
print(f'{dataset_name} already exists at {target_path}, skipping')
continue
gdown.download_folder(url=url, output=target_path, quiet=False, use_cookies=False) | CyberTron-master | cybertron/models/gato2/gato/data/download_custom_datasets.py |
import torch
import math
def mu_law(tensor, mu=100, M=256):
    return torch.sign(tensor) * torch.log(1 + mu * torch.abs(tensor)) / math.log(1 + mu * M)
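# The transform above is mu-law companding applied to continuous values before binning:
#   F(x) = sign(x) * log(1 + mu * |x|) / log(1 + mu * M)
# With the defaults (mu=100, M=256) large magnitudes are compressed, so most of the
# post-clamp range [-1, 1] is spent resolving small values.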
class ContinuousTokenizer:
def __init__(self, use_mu_law=True, mu=100, M=256, n_bins=1024, offset=None):
self.use_mu_law = use_mu_law
self.mu = mu
self.M = M
self.n_bins = n_bins
self.offset = offset
def encode(self, tensor):
if self.use_mu_law:
tensor = mu_law(tensor, self.mu, self.M)
# clip to [-1, 1]
tensor = torch.clamp(tensor, -1, 1)
        # discretize using uniform bins over [-1, 1]
        tensor = (tensor + 1) * (self.n_bins / 2)
        tensor = tensor.type(torch.int32)
        # values exactly equal to 1.0 would otherwise land in bin n_bins; keep indices in [0, n_bins - 1]
        tensor = torch.clamp(tensor, 0, self.n_bins - 1)
if self.offset is not None:
tensor += self.offset
return tensor
def decode(self, tensor):
if self.use_mu_law:
raise Exception("mu-law encoding only expected with values which are not predicted")
if self.offset is not None:
tensor -= self.offset
        # convert back from discrete to continuous values; discrete values should be in [0, 1023]
tensor = (2 * tensor) / self.n_bins - 1
return tensor
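# Worked example of the uniform binning above (assuming n_bins=1024 and offset=0):
#   encode(0.0)  -> int((0.0 + 1) * 512) = 512
#   decode(512)  -> (2 * 512) / 1024 - 1 = 0.0
#   encode(-1.0) -> 0, the lowest bin.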
if __name__ == '__main__':
tokenizer = ContinuousTokenizer(use_mu_law=False, offset=0)
    x = torch.rand(1, 10) * 2 - 1  # uniform samples in [-1, 1)
    encoded = tokenizer.encode(x)
    decoded = tokenizer.decode(encoded)
    tokenizer = ContinuousTokenizer(use_mu_law=True, offset=0)
    encoded = tokenizer.encode(x)
| CyberTron-master | cybertron/models/gato2/gato/policy/input_tokenizers.py |
CyberTron-master | cybertron/models/gato2/gato/policy/__init__.py |
|
import torch
import torch.nn as nn
import gymnasium as gym
import transformers
# import gato
from gato.transformers import GPT2Model
from gato.policy.embeddings import ImageEmbedding
from gato.policy.input_tokenizers import ContinuousTokenizer
from gato.tasks.control_task import ControlTask
class GatoPolicy(nn.Module):
def __init__(
self,
device: str,
embed_dim: int,
layers: int,
heads: int,
dropout: float,
activation_fn='gelu',
mu: int = 100,
M: int = 256,
patch_size: int = 16,
resid_mid_channels: int = 132,
num_groups: int = 32,
position_vocab_size: int = 128,
continuous_tokens: int = 1024,
discrete_tokens: int = 1024,
context_len=1024,
use_pos_encoding: bool = True,
use_patch_pos_encoding: bool = True,
pretrained_lm: str = None # Optional, name of pretrained language model to use
):
super().__init__()
self.device = device
self.context_len = context_len
# this is a dummy value as this implementation does not yet handle language IO
#self.text_tokens = 32000 # SentencePiece vocab size
self.text_tokens = 1
self.continuous_tokens = continuous_tokens
self.discrete_tokens = discrete_tokens
self.vocab_size = self.text_tokens + self.discrete_tokens + self.continuous_tokens
# order of text, continuous, discrete
self.token_starts = {
'text': 0,
'continuous': self.text_tokens,
'discrete': self.text_tokens + self.continuous_tokens
}
self.token_ends = {
'text': self.text_tokens - 1,
'continuous': self.text_tokens + self.continuous_tokens - 1,
'discrete': self.text_tokens + self.continuous_tokens + self.discrete_tokens - 1
}
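        # Illustrative layout with the defaults above (1 text token, 1024 continuous bins,
        # 1024 discrete values): id 0 is the unused text token, ids 1..1024 are continuous
        # bins and ids 1025..2048 are discrete values, so a discrete action of 3 is
        # embedded as token id 1028.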
# self.transformer = HFGPT(
# n_embd=embed_dim,
# n_layer=layers,
# n_head=heads,
# dropout=dropout,
# vocab_size=self.vocab_size,
# n_positions=context_len,
# activation_fn=activation_fn,
# )
if pretrained_lm is not None:
config = transformers.GPT2Config.from_pretrained(pretrained_lm)
config.attn_pdrop = dropout # 0.1
config.resid_pdrop = dropout
self.transformer = GPT2Model.from_pretrained(
pretrained_lm,
config=config,
)
self.embed_dim = config.n_embd
else:
config = transformers.GPT2Config(
vocab_size=1, # doesn't matter -- we don't use the vocab
n_embd=embed_dim,
n_head=heads,
n_layer=layers,
resid_pdrop=dropout,
attn_pdrop=dropout,
n_positions=context_len,
n_inner=embed_dim * 4,
activation_function=activation_fn,
)
config.n_ctx = context_len
            self.transformer = GPT2Model(config)
self.embed_dim = embed_dim
# head
self.predict_token = nn.Linear(embed_dim, self.vocab_size, bias=False)
self.separator_token = nn.Parameter(torch.zeros(embed_dim))
# Tokenizers
self.text_tokenizer = None # e.g. SentencePiece
self.continuous_action_tokenizer = ContinuousTokenizer(
use_mu_law=False, mu=mu, M=M, n_bins=self.continuous_tokens, offset=self.token_starts['continuous']
) # continuous actions expected to be in [-1, 1]
self.continuous_obs_tokenizer = ContinuousTokenizer(
use_mu_law=True, mu=mu, M=M, n_bins=self.continuous_tokens, offset=self.token_starts['continuous']
)
# Token Embeddings
self.embed_token = nn.Embedding(self.vocab_size, embed_dim)
## Image Embeddings
self.use_patch_pos_encoding = use_patch_pos_encoding
self.image_embedding = ImageEmbedding(
embed_dim=embed_dim,
patch_size=patch_size,
resid_mid_channels=resid_mid_channels,
num_groups=num_groups,
position_vocab_size=position_vocab_size,
use_pos_encoding=self.use_patch_pos_encoding,
)
## Inner-timestep Embeddings
self.use_pos_encoding = use_pos_encoding
self.pos_embed_observation = nn.Embedding(context_len, embed_dim)
# predicts next token (for each input token)
def forward(self, inputs: list = None, compute_loss=False, **kwargs):
# tokenize inputs
if inputs is not None:
token_embeddings, tokens, token_target_masks, token_masks = self.tokenize_input_dicts(inputs)
else:
assert 'token_embeddings' in kwargs and 'tokens' in kwargs and 'token_target_masks' in kwargs and 'token_masks' in kwargs, 'if inputs is None, must provide embeddings, tokens, and masks'
token_embeddings = kwargs['token_embeddings']
tokens = kwargs['tokens']
token_target_masks = kwargs['token_target_masks']
token_masks = kwargs['token_masks']
# pass to transformer
#final_representations = self.transformer(x = token_embeddings, custom_mask = token_masks, batch_first=True)
final_representations = self.transformer(inputs_embeds=token_embeddings, attention_mask=token_masks)['last_hidden_state']
# predict logits
logits = self.predict_token(final_representations)
if compute_loss:
# obtain target tokens, and pad
loss_logits = logits[:, :-1, :]
token_masks = token_masks[:, :-1] # whether originating token is valid
token_target_masks = token_target_masks[:, 1:] # whether target token is valid
loss_masks = token_masks * token_target_masks
target_tokens = tokens[:, 1:]
loss_masks = loss_masks.reshape(-1)
loss_logits = loss_logits.reshape(-1, self.vocab_size)[loss_masks > 0]
target_tokens = target_tokens.reshape(-1)[loss_masks > 0]
loss = torch.nn.functional.cross_entropy(loss_logits, target_tokens)
if 'pdb' in kwargs and kwargs['pdb']:
import pdb; pdb.set_trace()
else:
loss = None
return logits, loss
def tokenize_input_dicts(self, inputs: list):
""""
inputs: list of dicts for each batch
[
{
# observations
text: T x L or None
images: T x 3 x H x W or None
continuous_obs: T x C or None # continuous vector observations
discrete_obs: T x D or None # discrete observations
# actions
continuous_actions: T x A or None
discrete_actions: T x B or None
},
...
{
}
]
returns: the tokens_id, tokens_embedding for each batch respectively
token_embedding:
[
tensor 1 x ? x embed_dim
tensor 1 x ? x embed_dim
tensor 1 x ? x embed_dim
...
] where ? represents the variable number of tokens for each batch
token_id:
[
tensor 1 x ? x 1
tensor 1 x ? x 1
tensor 1 x ? x 1
...
        ] where each element is the token id for each token embedding; image patch tokens, which have no id, are filled with 0
token_target:
[
tensor 1 x ? x 1
tensor 1 x ? x 1
tensor 1 x ? x 1
        ] # binary mask for each token: 1 if the token is a prediction target, 0 otherwise
        # text and action tokens are prediction targets, while image and observation tokens are not
"""
n_batches = len(inputs)
token_embeddings = []
tokens = []
token_target_masks = []
        max_tokens = -1  # max number of tokens across all batch elements
for batch in inputs:
text_tokens, text_embeddings, text_targets = None, None, None
image_tokens, image_embeddings, image_targets = None, None, None
continuous_tokens, continuous_embeddings, continuous_targets = None, None, None
discrete_tokens, discrete_embeddings, discrete_targets = None, None, None
continuous_action_tokens, continuous_action_embeddings, continuous_action_targets = None, None, None
discrete_action_tokens, discrete_action_embeddings, discrete_action_targets = None, None, None
n_timesteps = None
# tokenize text
if 'text' in batch and batch['text'] is not None:
raise NotImplementedError
text_tokens = self.text_tokenizer.tokenize(batch['text'])
text_embeddings = self.embed_token(text_tokens)
text_targets = torch.ones_like(text_tokens)
n_timesteps = text_tokens.shape[0]
# batch_ids.append(text_tokens)
# batch_embeddings.append(text_embeddings)
# batch_targets.append(torch.ones_like(text_tokens))
if 'images' in batch and batch['images'] is not None:
image_embeddings = self.image_embedding(batch['images']) # n_timesteps x n_patches x embed_dim
n_images = image_embeddings.shape[0]
n_patches = image_embeddings.shape[1]
#image_tokens = torch.ones(n_images, n_patches) * -1
image_tokens = torch.zeros(n_images, n_patches, dtype=torch.long, device=self.device)
image_targets = torch.zeros(n_images, n_patches, device=self.device)
if n_timesteps is None:
n_timesteps = n_images
else:
assert n_timesteps == n_images, "number of timesteps must be the same for all modalities"
if 'continuous_obs' in batch and batch['continuous_obs'] is not None:
continuous_tokens = self.continuous_obs_tokenizer.encode(batch['continuous_obs'])
continuous_embeddings = self.embed_token(continuous_tokens)
continuous_targets = torch.zeros_like(continuous_tokens, device=self.device)
if n_timesteps is None:
n_timesteps = continuous_tokens.shape[0]
else:
assert n_timesteps == continuous_tokens.shape[0], "number of timesteps must be the same for all modalities"
if 'discrete_obs' in batch and batch['discrete_obs'] is not None:
discrete_tokens = batch['discrete_obs']
discrete_tokens = discrete_tokens + self.token_starts['discrete'] # add offset
discrete_embeddings = self.embed_token(discrete_tokens)
discrete_targets = torch.zeros_like(discrete_tokens, device=self.device)
if n_timesteps is None:
n_timesteps = discrete_tokens.shape[0]
else:
assert n_timesteps == discrete_tokens.shape[0], "number of timesteps must be the same for all modalities"
if 'continuous_actions' in batch and batch['continuous_actions'] is not None:
continuous_action_tokens = self.continuous_action_tokenizer.encode(batch['continuous_actions'])
continuous_action_embeddings = self.embed_token(continuous_action_tokens)
continuous_action_targets = torch.ones_like(continuous_action_tokens, device=self.device)
if n_timesteps is None:
n_timesteps = continuous_action_tokens.shape[0]
else:
assert n_timesteps == continuous_action_tokens.shape[0], "number of timesteps must be the same for all modalities"
if 'discrete_actions' in batch and batch['discrete_actions'] is not None:
discrete_action_tokens = batch['discrete_actions']
discrete_action_tokens = discrete_action_tokens + self.token_starts['discrete'] # add offset
# embed
discrete_action_embeddings = self.embed_token(discrete_action_tokens)
discrete_action_targets = torch.ones_like(discrete_action_tokens)
if n_timesteps is None:
n_timesteps = discrete_action_tokens.shape[0]
else:
assert n_timesteps == discrete_action_tokens.shape[0], "number of timesteps must be the same for all modalities"
separator_embeddings = torch.ones(n_timesteps, 1, self.embed_dim, device=self.device) * self.separator_token
separator_tokens = torch.zeros(n_timesteps, 1, dtype=torch.long, device=self.device)
separator_targets = torch.zeros(n_timesteps, 1, dtype=torch.long, device=self.device)
            # interleave observation and action tokens, and add the separator token
# interleave tokens
batch_tokens = torch.cat(
[
tokens for tokens in
[text_tokens, image_tokens, continuous_tokens, discrete_tokens, separator_tokens, continuous_action_tokens, discrete_action_tokens]
if tokens is not None
],
dim=1,
)
# interleave targets
batch_target_masks = torch.cat(
[
targets for targets in
[text_targets, image_targets, continuous_targets, discrete_targets, separator_targets, continuous_action_targets, discrete_action_targets]
if targets is not None
],
dim=1
)
# interleave embeddings, n_timesteps x n_tokens x embed_dim
batch_embeddings = torch.cat(
[
embeddings for embeddings in
[text_embeddings, image_embeddings, continuous_embeddings, discrete_embeddings]
if embeddings is not None
],
dim=1
) # concat observations
            n_observation_tokens = batch_embeddings.shape[1]  # number of observation tokens per timestep
if self.use_pos_encoding:
inner_timestep_embeddings = self.pos_embed_observation(torch.arange(n_observation_tokens, device=self.device)).unsqueeze(0) # 1 x n_tokens x embed_dim
# repeat for each timestep
inner_timestep_embeddings = inner_timestep_embeddings.repeat(n_timesteps, 1, 1)
batch_embeddings = batch_embeddings + inner_timestep_embeddings
action_embeddings = torch.cat([action_embedding for action_embedding in [continuous_action_embeddings, discrete_action_embeddings] if action_embedding is not None], dim=1) # concat action
batch_embeddings = torch.cat([batch_embeddings, separator_embeddings, action_embeddings], dim=1) # concat action and separator
tokens_per_timestep = batch_embeddings.shape[1] # number of tokens per timestep
total_tokens = n_timesteps * tokens_per_timestep
# reshape to 1 x (n_timesteps * n_tokens) x embed_dim
batch_embeddings = batch_embeddings.reshape(1, total_tokens, self.embed_dim)
batch_tokens = batch_tokens.reshape(1, total_tokens)
batch_target_masks = batch_target_masks.reshape(1, total_tokens)
total_tokens = batch_embeddings.shape[1]
max_tokens = max(max_tokens, total_tokens)
token_embeddings.append(batch_embeddings)
tokens.append(batch_tokens)
token_target_masks.append(batch_target_masks)
token_masks = []
# (left pad) to max tokens
for i in range(n_batches):
# store which tokens are padding and which are real
token_masks.append(torch.cat([torch.zeros(1, max_tokens - token_embeddings[i].shape[1], device=self.device), torch.ones(1, token_embeddings[i].shape[1], device=self.device)], dim=1))
token_embeddings[i] = torch.cat([torch.zeros(1, max_tokens - token_embeddings[i].shape[1], self.embed_dim, device=self.device), token_embeddings[i]], dim=1)
tokens[i] = torch.cat([torch.zeros(1, max_tokens - tokens[i].shape[1], dtype=torch.long, device=self.device), tokens[i]], dim=1)
token_target_masks[i] = torch.cat([torch.zeros(1, max_tokens - token_target_masks[i].shape[1], device=self.device), token_target_masks[i]], dim=1)
# concat
token_embeddings = torch.cat(token_embeddings, dim=0)
tokens = torch.cat(tokens, dim=0)
token_target_masks = torch.cat(token_target_masks, dim=0)
token_masks = torch.cat(token_masks, dim=0)
return token_embeddings, tokens, token_target_masks, token_masks
# infer how many tokens needed to generate using environment, and restrict tokens generated to valid tokens for env
def predict_control(self, input: dict, task: ControlTask, deterministic: bool = True):
# expects that inputs['continuous_actions'] or inputs['discrete_actions'] are padded by 1 timestep
action_type = task.action_type # continuous or discrete
action_tokens = task.action_tokens
if action_type == gym.spaces.Discrete:
action_str = 'discrete'
assert action_tokens == 1, "only support 1 discrete action token"
elif action_type == gym.spaces.Box:
action_str = 'continuous'
start_token = self.token_starts[action_str]
end_token = self.token_ends[action_str]
# further restrict end_token if discrete action
if action_str == 'discrete':
assert task.env.action_space.n <= self.discrete_tokens, "discrete action space too large for model"
end_token = start_token + task.env.action_space.n - 1
token_embeddings, _, _, token_masks = self.tokenize_input_dicts([input])
# remove last action_tokens tokens, which are padding
token_embeddings = token_embeddings[:, :-action_tokens, :]
token_masks = token_masks[:, :-action_tokens]
predicted_tokens = []
# predict tokens, sampling or deterministically picking best token
for i in range(action_tokens):
logits, _ = self.forward(token_embeddings=token_embeddings, token_masks=token_masks, token_target_masks=None, tokens=None)
# extract valid logits from last timestep
logits = logits[0, -1, start_token:(end_token+1)]
if deterministic:
token = torch.argmax(logits, dim=-1)
else:
# sample from logits
probs = torch.nn.functional.softmax(logits, dim=-1)
token = torch.multinomial(probs, num_samples=1)[0]
token = token + start_token
# append to token_embeddings and token_masks
token_masks = torch.cat([token_masks, torch.ones(token_masks.shape[0], 1, device=self.device)], dim=1)
            new_embedding = self.embed_token(token)  # embed the newly predicted token
token_embeddings = torch.cat([token_embeddings, new_embedding.reshape(1, 1, -1)], dim=1)
# and trim to context len
token_embeddings = token_embeddings[:, -self.context_len:, :]
token_masks = token_masks[:, -self.context_len:]
predicted_tokens.append(token)
# convert tokens back to actions
if action_type == gym.spaces.Discrete:
action = predicted_tokens[0] - start_token
else:
predicted_tokens = torch.stack(predicted_tokens, dim=0)
action = self.continuous_action_tokenizer.decode(predicted_tokens)
return action
if __name__ == '__main__':
model = GatoPolicy(
device='cpu',
embed_dim=128,
layers=2,
heads=4,
dropout=0.1,
patch_size=16,
resid_mid_channels=128,
num_groups=32,
)
n_timesteps = 24
inputs = [{
#'images': torch.randn(10, 3, 224, 224),
'images': torch.randn(n_timesteps, 3, 80, 64),
'discrete_actions': torch.randint(0, 4, (n_timesteps, 1)),
}]
#output = model(inputs)
# Mix of image+discrete and continuous+continuous, and compute loss
output = model([
{
'images': torch.randn(20, 3, 80, 64),
'discrete_actions': torch.randint(0, 55, (20, 1)),
},
{
'continuous_obs': torch.randn(15, 8),
'continuous_actions': torch.randn(15, 4),
}
], compute_loss=True)
| CyberTron-master | cybertron/models/gato2/gato/policy/gato_policy.py |
import torch
import torch.nn as nn
from einops import rearrange
import math
class ImageEmbedding(nn.Module):
def __init__(
self,
embed_dim=768,
patch_size=16,
resid_mid_channels=128,
num_groups=32,
position_vocab_size=128,
use_pos_encoding=True
):
super().__init__()
self.patch_size = patch_size
self.embed_dim = embed_dim
self.patch_embedding = ResidualBlock_V2(mid_channels=resid_mid_channels, num_groups=num_groups)
self.post_embedding_projection = nn.Linear(patch_size * patch_size * 3, embed_dim)
self.use_pos_encoding = use_pos_encoding
self.patch_pos_encoding = PatchPosEncoding(position_vocab_size=position_vocab_size, embed_dim=embed_dim)
def forward(self, x, normalize=True):
# reshape? (B x 1 x H x W) -> (B x 3 x H x W) if C = 1 TODO, probably do this before this function
        # all images in the batch must have the same height/width, but the network can handle inputs of different sizes through multiple forward passes
image_height = x.shape[2]
image_width = x.shape[3]
assert image_height % self.patch_size == 0 and image_width % self.patch_size == 0, "Image dimensions must be divisible by patch size"
n_height = image_height // self.patch_size
n_width = image_width // self.patch_size
if normalize:
# map from 0 to 255 to [-1,1], then scale by patch_size
x = (x / 255.0 * 2) - 1
x = x / math.sqrt(self.patch_size)
# split into patches, rearrange
x = rearrange(x, 'b c (n_h p_1) (n_w p_2) -> (b n_h n_w) c p_1 p_2', p_1=self.patch_size, p_2=self.patch_size)
# embed patches
x = self.patch_embedding(x)
# rearrange again:
x = rearrange(x, '(b n_h n_w) c p_1 p_2 -> b n_h n_w (c p_1 p_2)', p_1=self.patch_size, p_2=self.patch_size, n_h=n_height, n_w=n_width)
# post linear projection
x = self.post_embedding_projection(x) # b n_h n_w embed_dim
# now add positional encoding
if self.use_pos_encoding:
x = x + self.patch_pos_encoding(x)
x = rearrange(x, 'b n_h n_w embed_dim -> b (n_h n_w) embed_dim')
return x
class PatchPosEncoding(nn.Module):
def __init__(self, position_vocab_size=128, embed_dim=768):
super().__init__()
self.position_vocab_size = position_vocab_size
self.embed_dim = embed_dim
self.height_pos_embedding = nn.Embedding(position_vocab_size, embed_dim)
self.width_pos_embedding = nn.Embedding(position_vocab_size, embed_dim)
def forward(self, x):
# x: B x n_height x n_width x embed_dim
# number of patches along height,width
n_height = x.shape[1]
n_width = x.shape[2]
# compute intervals
h_linspace = torch.linspace(0, 1, n_height + 1, device=x.device)
w_linspace = torch.linspace(0, 1, n_width + 1, device=x.device)
# start and end for each patch, along height and width
h_intervals = torch.stack([h_linspace[:-1],h_linspace[1:]]).T # n_height x 2
w_intervals = torch.stack([w_linspace[:-1],w_linspace[1:]]).T # n_width x 2
# convert to integer (quantize)
h_intervals = (h_intervals * self.position_vocab_size).to(dtype=torch.int32)
w_intervals = (w_intervals * self.position_vocab_size).to(dtype=torch.int32)
# sample from intervals or use mean
if self.training:
# sample from interval
h_positions = torch.tensor([torch.randint(low=interval[0], high=interval[1], size=()) for interval in h_intervals], device=x.device)
w_positions = torch.tensor([torch.randint(low=interval[0], high=interval[1], size=()) for interval in w_intervals], device=x.device)
else:
h_intervals[:, 1] = h_intervals[:, 1] - 1
w_intervals[:, 1] = w_intervals[:, 1] - 1
h_positions = h_intervals.mean(dim=-1,dtype=torch.float32).round().to(dtype=torch.int32)
w_positions = w_intervals.mean(dim=-1,dtype=torch.float32).round().to(dtype=torch.int32)
# now get embeddings
h_position_embed = self.height_pos_embedding(h_positions) # n_height x embed_dim
w_position_embed = self.width_pos_embedding(w_positions) # n_width x embed_dim
# combine height, width embeddings
h_position_embed = h_position_embed.unsqueeze(1).repeat(1, n_width, 1) # n_height x n_width x embed_dim
w_position_embed = w_position_embed.unsqueeze(0).repeat(n_height, 1, 1) # n_height x n_width x embed_dim
position_embed = h_position_embed + w_position_embed
return position_embed
class ResidualBlock_V2(nn.Module):
def __init__(self, mid_channels: int = 128, num_groups: int = 32):
super().__init__()
in_channels = 3
# Specific architecture not provided, potentially different
#self.gn1 = nn.GroupNorm(num_groups, in_channels)
self.gn1 = nn.Identity()
self.act1 = nn.GELU()
self.conv1 = nn.Conv2d(in_channels, mid_channels, kernel_size=3, stride=1, padding=1) # Could do 1x1, 0 padding
self.gn2 = nn.GroupNorm(num_groups, mid_channels)
self.act2 = nn.GELU()
self.conv2 = nn.Conv2d(mid_channels, in_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x):
# input: B x 3 x 16 x 16
h = self.conv1(self.act1(self.gn1(x)))
h = self.conv2(self.act2(self.gn2(h)))
return x + h
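if __name__ == '__main__':
    # Hedged usage sketch (not part of the original file): embed a batch of 80x64 RGB frames.
    # eval() makes PatchPosEncoding deterministic (interval midpoints instead of sampled positions).
    embedder = ImageEmbedding(embed_dim=128, patch_size=16, resid_mid_channels=64, num_groups=32).eval()
    images = torch.randint(0, 256, (2, 3, 80, 64)).float()
    patch_tokens = embedder(images)
    # 80x64 pixels with 16x16 patches -> 5 * 4 = 20 patch tokens per image
    print(patch_tokens.shape)  # torch.Size([2, 20, 128])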
| CyberTron-master | cybertron/models/gato2/gato/policy/embeddings.py |
# -*- coding: utf-8 -*-
"""HyenaDNA training & inference example (Public)
This code is adapted from the original Colab tutorial on HyenaDNA. Check that out for an easier entry point into the code.
We provide the code here as an example for those who want something outside Colab, with Huggingface integration.
Original file is located at
https://colab.research.google.com/drive/1wyVEQd4R3HYLTUOXEEQmp_I8aNC_aLhL
"""
#@title Imports
# for HyenaDNA specifically
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from einops import rearrange
from typing import Optional
from torch import Tensor
from torchvision.ops import StochasticDepth
from collections import namedtuple
import numpy as np
import os
import json
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Union
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
"""# HyenaDNA
"""
#@title Hyena layer
def fftconv(u, k, D):
"""
    We apply a convolution through the Fourier domain (using the convolution theorem).
"""
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
if len(u.shape) > 3: k_f = k_f.unsqueeze(1)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm='forward')[..., :seqlen]
out = y + u * D.unsqueeze(-1)
return out.to(dtype=u.dtype)
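def _fftconv_reference_check(batch: int = 2, d_model: int = 4, seqlen: int = 32) -> bool:
    # Hedged sanity check (not part of the original tutorial): fftconv should match an explicit
    # causal convolution, y[t] = sum_{s <= t} k[s] * u[t - s], plus the skip term D * u.
    torch.manual_seed(0)
    u = torch.randn(batch, d_model, seqlen)
    k = torch.randn(d_model, seqlen)
    D = torch.randn(d_model)
    y_ref = torch.zeros_like(u)
    for t in range(seqlen):
        for s in range(t + 1):
            y_ref[..., t] += k[:, s] * u[..., t - s]
    y_ref = y_ref + u * D.unsqueeze(-1)
    return torch.allclose(fftconv(u, k, D), y_ref, atol=1e-4)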
@torch.jit.script
def mul_sum(q, y):
return (q * y).sum(dim=1)
class OptimModule(nn.Module):
""" Interface for Module that allows registering buffers/parameters with configurable optimizer hyperparameters """
def register(self, name, tensor, lr=None, wd=0.0):
"""Register a tensor with a configurable learning rate and 0 weight decay"""
if lr == 0.0:
self.register_buffer(name, tensor)
else:
self.register_parameter(name, nn.Parameter(tensor))
optim = {}
if lr is not None: optim["lr"] = lr
if wd is not None: optim["weight_decay"] = wd
setattr(getattr(self, name), "_optim", optim)
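# Example of the convention (hedged illustration): `self.register("z", z, lr=1e-5)` creates a
# parameter carrying `z._optim == {'lr': 1e-5, 'weight_decay': 0.0}`; the Lightning training
# script later in this dump (configure_optimizers) collects parameters that expose an `_optim`
# attribute into their own optimizer param groups.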
class Sin(nn.Module):
"""The Sin activation function for the Hyena Filter function."""
def __init__(self, dim, w=10, train_freq=True):
super().__init__()
self.freq = nn.Parameter(w * torch.ones(1, dim)) if train_freq else w * torch.ones(1, dim)
def forward(self, x):
return torch.sin(self.freq * x)
class PositionalEmbedding(OptimModule):
def __init__(self, emb_dim: int, seq_len: int, lr_pos_emb: float=1e-5, **kwargs):
"""Complex exponential positional embeddings for Hyena filters."""
super().__init__()
self.seq_len = seq_len
        # The time embedding fed to the filters is normalized so that t_f = 1
t = torch.linspace(0, 1, self.seq_len)[None, :, None] # 1, L, 1
if emb_dim > 1:
bands = (emb_dim - 1) // 2
# To compute the right embeddings we use the "proper" linspace
t_rescaled = torch.linspace(0, seq_len - 1, seq_len)[None, :, None]
w = 2 * math.pi * t_rescaled / seq_len # 1, L, 1
f = torch.linspace(1e-4, bands - 1, bands)[None, None]
z = torch.exp(-1j * f * w)
z = torch.cat([t, z.real, z.imag], dim=-1)
self.register("z", z, lr=lr_pos_emb)
self.register("t", t, lr=0.0)
def forward(self, L):
return self.z[:, :L], self.t[:, :L]
class ExponentialModulation(OptimModule):
"""The window function applied to the output of the (MLP) filter function."""
def __init__(
self,
d_model,
fast_decay_pct=0.3,
slow_decay_pct=1.5,
target=1e-2,
modulation_lr=0.0,
modulate: bool=True,
shift: float = 0.05,
**kwargs
):
super().__init__()
self.modulate = modulate
self.shift = shift
max_decay = math.log(target) / fast_decay_pct
min_decay = math.log(target) / slow_decay_pct
deltas = torch.linspace(min_decay, max_decay, d_model)[None, None]
self.register("deltas", deltas, lr=modulation_lr)
def forward(self, t, x):
if self.modulate:
decay = torch.exp(-t * self.deltas.abs())
x = x * (decay + self.shift)
return x
class HyenaFilter(OptimModule):
def __init__(
self,
d_model,
emb_dim=3, # dim of input to MLP, augments with positional encoding
order=16, # width of the implicit MLP
fused_fft_conv=False,
seq_len=1024,
lr=1e-3,
lr_pos_emb=1e-5,
dropout=0.0,
w=1, # frequency of periodic activations
wd=0, # weight decay of kernel parameters
bias=True,
num_inner_mlps=2,
normalized=False,
**kwargs
):
"""
Implicit long filter with modulation.
Args:
d_model: number of channels in the input
emb_dim: dimension of the positional encoding (`emb_dim` - 1) // 2 is the number of bands
order: width of the FFN
num_inner_mlps: number of inner linear layers inside filter MLP
Note:
filter_dropout is not implemented
"""
super().__init__()
self.d_model = d_model
self.use_bias = bias
self.fused_fft_conv = fused_fft_conv
self.bias = nn.Parameter(torch.randn(self.d_model))
self.dropout = nn.Dropout(dropout)
act = Sin(dim=order, w=w)
self.emb_dim = emb_dim
assert emb_dim % 2 != 0 and emb_dim >= 3, "emb_dim must be odd and greater or equal to 3 (time, sine and cosine)"
self.seq_len = seq_len
self.pos_emb = PositionalEmbedding(emb_dim, seq_len, lr_pos_emb)
self.implicit_filter = nn.Sequential(
nn.Linear(emb_dim, order),
act,
)
for i in range(num_inner_mlps):
self.implicit_filter.append(nn.Linear(order, order))
self.implicit_filter.append(act)
self.implicit_filter.append(nn.Linear(order, d_model, bias=False))
self.modulation = ExponentialModulation(d_model, **kwargs)
self.normalized = normalized
for c in self.implicit_filter.children():
for name, v in c.state_dict().items():
optim = {"weight_decay": wd, "lr": lr}
setattr(getattr(c, name), "_optim", optim)
def filter(self, L, *args, **kwargs):
z, t = self.pos_emb(L)
h = self.implicit_filter(z)
h = self.modulation(t, h)
return h
def forward(self, x, L, k=None, bias=None, *args, **kwargs):
if k is None: k = self.filter(L)
# Ensure compatibility with filters that return a tuple
k = k[0] if type(k) is tuple else k
y = fftconv(x, k, bias)
return y
class HyenaOperator(nn.Module):
def __init__(
self,
d_model,
l_max,
order=2,
filter_order=64,
dropout=0.0,
filter_dropout=0.0,
**filter_args,
):
r"""
Hyena operator described in the paper https://arxiv.org/pdf/2302.10866.pdf
Args:
d_model (int): Dimension of the input and output embeddings (width of the layer)
l_max: (int): Maximum input sequence length. Defaults to None
order: (int): Depth of the Hyena recurrence. Defaults to 2
dropout: (float): Dropout probability. Defaults to 0.0
filter_dropout: (float): Dropout probability for the filter. Defaults to 0.0
"""
super().__init__()
self.d_model = d_model
self.l_max = l_max
self.order = order
inner_width = d_model * (order + 1)
self.dropout = nn.Dropout(dropout)
self.in_proj = nn.Linear(d_model, inner_width)
self.out_proj = nn.Linear(d_model, d_model)
self.short_filter = nn.Conv1d(
inner_width,
inner_width,
3,
padding=2,
groups=inner_width
)
self.filter_fn = HyenaFilter(
d_model * (order - 1),
order=filter_order,
seq_len=l_max,
channels=1,
dropout=filter_dropout,
**filter_args
)
def forward(self, u, *args, **kwargs):
l = u.size(-2)
l_filter = min(l, self.l_max)
u = self.in_proj(u)
u = rearrange(u, 'b l d -> b d l')
uc = self.short_filter(u)[...,:l_filter]
*x, v = uc.split(self.d_model, dim=1)
k = self.filter_fn.filter(l_filter)[0]
k = rearrange(k, 'l (o d) -> o d l', o=self.order - 1)
bias = rearrange(self.filter_fn.bias, '(o d) -> o d', o=self.order - 1)
for o, x_i in enumerate(reversed(x[1:])):
v = self.dropout(v * x_i)
v = self.filter_fn(v, l_filter, k=k[o], bias=bias[o])
y = rearrange(v * x[0], 'b d l -> b l d')
y = self.out_proj(y)
return y
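def _hyena_operator_demo():
    # Hedged usage sketch (not from the original colab): a HyenaOperator is a drop-in sequence
    # mixer mapping (batch, seq_len, d_model) -> (batch, seq_len, d_model).
    layer = HyenaOperator(d_model=128, l_max=1024, order=2, filter_order=64)
    y = layer(torch.randn(4, 1024, 128))
    assert y.shape == (4, 1024, 128)
    return y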
#@title Self-Attention (alternative)
"""
If you'd like to try the HyenaDNA model using attention instead, you can, i.e.,
use a regular decoder-only Transformer.
"""
class SelfAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
super().__init__()
self.causal = causal
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
def forward(self, qkv, causal=None, key_padding_mask=None):
"""Implements the multihead softmax attention.
Arguments
---------
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
causal: if passed, will override self.causal
key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
False means to mask out. (B, S)
"""
batch_size, seqlen = qkv.shape[0], qkv.shape[1]
causal = self.causal if causal is None else causal
q, k, v = qkv.unbind(dim=2)
softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
if key_padding_mask is not None:
padding_mask = torch.full((batch_size, seqlen), -10000.0, dtype=scores.dtype,
device=scores.device)
padding_mask.masked_fill_(key_padding_mask, 0.0)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + rearrange(padding_mask, 'b s -> b 1 1 s')
if causal:
# "triu_tril_cuda_template" not implemented for 'BFloat16'
# So we have to construct the mask in float
causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + causal_mask.to(dtype=scores.dtype)
attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
attention_drop = F.dropout(attention, self.dropout_p if self.training else 0.0)
output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
return output
class MHA(nn.Module):
"""Multi-head self-attention and cross-attention
"""
def __init__(self, embed_dim, num_heads, bias=True, dropout=0.0,
softmax_scale=None, causal=False, layer_idx=None, dwconv=False,return_residual=False,device=None, dtype=None) -> None:
"""
return_residual: whether to return the input x along with the output. This is for
performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.embed_dim = embed_dim
self.causal = causal
self.layer_idx = layer_idx
self.dwconv = dwconv
self.return_residual = return_residual
self.num_heads = num_heads
        assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
self.head_dim = self.embed_dim // num_heads
linear_cls = nn.Linear
linear_resid_cls = LinearResidual
inner_attn_cls = SelfAttention
if not self.return_residual:
self.Wqkv = linear_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
else:
self.Wqkv = linear_resid_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
if self.dwconv:
self.dwconv_qkv = nn.Conv1d(3 * embed_dim, 3 * embed_dim, kernel_size=3, padding=2,
groups=3 * embed_dim)
self.inner_attn = inner_attn_cls(causal=causal, softmax_scale=softmax_scale,
attention_dropout=dropout)
# output projection always have the bias (for now)
self.out_proj = linear_cls(embed_dim, embed_dim, **factory_kwargs)
def forward(self, x, key_padding_mask=None, **kwargs):
"""
Arguments:
x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if
cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total
                is the sum of the sequence lengths in the batch.
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into x. Only applicable when using
FlashAttention.
max_seqlen: int. Maximum sequence length in the batch.
key_padding_mask: boolean mask, True means to keep, False means to mask out.
(batch, seqlen). Only applicable when not using FlashAttention.
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
inference_params: for generation. Adapted from Megatron-LM (and Apex)
https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470
"""
kwargs = ({'key_padding_mask': key_padding_mask, **kwargs})
if not self.return_residual:
qkv = self.Wqkv(x)
else:
qkv, x = self.Wqkv(x)
if self.dwconv:
qkv = rearrange(self.dwconv_qkv(rearrange(qkv, 'b s d -> b d s'))[..., :-2],
'b d s -> b s d').contiguous()
qkv = rearrange(qkv, '... (three h d) -> ... three h d', three=3, d=self.head_dim)
context = self.inner_attn(qkv, **kwargs)
out = self.out_proj(rearrange(context, '... h d -> ... (h d)'))
return out if not self.return_residual else (out, x)
#@title MLP layer
"""
The MLP layer after the mixer layer (HyenaOperator).
"""
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, activation=F.gelu,
return_residual=False, device=None, dtype=None):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/mlp.py
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.return_residual = return_residual
self.fc1 = nn.Linear(in_features, hidden_features, **factory_kwargs)
self.activation = activation
self.fc2 = nn.Linear(hidden_features, out_features, **factory_kwargs)
def forward(self, x):
y = self.fc1(x)
y = self.activation(y)
y = self.fc2(y)
return y if not self.return_residual else (y, x)
#@title Block layer (Hyena + MLP layers)
"""
A block consists of a Mixer layer (Hyena or attention), and a MLP layer.
"""
class LinearResidual(nn.Linear):
"""Wrap nn.Linear to return the residual as well. For compatibility with FusedDense.
"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return super().forward(input), input
class Block(nn.Module):
def __init__(self, dim, mixer_cls=None, mlp_cls=None, norm_cls=nn.LayerNorm,
dropout_cls=nn.Dropout, prenorm=True, resid_dropout1=0., resid_dropout2=0.,
drop_path1=0., drop_path2=0.,
return_residual=False,
residual_in_fp32=False):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/block.py
For prenorm=True, this Block has a slightly different structure compared to a regular
prenorm Transformer block.
The standard block is: LN -> MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add.
[Ref: https://arxiv.org/abs/2002.04745]
Here we have: Dropout -> Add -> LN -> MHA -> Dropout -> Add -> LN -> MLP, returning both
the hidden_states (output of the MLP) and the residual.
This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
The residual needs to be provided (except for the very first block).
For prenorm=False, this Block has the same structure as a regular postnorm Transformer
block: MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add -> LN.
return_residual: whether each of the sub-layers (mixer and mlp) will return the residual.
This is for performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
super().__init__()
self.prenorm = prenorm
self.return_residual = return_residual
self.residual_in_fp32 = residual_in_fp32
if self.residual_in_fp32:
assert self.prenorm, 'residual_in_fp32 is only compatible with prenorm=True'
if mixer_cls is None:
mixer_cls = partial(MHA, num_heads=dim // 64)
if mlp_cls is None:
mlp_cls = partial(Mlp, hidden_features=4 * dim)
self.mixer = mixer_cls()
self.dropout1 = dropout_cls(resid_dropout1)
self.drop_path1 = StochasticDepth(drop_path1, mode='row')
self.norm1 = norm_cls(dim)
self.mlp = mlp_cls(dim)
if not isinstance(self.mlp, nn.Identity):
self.dropout2 = dropout_cls(resid_dropout2)
self.drop_path2 = StochasticDepth(drop_path2, mode='row')
self.norm2 = norm_cls(dim)
def forward(self, hidden_states, residual = None,
mixer_subset=None, mixer_kwargs=None):
r"""Pass the input through the encoder layer.
Args:
hidden_states: the sequence to the encoder layer (required).
residual: if postnorm, residual=None, If prenorm, hidden_states = Attn/MLP(LN(residual))
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
"""
if self.prenorm:
dropped = self.drop_path1(self.dropout1(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
if mixer_kwargs is None:
mixer_kwargs = {}
if mixer_subset is not None:
mixer_kwargs['mixer_subset'] = mixer_subset
hidden_states = self.mixer(hidden_states, **mixer_kwargs)
if mixer_subset is not None:
residual = residual[:, mixer_subset]
if not isinstance(self.mlp, nn.Identity):
dropped = self.drop_path2(self.dropout2(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
hidden_states = self.mlp(hidden_states)
return hidden_states, residual
else:
assert residual is None
mixer_out = self.mixer(
hidden_states, **(mixer_kwargs if mixer_kwargs is not None else {})
)
if self.return_residual: # mixer out is actually a pair here
mixer_out, hidden_states = mixer_out
hidden_states = self.norm1((self.drop_path1(self.dropout1(mixer_out))
+ hidden_states).to(dtype=self.norm1.weight.dtype))
if not isinstance(self.mlp, nn.Identity):
mlp_out = self.mlp(hidden_states)
if self.return_residual: # mlp out is actually a pair here
mlp_out, hidden_states = mlp_out
hidden_states = self.norm2((self.drop_path2(self.dropout2(mlp_out))
+ hidden_states).to(dtype=self.norm2.weight.dtype))
return hidden_states
def create_mixer_cls(layer=None,
attn_layer_idx=None, attn_cfg=None, layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
if attn_layer_idx is not None and layer_idx in attn_layer_idx:
causal = True if attn_cfg is None else attn_cfg.pop('causal', True)
mha_cls = MHA
mixer_cls = partial(mha_cls, causal=causal, layer_idx=layer_idx,
**(attn_cfg if attn_cfg is not None else {}),**factory_kwargs)
else:
# mixer_cls = instantiate(registry.layer, layer, partial=True, layer_idx=layer_idx, **factory_kwargs)
mixer_cls = partial(HyenaOperator, **layer)
return mixer_cls
def create_mlp_cls(d_model, d_inner=None, device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
inner_dim = d_inner if d_inner is not None else 4 * d_model
mlp_cls = partial(Mlp, hidden_features=inner_dim,
activation=partial(F.gelu, approximate='tanh'), **factory_kwargs)
return mlp_cls
def create_block(d_model, d_inner=None,
layer=None, attn_layer_idx=None,
attn_cfg=None, layer_norm_epsilon=1e-5,
resid_dropout1=0.0, resid_dropout2=0.0, residual_in_fp32=False,
layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
mixer_cls = create_mixer_cls(layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_idx=layer_idx,
**factory_kwargs)
mlp_cls = create_mlp_cls(d_model, d_inner=d_inner,
**factory_kwargs)
norm_cls = partial(nn.LayerNorm, eps=layer_norm_epsilon, **factory_kwargs)
block = Block(d_model, mixer_cls, mlp_cls, norm_cls=norm_cls,
prenorm=True, resid_dropout1=resid_dropout1, resid_dropout2=resid_dropout2,residual_in_fp32=residual_in_fp32)
block.layer_idx = layer_idx
return block
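def _create_block_demo():
    # Hedged sketch (not from the original colab): build a single prenorm Hyena block. The
    # `layer` dict mirrors HyenaOperator's constructor and must include d_model here, since
    # create_block passes it straight through to the mixer.
    blk = create_block(d_model=128,
                       layer={'d_model': 128, 'l_max': 1024, 'order': 2, 'filter_order': 64})
    hidden, residual = blk(torch.randn(2, 1024, 128))
    assert hidden.shape == residual.shape == (2, 1024, 128)
    return hidden, residual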
# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(module, n_layer, initializer_range=0.02, rescale_prenorm_residual=True,
glu_act=False):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=initializer_range)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=initializer_range)
if rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if name in ["out_proj.weight", "fc2.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
# If using GLU activation for now, we scale the std by 2
elif name in ["output_linear.0.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
if not glu_act:
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
else:
out_features = p.shape[0]
# Multiplying the first half of the matrix by 2 since sigmoid scales it down by 0.5
# on average.
nn.init.normal_(p[:out_features // 2], mean=0.0, std=initializer_range / math.sqrt(2 * n_layer) * 2)
#@title Backbone model (stack of blocks)
"""
A backbone model consists of a stack of blocks. If you use attention, then
positional embeddings are included. When using Hyena, the positional embeddings
revert to doing nothing.
"""
class GPT2Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, max_position_embeddings, padding_idx=None,
word_embed_proj_dim=None, device=None, dtype=None):
"""
        If max_position_embeddings <= 0, there are no position embeddings
        If word_embed_proj_dim is not None (e.g., OPT-350m), we embed to that dimension,
            then project up to embed_dim
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if word_embed_proj_dim is None:
self.word_embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx,
**factory_kwargs)
self.project_in = None
else:
self.word_embeddings = nn.Embedding(vocab_size, word_embed_proj_dim,
padding_idx=padding_idx, **factory_kwargs)
self.project_in = nn.Linear(word_embed_proj_dim, embed_dim, bias=False,
**factory_kwargs)
self.max_position_embeddings = max_position_embeddings
if self.max_position_embeddings > 0:
self.position_embeddings = nn.Embedding(max_position_embeddings, embed_dim,
**factory_kwargs)
def forward(self, input_ids, position_ids=None):
"""
input_ids: (batch, seqlen)
position_ids: (batch, seqlen)
"""
batch_size, seqlen = input_ids.shape
embeddings = self.word_embeddings(input_ids)
if self.project_in is not None:
embeddings = self.project_in(embeddings)
if self.max_position_embeddings > 0:
if position_ids is None:
position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
return embeddings
class LMBackbone(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.process_group = process_group
self.residual_in_fp32 = residual_in_fp32
# note max_position_embeddings is 0 for Hyena, and therefore isn't used
self.embeddings = GPT2Embeddings(d_model, vocab_size, max_position_embeddings,
**factory_kwargs)
self.layers = nn.ModuleList([create_block(
d_model, d_inner=d_inner,
layer=layer, attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_norm_epsilon=layer_norm_epsilon,
resid_dropout1=embed_dropout if i == 0 else resid_dropout,
resid_dropout2=resid_dropout, residual_in_fp32=residual_in_fp32,layer_idx=i,
**factory_kwargs,
) for i in range(n_layer)])
self.drop_f = nn.Dropout(resid_dropout)
self.ln_f = nn.LayerNorm(d_model, eps=layer_norm_epsilon, **factory_kwargs)
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
def forward(self, input_ids, position_ids=None):
hidden_states = self.embeddings(input_ids, position_ids=position_ids,)
residual = None
for layer in self.layers:
hidden_states, residual = layer(hidden_states, residual)
dropped = self.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.ln_f(residual.to(dtype=self.ln_f.weight.dtype))
return hidden_states
#@title Decoder head layer
"""
A simple decoder head (an MLP) to predict a sequence-level classification.
You have the option to average across all the tokens in a sequence or to use the
"last" token for classification. At least, those 2 worked best for us, but we provide
other "modes" as well.
We only need this for classification. Otherwise we'll use the hidden
states of the backbone as embeddings.
"""
class SequenceDecoder(nn.Module):
def __init__(
self, d_model, d_output=None, l_output=None, use_lengths=False, mode="last"
):
super().__init__()
self.output_transform = nn.Identity() if d_output is None else nn.Linear(d_model, d_output)
if l_output is None:
self.l_output = None
self.squeeze = False
elif l_output == 0:
# Equivalent to getting an output of length 1 and then squeezing
self.l_output = 1
self.squeeze = True
else:
assert l_output > 0
self.l_output = l_output
self.squeeze = False
self.use_lengths = use_lengths
self.mode = mode
if mode == 'ragged':
assert not use_lengths
def forward(self, x, state=None, lengths=None, l_output=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
if self.l_output is None:
if l_output is not None:
assert isinstance(l_output, int) # Override by pass in
else:
# Grab entire output
l_output = x.size(-2)
squeeze = False
else:
l_output = self.l_output
squeeze = self.squeeze
if self.mode == "last":
restrict = lambda x: x[..., -l_output:, :]
elif self.mode == "first":
restrict = lambda x: x[..., :l_output, :]
elif self.mode == "pool":
restrict = lambda x: (
torch.cumsum(x, dim=-2)
/ torch.arange(
1, 1 + x.size(-2), device=x.device, dtype=x.dtype
).unsqueeze(-1)
)[..., -l_output:, :]
def restrict(x):
L = x.size(-2)
s = x.sum(dim=-2, keepdim=True)
if l_output > 1:
c = torch.cumsum(x[..., -(l_output - 1) :, :].flip(-2), dim=-2)
c = F.pad(c, (0, 0, 1, 0))
s = s - c # (B, l_output, D)
s = s.flip(-2)
denom = torch.arange(
L - l_output + 1, L + 1, dtype=x.dtype, device=x.device
)
s = s / denom
return s
elif self.mode == "sum":
restrict = lambda x: torch.cumsum(x, dim=-2)[..., -l_output:, :]
# TODO use same restrict function as pool case
elif self.mode == 'ragged':
assert lengths is not None, "lengths must be provided for ragged mode"
# remove any additional padding (beyond max length of any sequence in the batch)
restrict = lambda x: x[..., : max(lengths), :]
else:
raise NotImplementedError(
"Mode must be ['last' | 'first' | 'pool' | 'sum']"
)
# Restrict to actual length of sequence
if self.use_lengths:
assert lengths is not None
x = torch.stack(
[
restrict(out[..., :length, :])
for out, length in zip(torch.unbind(x, dim=0), lengths)
],
dim=0,
)
else:
x = restrict(x)
if squeeze:
assert x.size(-2) == 1
x = x.squeeze(-2)
x = self.output_transform(x)
return x
def step(self, x, state=None):
# Ignore all length logic
return self.output_transform(x)
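def _sequence_decoder_demo():
    # Hedged sketch (not from the original colab): mean-pool (batch, seq_len, d_model) hidden
    # states down to (batch, n_classes) logits, the setup used for classification here
    # (l_output=0 squeezes the length dimension, mode='pool' averages over the sequence).
    head = SequenceDecoder(d_model=128, d_output=2, l_output=0, mode='pool')
    logits = head(torch.randn(4, 100, 128))
    assert logits.shape == (4, 2)
    return logits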
#@title Model (backbone + head)
"""
Putting it all together, the model consists of a backbone model
and a decoder head (you can turn off head for embeddings only too).
Here we use a simple head to do multi-classification, but
can also swap the head to do next token prediction too. We defer to the main
HyenaDNA repo for that code, since pretraining with next-token prediction isn't quite
feasible on Colab.
"""
class HyenaDNAModel(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
layer=None, attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
pad_vocab_size_multiple: int = 1, use_head=False, n_classes: int = 2,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.use_head = use_head
# check if layer (config) has d_model (HF code differs from main Safari code)
if 'd_model' not in layer:
layer['d_model'] = d_model
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, residual_in_fp32=residual_in_fp32,
**factory_kwargs, **kwargs
)
# we only need a head if doing classification, otherwise we'll use the
# hidden states as embeddings
if self.use_head:
self.head = SequenceDecoder(d_model=d_model, d_output=n_classes, l_output=0, mode='pool')
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
# if self.use_head:
# self.tie_weights()
# def tie_weights(self):
# self.head.weight = self.backbone.embeddings.word_embeddings.weight
def forward(self, input_ids, position_ids=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids)
if self.use_head:
return self.head(hidden_states)
else:
return hidden_states
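def _hyenadna_model_demo():
    # Hedged sketch (not from the original colab): a tiny from-scratch HyenaDNA that returns
    # per-token embeddings; use_head=True would add the SequenceDecoder classification head.
    model = HyenaDNAModel(d_model=128, n_layer=2, d_inner=512, vocab_size=12,
                          layer={'l_max': 1024, 'order': 2, 'filter_order': 64})
    input_ids = torch.randint(0, 12, (2, 1024))
    embeddings = model(input_ids)
    assert embeddings.shape == (2, 1024, 128)
    return embeddings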
"""# Data pipeline
"""
#@title Tokenizer
"""
Just a simple character level tokenizer.
From: https://github.com/dariush-bahrami/character-tokenizer/blob/master/charactertokenizer/core.py
CharacterTokenizer for Hugging Face Transformers.
This is heavily inspired from CanineTokenizer in transformers package.
"""
class CharacterTokenizer(PreTrainedTokenizer):
def __init__(self, characters: Sequence[str], model_max_length: int, padding_side: str='left', **kwargs):
"""Character tokenizer for Hugging Face transformers.
Args:
characters (Sequence[str]): List of desired characters. Any character which
is not included in this list will be replaced by a special token called
                [UNK] with id=6. The following is a list of all of the special tokens with
their corresponding ids:
"[CLS]": 0
"[SEP]": 1
"[BOS]": 2
"[MASK]": 3
"[PAD]": 4
"[RESERVED]": 5
"[UNK]": 6
an id (starting at 7) will be assigned to each character.
model_max_length (int): Model maximum sequence length.
"""
self.characters = characters
self.model_max_length = model_max_length
bos_token = AddedToken("[BOS]", lstrip=False, rstrip=False)
eos_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
sep_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
cls_token = AddedToken("[CLS]", lstrip=False, rstrip=False)
pad_token = AddedToken("[PAD]", lstrip=False, rstrip=False)
unk_token = AddedToken("[UNK]", lstrip=False, rstrip=False)
mask_token = AddedToken("[MASK]", lstrip=True, rstrip=False)
super().__init__(
bos_token=bos_token,
eos_token=sep_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
unk_token=unk_token,
add_prefix_space=False,
model_max_length=model_max_length,
padding_side=padding_side,
**kwargs,
)
self._vocab_str_to_int = {
"[CLS]": 0,
"[SEP]": 1,
"[BOS]": 2,
"[MASK]": 3,
"[PAD]": 4,
"[RESERVED]": 5,
"[UNK]": 6,
**{ch: i + 7 for i, ch in enumerate(characters)},
}
self._vocab_int_to_str = {v: k for k, v in self._vocab_str_to_int.items()}
@property
def vocab_size(self) -> int:
return len(self._vocab_str_to_int)
def _tokenize(self, text: str) -> List[str]:
return list(text)
def _convert_token_to_id(self, token: str) -> int:
return self._vocab_str_to_int.get(token, self._vocab_str_to_int["[UNK]"])
def _convert_id_to_token(self, index: int) -> str:
return self._vocab_int_to_str[index]
def convert_tokens_to_string(self, tokens):
return "".join(tokens)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = cls + token_ids_0 + sep
if token_ids_1 is not None:
result += token_ids_1 + sep
return result
def get_special_tokens_mask(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False,
) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0,
token_ids_1=token_ids_1,
already_has_special_tokens=True,
)
result = [1] + ([0] * len(token_ids_0)) + [1]
if token_ids_1 is not None:
result += ([0] * len(token_ids_1)) + [1]
return result
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = len(cls + token_ids_0 + sep) * [0]
if token_ids_1 is not None:
result += len(token_ids_1 + sep) * [1]
return result
def get_config(self) -> Dict:
return {
"char_ords": [ord(ch) for ch in self.characters],
"model_max_length": self.model_max_length,
}
@classmethod
def from_config(cls, config: Dict) -> "CharacterTokenizer":
cfg = {}
cfg["characters"] = [chr(i) for i in config["char_ords"]]
cfg["model_max_length"] = config["model_max_length"]
return cls(**cfg)
def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
cfg = self.get_config()
with open(cfg_file, "w") as f:
json.dump(cfg, f, indent=4)
@classmethod
def from_pretrained(cls, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
with open(cfg_file) as f:
cfg = json.load(f)
return cls.from_config(cfg)
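def _character_tokenizer_demo():
    # Hedged sketch mirroring the inference example in huggingface.py further below. It assumes
    # a transformers release contemporary with this tutorial: the vocab here is built after
    # super().__init__(), which some newer transformers versions reject.
    tok = CharacterTokenizer(characters=['A', 'C', 'G', 'T', 'N'], model_max_length=512)
    ids = tok('ACGTN')['input_ids']
    # With this class's id scheme, A..N map to 7..11, wrapped in [CLS]=0 / [SEP]=1.
    return ids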
| hyena-dna-main | standalone_hyenadna.py |
#@title Huggingface Pretrained Wrapper
"""
This script is a simple HuggingFace wrapper around a HyenaDNA model, to enable a one-click example
of how to load the pretrained weights and get embeddings.
It will instantiate a HyenaDNA model (model class is in the `standalone_hyenadna.py`), and handle the downloading of pretrained weights from HuggingFace.
Check out the Colab notebook for a simpler and more complete walk-through of how to use HyenaDNA with pretrained weights.
"""
import json
import os
import subprocess
import torch
# import transformers
from transformers import PreTrainedModel
import re
from standalone_hyenadna import HyenaDNAModel
from standalone_hyenadna import CharacterTokenizer
# helper 1
def inject_substring(orig_str):
"""Hack to handle matching keys between models trained with and without
gradient checkpointing."""
# modify for mixer keys
pattern = r"\.mixer"
injection = ".mixer.layer"
modified_string = re.sub(pattern, injection, orig_str)
# modify for mlp keys
pattern = r"\.mlp"
injection = ".mlp.layer"
modified_string = re.sub(pattern, injection, modified_string)
return modified_string
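# Hedged illustration (the key name is made up, but follows the backbone's naming scheme):
#   inject_substring('model.backbone.layers.0.mixer.filter_fn.bias')
#   -> 'model.backbone.layers.0.mixer.layer.filter_fn.bias'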
# helper 2
def load_weights(scratch_dict, pretrained_dict, checkpointing=False):
"""Loads pretrained (backbone only) weights into the scratch state dict."""
# loop thru state dict of scratch
# find the corresponding weights in the loaded model, and set it
# need to do some state dict "surgery"
for key, value in scratch_dict.items():
if 'backbone' in key:
# the state dicts differ by one prefix, '.model', so we add that
key_loaded = 'model.' + key
# breakpoint()
# need to add an extra ".layer" in key
if checkpointing:
key_loaded = inject_substring(key_loaded)
try:
scratch_dict[key] = pretrained_dict[key_loaded]
            except KeyError:
                raise Exception(f'key mismatch in the state dicts: {key_loaded} not found in the pretrained weights!')
# scratch_dict has been updated
return scratch_dict
class HyenaDNAPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
base_model_prefix = "hyenadna"
def __init__(self, config):
pass
def forward(self, input_ids, **kwargs):
return self.model(input_ids, **kwargs)
@classmethod
def from_pretrained(cls,
path,
model_name,
download=False,
config=None,
device='cpu',
use_head=False,
n_classes=2,
):
# first check if it is a local path
pretrained_model_name_or_path = os.path.join(path, model_name)
if os.path.isdir(pretrained_model_name_or_path) and download == False:
if config is None:
config = json.load(open(os.path.join(pretrained_model_name_or_path, 'config.json')))
else:
hf_url = f'https://huggingface.co/LongSafari/{model_name}'
subprocess.run(f'rm -rf {pretrained_model_name_or_path}', shell=True)
command = f'mkdir -p {path} && cd {path} && git lfs install && git clone {hf_url}'
subprocess.run(command, shell=True)
if config is None:
config = json.load(open(os.path.join(pretrained_model_name_or_path, 'config.json')))
scratch_model = HyenaDNAModel(**config, use_head=use_head, n_classes=n_classes) # the new model format
loaded_ckpt = torch.load(
os.path.join(pretrained_model_name_or_path, 'weights.ckpt'),
map_location=torch.device(device)
)
        # need to load weights slightly differently if using gradient checkpointing
        checkpointing = config.get("checkpoint_mixer", False) == True
# grab state dict from both and load weights
state_dict = load_weights(scratch_model.state_dict(), loaded_ckpt['state_dict'], checkpointing=checkpointing)
# scratch model has now been updated
scratch_model.load_state_dict(state_dict)
print("Loaded pretrained weights ok!")
return scratch_model
####################################################################################################
"""# Inference (450k to 1M tokens)!
If all you're interested in is getting embeddings on long DNA sequences
(inference), then we can do that right here in Colab!
* We provide an example how to load the weights from Huggingface.
* On the free tier, which uses a
T4 GPU w/16GB of memory, we can process 450k tokens / nucleotides.
* For processing 1M tokens, you'll need an A100, which Colab offers as a paid tier.
* (Don't forget to run the entire notebook above too)
--
To pretrain or fine-tune the 1M long sequence model (8 layers, d_model=256),
you'll need 8 A100s 80GB, and all that code is in the main repo!
"""
#@title Single example
import json
import os
import subprocess
# import transformers
from transformers import PreTrainedModel
def inference_single():
'''
    this selects which backbone to use, and grabs weights/config from HF
    5 options:
'hyenadna-tiny-1k-seqlen' # fine-tune on colab ok
'hyenadna-small-32k-seqlen'
'hyenadna-medium-160k-seqlen' # inference only on colab
'hyenadna-medium-450k-seqlen' # inference only on colab
'hyenadna-large-1m-seqlen' # inference only on colab
'''
# you only need to select which model to use here, we'll do the rest!
pretrained_model_name = 'hyenadna-small-32k-seqlen'
max_lengths = {
'hyenadna-tiny-1k-seqlen': 1024,
'hyenadna-small-32k-seqlen': 32768,
'hyenadna-medium-160k-seqlen': 160000,
'hyenadna-medium-450k-seqlen': 450000, # T4 up to here
'hyenadna-large-1m-seqlen': 1_000_000, # only A100 (paid tier)
}
max_length = max_lengths[pretrained_model_name] # auto selects
# data settings:
use_padding = True
rc_aug = False # reverse complement augmentation
add_eos = False # add end of sentence token
# we need these for the decoder head, if using
use_head = False
n_classes = 2 # not used for embeddings only
# you can override with your own backbone config here if you want,
# otherwise we'll load the HF one in None
backbone_cfg = None
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Using device:", device)
# instantiate the model (pretrained here)
if pretrained_model_name in ['hyenadna-tiny-1k-seqlen',
'hyenadna-small-32k-seqlen',
'hyenadna-medium-160k-seqlen',
'hyenadna-medium-450k-seqlen',
'hyenadna-large-1m-seqlen']:
# use the pretrained Huggingface wrapper instead
model = HyenaDNAPreTrainedModel.from_pretrained(
'./checkpoints',
pretrained_model_name,
download=True,
config=backbone_cfg,
device=device,
use_head=use_head,
n_classes=n_classes,
)
# from scratch
elif pretrained_model_name is None:
model = HyenaDNAModel(**backbone_cfg, use_head=use_head, n_classes=n_classes)
# create tokenizer
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'], # add DNA characters, N is uncertain
model_max_length=max_length + 2, # to account for special tokens, like EOS
add_special_tokens=False, # we handle special tokens elsewhere
padding_side='left', # since HyenaDNA is causal, we pad on the left
)
#### Single embedding example ####
# create a sample 450k long, prepare
sequence = 'ACTG' * int(max_length/4)
tok_seq = tokenizer(sequence)
tok_seq = tok_seq["input_ids"] # grab ids
# place on device, convert to tensor
tok_seq = torch.LongTensor(tok_seq).unsqueeze(0) # unsqueeze for batch dim
tok_seq = tok_seq.to(device)
# prep model and forward
model.to(device)
with torch.inference_mode():
embeddings = model(tok_seq)
print(embeddings.shape) # embeddings here!
# # uncomment to run! (to get embeddings)
inference_single()
# to run this, just call:
# python huggingface.py
| hyena-dna-main | huggingface.py |
import copy
import os
import random
import time
from functools import partial, wraps
from typing import Callable, List, Sequence
import hydra
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import wandb
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.utilities import rank_zero_only, rank_zero_warn
from pytorch_lightning.strategies.ddp import DDPStrategy
from tqdm.auto import tqdm
import src.models.nn.utils as U
import src.utils as utils
import src.utils.train
from src.dataloaders import SequenceDataset # TODO make registry
from src.tasks import decoders, encoders, tasks
from src.utils import registry
from src.utils.optim_groups import add_optimizer_hooks
log = src.utils.train.get_logger(__name__)
# Turn on TensorFloat32 (speeds up large model training substantially)
import torch.backends
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
OmegaConf.register_new_resolver('eval', eval)
OmegaConf.register_new_resolver('div_up', lambda x, y: (x + y - 1) // y)
# Lots of annoying hacks to get WandbLogger to continuously retry on failure
class DummyExperiment:
"""Dummy experiment."""
def nop(self, *args, **kw):
pass
def __getattr__(self, _):
return self.nop
def __getitem__(self, idx) -> "DummyExperiment":
# enables self.logger.experiment[0].add_image(...)
return self
def __setitem__(self, *args, **kwargs) -> None:
pass
def rank_zero_experiment(fn: Callable) -> Callable:
"""Returns the real experiment on rank 0 and otherwise the DummyExperiment."""
@wraps(fn)
def experiment(self):
@rank_zero_only
def get_experiment():
return fn(self)
return get_experiment() or DummyExperiment()
return experiment
class CustomWandbLogger(WandbLogger):
def __init__(self, *args, **kwargs):
"""Modified logger that insists on a wandb.init() call and catches wandb's error if thrown."""
super().__init__(*args, **kwargs)
@property
@rank_zero_experiment
def experiment(self):
r"""
Actual wandb object. To use wandb features in your
:class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
Example::
.. code-block:: python
self.logger.experiment.some_wandb_function()
"""
if self._experiment is None:
if self._offline:
os.environ["WANDB_MODE"] = "dryrun"
attach_id = getattr(self, "_attach_id", None)
if wandb.run is not None:
# wandb process already created in this instance
rank_zero_warn(
"There is a wandb run already in progress and newly created instances of `WandbLogger` will reuse"
" this run. If this is not desired, call `wandb.finish()` before instantiating `WandbLogger`."
)
self._experiment = wandb.run
elif attach_id is not None and hasattr(wandb, "_attach"):
# attach to wandb process referenced
self._experiment = wandb._attach(attach_id)
else:
# create new wandb process
while True:
try:
self._experiment = wandb.init(**self._wandb_init)
break
except Exception as e:
print("wandb Exception:\n", e)
t = random.randint(30, 60)
print(f"Sleeping for {t} seconds")
time.sleep(t)
# define default x-axis
if getattr(self._experiment, "define_metric", None):
self._experiment.define_metric("trainer/global_step")
self._experiment.define_metric("*", step_metric="trainer/global_step", step_sync=True)
return self._experiment
class SequenceLightningModule(pl.LightningModule):
def __init__(self, config):
# Disable profiling executor. This reduces memory and increases speed.
try:
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
except AttributeError:
pass
super().__init__()
# Passing in config expands it one level, so can access by self.hparams.train instead of self.hparams.config.train
self.save_hyperparameters(config, logger=False)
# Dataset arguments
self.dataset = SequenceDataset.registry[self.hparams.dataset._name_](
**self.hparams.dataset
)
# Check hparams
self._check_config()
# PL has some bugs, so add hooks and make sure they're only called once
self._has_setup = False
self.setup() ## Added by KS
def setup(self, stage=None):
if not self.hparams.train.disable_dataset:
self.dataset.setup()
# We need to set up the model in setup() because for some reason when training with DDP, one GPU uses much more memory than the others
# In order to not overwrite the model multiple times during different stages, we need this hack
# TODO PL 1.5 seems to have an option to skip hooks to avoid this
# https://github.com/PyTorchLightning/pytorch-lightning/issues/5410#issuecomment-762257024
if self._has_setup:
return
else:
self._has_setup = True
# Convenience feature: if model specifies encoder, combine it with main encoder
encoder_cfg = utils.to_list(self.hparams.encoder) + utils.to_list(
self.hparams.model.pop("encoder", None)
)
decoder_cfg = utils.to_list(
self.hparams.model.pop("decoder", None)
) + utils.to_list(self.hparams.decoder)
# Instantiate model
self.model = utils.instantiate(registry.model, self.hparams.model)
if (name := self.hparams.train.post_init_hook['_name_']) is not None:
kwargs = self.hparams.train.post_init_hook.copy()
del kwargs['_name_']
for module in self.modules():
if hasattr(module, name):
getattr(module, name)(**kwargs)
# Instantiate the task
self.task = utils.instantiate(
tasks.registry, self.hparams.task, dataset=self.dataset, model=self.model
)
# Create encoders and decoders
encoder = encoders.instantiate(
encoder_cfg, dataset=self.dataset, model=self.model
)
decoder = decoders.instantiate(
decoder_cfg, model=self.model, dataset=self.dataset
)
# Extract the modules so they show up in the top level parameter count
self.encoder = U.PassthroughSequential(self.task.encoder, encoder)
self.decoder = U.PassthroughSequential(decoder, self.task.decoder)
self.loss = self.task.loss
self.loss_val = self.task.loss
if hasattr(self.task, 'loss_val'):
self.loss_val = self.task.loss_val
self.metrics = self.task.metrics
self.train_torchmetrics = self.task.train_torchmetrics
self.val_torchmetrics = self.task.val_torchmetrics
self.test_torchmetrics = self.task.test_torchmetrics
def load_state_dict(self, state_dict, strict=False):
if self.hparams.train.pretrained_model_state_hook['_name_'] is not None:
model_state_hook = utils.instantiate(
registry.model_state_hook,
self.hparams.train.pretrained_model_state_hook.copy(),
partial=True,
)
state_dict = model_state_hook(self.model, state_dict)
print("Custom load_state_dict function is running.")
# strict==True will require all modules to match
# strict==False can allow encoder/decoder to be loaded from scratch too
return super().load_state_dict(state_dict, strict=strict)
def _check_config(self):
assert self.hparams.train.state.mode in [None, "none", "null", "reset", "bptt", "tbptt"]
assert (
(n := self.hparams.train.state.n_context) is None
or isinstance(n, int)
and n >= 0
)
assert (
(n := self.hparams.train.state.n_context_eval) is None
or isinstance(n, int)
and n >= 0
)
def _initialize_state(self):
"""Called at model setup and start of epoch to completely reset state"""
self._state = None
self._memory_chunks = []
def _reset_state(self, batch, device=None):
"""Called to construct default_state when necessary, e.g. during BPTT"""
device = device or batch[0].device
self._state = self.model.default_state(*batch[0].shape[:1], device=device)
def _detach_state(self, state):
if isinstance(state, torch.Tensor):
return state.detach()
elif isinstance(state, tuple):
return tuple(self._detach_state(s) for s in state)
elif isinstance(state, list):
return [self._detach_state(s) for s in state]
elif isinstance(state, dict):
return {k: self._detach_state(v) for k, v in state.items()}
elif state is None:
return None
else:
raise NotImplementedError
def _process_state(self, batch, batch_idx, train=True):
"""Handle logic for state context."""
# Number of context steps
key = "n_context" if train else "n_context_eval"
n_context = self.hparams.train.state.get(key)
# Don't need to do anything if 0 context steps. Make sure there is no state
if n_context == 0 and self.hparams.train.state.mode not in ['tbptt']:
self._initialize_state()
return
# Reset state if needed
if self.hparams.train.state.mode == "reset":
if batch_idx % (n_context + 1) == 0:
self._reset_state(batch)
# Pass through memory chunks
elif self.hparams.train.state.mode == "bptt":
self._reset_state(batch)
with torch.no_grad(): # should be unnecessary because individual modules should handle this
for _batch in self._memory_chunks:
self.forward(_batch)
# Prepare for next step
self._memory_chunks.append(batch)
self._memory_chunks = self._memory_chunks[-n_context:]
elif self.hparams.train.state.mode == 'tbptt':
_, _, z = batch
reset = z["reset"]
if reset:
self._reset_state(batch)
else:
self._state = self._detach_state(self._state)
# def forward(self, batch):
# """Passes a batch through the encoder, backbone, and decoder"""
# # z holds arguments such as sequence length
# x, y, *z = batch # z holds extra dataloader info such as resolution
# if len(z) == 0:
# z = {}
# else:
# assert len(z) == 1 and isinstance(z[0], dict), "Dataloader must return dictionary of extra arguments"
# z = z[0]
# x, w = self.encoder(x, **z) # w can model-specific constructions such as key_padding_mask for transformers or state for RNNs
# x, state = self.model(x, **w, state=self._state)
# self._state = state
# x, w = self.decoder(x, state=state, **z)
# return x, y, w
def forward(self, batch):
return self.task.forward(batch, self.encoder, self.model, self.decoder, self._state)
def step(self, x_t):
x_t, *_ = self.encoder(x_t) # Potential edge case for encoders that expect (B, L, H)?
x_t, state = self.model.step(x_t, state=self._state)
self._state = state
# x_t = x_t[:, None, ...] # Dummy length
# x_t, *_ = self.decoder(x_t, state=state)
# x_t = x_t[:, 0, ...]
x_t, *_ = self.decoder.step(x_t, state=state)
return x_t
def _shared_step(self, batch, batch_idx, prefix="train"):
self._process_state(batch, batch_idx, train=(prefix == "train"))
x, y, w = self.forward(batch)
# Loss
if prefix == 'train':
loss = self.loss(x, y, **w)
else:
loss = self.loss_val(x, y, **w)
# Metrics
metrics = self.metrics(x, y, **w)
metrics["loss"] = loss
metrics = {f"{prefix}/{k}": v for k, v in metrics.items()}
# Calculate torchmetrics
torchmetrics = getattr(self, f'{prefix}_torchmetrics')
torchmetrics(x, y, loss=loss)
log_on_step = 'eval' in self.hparams and self.hparams.eval.get('log_on_step', False) and prefix == 'train'
self.log_dict(
metrics,
on_step=log_on_step,
on_epoch=True,
prog_bar=True,
add_dataloader_idx=False,
sync_dist=True,
)
# log the whole dict, otherwise lightning takes the mean to reduce it
# https://pytorch-lightning.readthedocs.io/en/stable/visualize/logging_advanced.html#enable-metrics-for-distributed-training
self.log_dict(
torchmetrics,
on_step=log_on_step,
on_epoch=True,
prog_bar=True,
add_dataloader_idx=False,
sync_dist=True,
)
return loss
def on_train_epoch_start(self):
# Reset training torchmetrics
self.task._reset_torchmetrics("train")
def training_epoch_end(self, outputs):
# Log training torchmetrics
super().training_epoch_end(outputs)
def on_validation_epoch_start(self):
# Reset all validation torchmetrics
for name in self.val_loader_names:
self.task._reset_torchmetrics(name)
def validation_epoch_end(self, outputs):
# Log all validation torchmetrics
super().validation_epoch_end(outputs)
def on_test_epoch_start(self):
# Reset all test torchmetrics
for name in self.test_loader_names:
self.task._reset_torchmetrics(name)
def test_epoch_end(self, outputs):
# Log all test torchmetrics
super().test_epoch_end(outputs)
def training_step(self, batch, batch_idx, dataloader_idx=0):
loss = self._shared_step(batch, batch_idx, prefix="train")
# Log the loss explicitly so it shows up in WandB
# Note that this currently runs into a bug in the progress bar with ddp (as of 1.4.6)
# https://github.com/PyTorchLightning/pytorch-lightning/pull/9142
# We additionally log the epochs under 'trainer' to get a consistent prefix with 'global_step'
loss_epoch = {"trainer/loss": loss, "trainer/epoch": self.current_epoch}
self.log_dict(
loss_epoch,
on_step=True,
on_epoch=False,
prog_bar=False,
add_dataloader_idx=False,
sync_dist=True,
)
# Log any extra info that the models want to expose (e.g. output norms)
metrics = {}
for module in list(self.modules())[1:]:
if hasattr(module, "metrics"):
metrics.update(module.metrics)
self.log_dict(
metrics,
on_step=True,
on_epoch=False,
prog_bar=False,
add_dataloader_idx=False,
sync_dist=True,
)
return loss
def validation_step(self, batch, batch_idx, dataloader_idx=0):
ema = (
self.val_loader_names[dataloader_idx].endswith("/ema")
and self.optimizers().optimizer.stepped
) # There's a bit of an annoying edge case with the first (0-th) epoch; it has to be excluded due to the initial sanity check
if ema:
self.optimizers().swap_ema()
loss = self._shared_step(
batch, batch_idx, prefix=self.val_loader_names[dataloader_idx]
)
if ema:
self.optimizers().swap_ema()
return loss
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self._shared_step(
batch, batch_idx, prefix=self.test_loader_names[dataloader_idx]
)
def configure_optimizers(self):
# Set zero weight decay for some params
if 'optimizer_param_grouping' in self.hparams.train:
add_optimizer_hooks(self.model, **self.hparams.train.optimizer_param_grouping)
# Normal parameters
all_params = list(self.parameters())
params = [p for p in all_params if not hasattr(p, "_optim")]
optimizer = utils.instantiate(registry.optimizer, self.hparams.optimizer, params)
del self.hparams.optimizer._name_
# Add parameters with special hyperparameters
hps = [getattr(p, "_optim") for p in all_params if hasattr(p, "_optim")]
hps = [
# dict(s) for s in set(frozenset(hp.items()) for hp in hps)
dict(s) for s in sorted(list(dict.fromkeys(frozenset(hp.items()) for hp in hps)))
# dict(s) for s in dict.fromkeys(frozenset(hp.items()) for hp in hps)
] # Unique dicts
print("Hyperparameter groups", hps)
for hp in hps:
params = [p for p in all_params if getattr(p, "_optim", None) == hp]
optimizer.add_param_group(
{"params": params, **self.hparams.optimizer, **hp}
)
### Layer Decay ###
if self.hparams.train.layer_decay['_name_'] is not None:
get_num_layer = utils.instantiate(
registry.layer_decay,
self.hparams.train.layer_decay['_name_'],
partial=True,
)
# Go through all parameters and get num layer
layer_wise_groups = {}
num_max_layers = 0
for name, p in self.named_parameters():
# Get layer id for each parameter in the model
layer_id = get_num_layer(name)
# Add to layer wise group
if layer_id not in layer_wise_groups:
layer_wise_groups[layer_id] = {
'params': [],
'lr': None,
'weight_decay': self.hparams.optimizer.weight_decay
}
layer_wise_groups[layer_id]['params'].append(p)
if layer_id > num_max_layers: num_max_layers = layer_id
# Update lr for each layer
for layer_id, group in layer_wise_groups.items():
group['lr'] = self.hparams.optimizer.lr * (self.hparams.train.layer_decay.decay ** (num_max_layers - layer_id))
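                # Illustrative (hypothetical numbers): with decay=0.75 and num_max_layers=4,
                # layer 0 trains at lr * 0.75**4 while the top layer trains at the full lr,
                # so earlier layers receive geometrically smaller learning rates.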
# Reset the torch optimizer's param groups
optimizer.param_groups = []
for layer_id, group in layer_wise_groups.items():
optimizer.add_param_group(group)
# Print optimizer info for debugging
keys = set([k for hp in hps for k in hp.keys()]) # Special hparams
utils.train.log_optimizer(log, optimizer, keys)
# Configure scheduler
if "scheduler" not in self.hparams:
return optimizer
lr_scheduler = utils.instantiate(
registry.scheduler, self.hparams.scheduler, optimizer
)
scheduler = {
"scheduler": lr_scheduler,
"interval": self.hparams.train.interval, # 'epoch' or 'step'
"monitor": self.hparams.train.monitor,
"name": "trainer/lr", # default is e.g. 'lr-AdamW'
}
# See documentation for how to configure the return
# https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.core.lightning.html#pytorch_lightning.core.lightning.LightningModule.configure_optimizers
return [optimizer], [scheduler]
def train_dataloader(self):
return self.dataset.train_dataloader(**self.hparams.loader)
def _eval_dataloaders_names(self, loaders, prefix):
"""Process loaders into a list of names and loaders"""
if utils.is_dict(loaders):
return [
f"{prefix}/{k}" if k is not None else prefix for k in loaders.keys()
], list(loaders.values())
elif utils.is_list(loaders):
return [f"{prefix}/{i}" for i in range(len(loaders))], loaders
else:
return [prefix], [loaders]
def _eval_dataloaders(self):
# Return all val + test loaders
val_loaders = self.dataset.val_dataloader(**self.hparams.loader)
test_loaders = self.dataset.test_dataloader(**self.hparams.loader)
val_loader_names, val_loaders = self._eval_dataloaders_names(val_loaders, "val")
test_loader_names, test_loaders = self._eval_dataloaders_names(
test_loaders, "test"
)
# Duplicate datasets for ema
if self.hparams.train.ema > 0.0:
val_loader_names += [name + "/ema" for name in val_loader_names]
val_loaders = val_loaders + val_loaders
test_loader_names += [name + "/ema" for name in test_loader_names]
test_loaders = test_loaders + test_loaders
# adding option to only have val loader at eval (eg if test is duplicate)
if self.hparams.train.get("remove_test_loader_in_eval", False):
return val_loader_names, val_loaders
# adding option to only have test loader at eval
elif self.hparams.train.get("remove_val_loader_in_eval", False):
return test_loader_names, test_loaders
# default behavior is to add test loaders in eval
else:
return val_loader_names + test_loader_names, val_loaders + test_loaders
def val_dataloader(self):
val_loader_names, val_loaders = self._eval_dataloaders()
self.val_loader_names = val_loader_names
return val_loaders
def test_dataloader(self):
test_loader_names, test_loaders = self._eval_dataloaders()
self.test_loader_names = ["final/" + name for name in test_loader_names]
return test_loaders
### pytorch-lightning utils and entrypoint ###
def create_trainer(config, **kwargs):
callbacks: List[pl.Callback] = []
logger = None
# WandB Logging
if config.get("wandb") is not None:
# Pass in wandb.init(config=) argument to get the nice 'x.y.0.z' hparams logged
# Can pass in config_exclude_keys='wandb' to remove certain groups
import wandb
logger = CustomWandbLogger(
config=utils.to_dict(config, recursive=True),
settings=wandb.Settings(start_method="fork"),
**config.wandb,
)
# Lightning callbacks
if "callbacks" in config:
for _name_, callback in config.callbacks.items():
if config.get("wandb") is None and _name_ in ["learning_rate_monitor"]:
continue
log.info(f"Instantiating callback <{registry.callbacks[_name_]}>")
callback._name_ = _name_
callbacks.append(utils.instantiate(registry.callbacks, callback))
# Add ProgressiveResizing callback
if config.callbacks.get("progressive_resizing", None) is not None:
num_stages = len(config.callbacks.progressive_resizing.stage_params)
print(f"Progressive Resizing: {num_stages} stages")
for i, e in enumerate(config.callbacks.progressive_resizing.stage_params):
# Stage params are resolution and epochs, pretty print
print(f"\tStage {i}: {e['resolution']} @ {e['epochs']} epochs")
# Configure ddp automatically
n_devices = config.trainer.get('devices', 1)
if isinstance(n_devices, Sequence): # trainer.devices could be [1, 3] for example
n_devices = len(n_devices)
if n_devices > 1 and config.trainer.get('strategy', None) is None:
config.trainer.strategy = dict(
_target_='pytorch_lightning.strategies.DDPStrategy',
find_unused_parameters=False,
gradient_as_bucket_view=True, # https://pytorch-lightning.readthedocs.io/en/stable/advanced/advanced_gpu.html#ddp-optimizations
)
# Init lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
# special processing for seqlen warmup scheduler (reload)
if config.callbacks.get("seqlen_warmup_reload", None) is not None:
# we need to instantiate manually instead of with hydra, since it expects a dict instead of a hydra config for the accumulate_grad_batches
# so we convert everything to dicts (from hydra configs)
trainer_config_dict = dict(config.trainer)
epochs_cume = 0 # track cumulative epochs
accumulate_grad_schedule = {} # contains the accumulate_grad_batches schedule to init the trainer
for stage in config.callbacks.seqlen_warmup_reload.stage_params:
batch_size = stage['batch_size'] # curr batch size at this stage
grad_accum_factor = config.train.global_batch_size // batch_size # grad accum factor for this stage
accumulate_grad_schedule[epochs_cume] = grad_accum_factor # set the grad accum factor for this stage
epochs_cume += stage['epochs'] # increment epochs_cume for next stage
trainer_config_dict['accumulate_grad_batches'] = accumulate_grad_schedule # set the accumulate_grad_batches schedule
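        # Illustrative (hypothetical numbers): global_batch_size=256 with stages of batch_size 32
        # for 5 epochs then 64 for 5 epochs yields accumulate_grad_schedule == {0: 8, 5: 4}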
trainer_config_dict.pop('_target_') # only hydra uses this to instantiate
# Set DDPStrategy to work with pl.Trainer
config.trainer.pop('strategy')
trainer_config_dict['strategy'] = DDPStrategy(find_unused_parameters=False, gradient_as_bucket_view=True)
trainer = pl.Trainer(**trainer_config_dict, callbacks=callbacks, logger=logger)
else:
trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks, logger=logger)
return trainer
def train(config):
if config.train.seed is not None:
pl.seed_everything(config.train.seed, workers=True)
trainer = create_trainer(config)
model = SequenceLightningModule(config)
# Load pretrained_model if specified
if config.train.get("pretrained_model_path", None) is not None:
# PTL style. Note, method returns a new model object, and need to pass config.
model = SequenceLightningModule.load_from_checkpoint(
config.train.pretrained_model_path,
config=config,
strict=config.train.pretrained_model_strict_load,
)
# Run initial validation epoch (useful for debugging, finetuning)
if config.train.validate_at_start:
print("Running validation before training")
trainer.validate(model)
if config.train.ckpt is not None:
trainer.fit(model, ckpt_path=config.train.ckpt)
else:
trainer.fit(model)
if config.train.test:
trainer.test(model)
@hydra.main(config_path="configs", config_name="config.yaml")
def main(config: OmegaConf):
# Process config:
# - register evaluation resolver
# - filter out keys used only for interpolation
# - optional hooks, including disabling python warnings or debug friendly configuration
config = utils.train.process_config(config)
# Pretty print config using Rich library
utils.train.print_config(config, resolve=True)
train(config)
if __name__ == "__main__":
main() | hyena-dna-main | train.py |
import torch
import torch.nn.functional as F
from einops import rearrange
from fftconv import fftconv_fwd, fftconv_bwd
def fftconv_ref(u, k, D, dropout_mask):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
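    # zero-padding to 2*seqlen makes the circular FFT convolution below equivalent to a linear
    # convolution truncated back to seqlen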
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm='forward')[..., :seqlen]
out = y + u * D.unsqueeze(-1)
return (F.gelu(out) * rearrange(dropout_mask, 'b H -> b H 1')).to(dtype=u.dtype)
def fftconv_fast(u, k, D, dropout_mask):
"""Fuse padding + rfft + pointwise mult + ifft + multiply with D + gelu + dropout
"""
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size)
out = fftconv_fwd(u, k_f, D, dropout_mask, fft_size)
return out
def fftconv_fast_bwd(dout, u, k, D, dropout_mask=None):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size)
dx, dk_f, dD = fftconv_bwd(dout, u, k_f, D, dropout_mask, fft_size)
dk = torch.fft.irfft(dk_f, n=fft_size, norm='forward')[..., :seqlen]
return dx, dk, dD
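# Hedged sanity-check sketch (defined but not called): compares the fused CUDA kernel against the
# eager reference above. That `fftconv_fwd` folds the 1/fft_size normalization, GELU, and dropout
# mask into the kernel exactly as `fftconv_ref` does is an assumption here, as are the tolerances.
def _check_fwd_matches_ref(u, k, D, dropout_mask, rtol=1e-3, atol=1e-3):
    out_ref = fftconv_ref(u, k, D, dropout_mask)
    out_fast = fftconv_fast(u, k, D, dropout_mask)
    torch.testing.assert_close(out_fast, out_ref, rtol=rtol, atol=atol)
    return out_ref, out_fast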
device = 'cuda'
dtype = torch.float32
# dtype = torch.float16
batch_size = 64
H = 256
fft_size = 2048
seqlen = 1024
dropout_prob = 0.37
torch.manual_seed(0)
u = torch.randn(batch_size, H, seqlen, device=device, dtype=dtype, requires_grad=True)
k = torch.randn(H, seqlen, device=device, requires_grad=True)
D = torch.randn(H, device=device, requires_grad=True)
dropout_mask = F.dropout(torch.ones(batch_size, H, device=device), dropout_prob)
out = fftconv_ref(u, k, D, dropout_mask)
out = fftconv_fast(u, k, D, dropout_mask)
g = torch.randn_like(out)
fftconv_fast_bwd(g, u, k, D, dropout_mask)
| hyena-dna-main | csrc/fftconv/launch_fftconv.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
raise_if_cuda_home_none("fftconv")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
# cc_flag.append("-gencode")
# cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
ext_modules.append(
CUDAExtension(
'fftconv', [
'fftconv.cpp',
'fftconv_cuda.cu',
],
extra_compile_args={'cxx': ['-g', '-march=native', '-funroll-loops'],
'nvcc': ['-O3', '--threads', '4', '-lineinfo', '--use_fast_math', '-std=c++17', '-arch=compute_70']
# extra_compile_args={'cxx': ['-O3'],
# 'nvcc': append_nvcc_threads(['-O3', '-lineinfo', '--use_fast_math', '-std=c++17'] + cc_flag)
},
include_dirs=[os.path.join(this_dir, 'mathdx/22.02/include')]
)
)
torch.utils.cpp_extension.COMMON_NVCC_FLAGS.remove('-D__CUDA_NO_HALF2_OPERATORS__')
setup(
name="fftconv",
version="0.1",
description="FFTConv for state-space models",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| hyena-dna-main | csrc/fftconv/setup.py |
import math
import re
import numpy as np
# N = 8192
N = 16384
# The case of 0 / N is special, we want to simplify it to 0 / 2 instead of 0 / 1
numerator = np.arange(1, N // 8 + 1)
gcd = np.gcd(numerator, N)
num = numerator // gcd
denom = N // gcd
lut_vals = ['T_2_0'] + [f'T_{d}_{n}' for n, d in zip(num, denom)]
lut_string = f"static const __device__ float2 lut_mine_sp_8_{N}[{N // 8 + 1}] = {{\n {','.join(lut_vals)}\n}};"
print(lut_string)
# Only define new values if it's not already in the cuFFTDx lookup table
cufftdx_lut_filename = 'mathdx/22.02/include/cufftdx/include/database/lut_defines_0.hpp.inc'
matches = set()
reg = re.compile(f'^#define T_{N}_([0-9]+) ')
with open(cufftdx_lut_filename, 'r') as f:
for line in f:
if (match := reg.match(line)) is not None:
matches.add(int(match[1]))
numerator = np.arange(1, N // 8 + 1, 2)
angle = -2 * math.pi * numerator.astype(np.float64) / N
cos, sin = np.cos(angle), np.sin(angle)
defs = [f'#define T_{N}_{n} {{{c:.40f},{s:.40f}}}' for n, c, s in zip(numerator, cos, sin) if n not in matches]
def_string = '\n'.join(defs)
print(def_string)
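# Hedged helper sketch (not used by the generation code above): recomputes a single twiddle factor
# T_{denom}_{num} = (cos(theta), sin(theta)) with theta = -2*pi*num/denom, i.e. the same formula as
# `angle` above, which is handy for spot-checking an emitted #define by hand.
def _twiddle(num, denom):
    theta = -2 * math.pi * num / denom
    return math.cos(theta), math.sin(theta)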
| hyena-dna-main | csrc/fftconv/lut_code_gen.py |
#!/usr/bin/env python3
import argparse
import yaml
from tqdm import tqdm
import typing as tp
import numpy as np
import pandas as pd
from copy import deepcopy
from collections import OrderedDict
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.nn.functional as F
import pytorch_lightning as pl
from einops import rearrange, repeat
import sys, os
FILEDIR = os.path.realpath(__file__)
sys.path.append(os.path.join(FILEDIR, '..'))
from src.models.sequence.long_conv_lm import ConvLMHeadModel
# from src.dataloaders.icl_genomics_dataloader import ICLGenomics
from src.dataloaders.genomics import ICLGenomics
def exists(x):
return x is not None
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def soft_prompting():
parser = argparse.ArgumentParser()
parser.add_argument("--ckpt_path", help="Path to pretrained model checkpoint")
parser.add_argument("--dataset", default='none')
parser.add_argument("--config", default='./configs/evals/soft_prompting_genomics.yaml')
parser.add_argument("--results", default='./results/soft_prompting')
args = parser.parse_args()
os.makedirs(args.results, exist_ok=True)
# load configs
config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
cfg_model = config['model'].copy()
cfg_dataset = config['dataset'].copy()
cfg_tuning = config['tuning'].copy()
np.random.seed(config['seed'])
torch.manual_seed(config['seed'])
rng = np.random.RandomState(config['seed'])
# dataset_name num_seqs num_classes median_len std
# dummy_mouse_enhancers_ensembl 1210 2 2381 984.4
# demo_coding_vs_intergenomic_seqs 100_000 2 200 0
# demo_human_or_worm 100_000 2 200 0
# human_enhancers_cohn 27791 2 500 0
# human_enhancers_ensembl 154842 2 269 122.6
# human_ensembl_regulatory 289061 3 401 184.3
# human_nontata_promoters 36131 2 251 0
# human_ocr_ensembl 174756 2 315 108.1
# chrom_names = [
# 'chr11', 'chr13', 'chr15', 'chr17', 'chr19', 'chr21', 'chr2', 'chr4', 'chr6', 'chr8', 'chr10', 'chr12',
# 'chr14', 'chr16', 'chr18', 'chr20', 'chr22', 'chrX', 'chrY', 'chr1', 'chr3', 'chr5', 'chr7', 'chr9'
# ]
nuc_chars = list('ACGTN')
characters = nuc_chars # + chrom_names
label_to_token = {0: 'A', 1: 'N'}
datasets = {
'dummy_mouse_enhancers_ensembl': {
'max_length': 3200,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
# 'demo_coding_vs_intergenomic_seqs': {
# 'max_length': 202,
# 'd_output': 2,
# 'characters': characters,
# 'label_to_token': label_to_token
# },
# 'demo_human_or_worm': {
# 'max_length': 202,
# 'd_output': 2,
# 'characters': characters,
# 'label_to_token': label_to_token,
# },
'human_enhancers_cohn': {
'max_length': 502,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_nontata_promoters': {
'max_length': 251, #253
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_enhancers_ensembl': {
'max_length': 320,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_ensembl_regulatory': {
'max_length': 600,
'd_output': 3,
'characters': characters,
'label_to_token': {0: 'A', 1: 'G', 2: 'N'},
},
'human_ocr_ensembl': {
'max_length': 420,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
}
}
df_results = []
df_i = 0
ds_iter = datasets.items() if args.dataset=='none' else zip([args.dataset], [datasets[args.dataset]])
for dataset, dataset_cfg in ds_iter:
print(f'\nDataset {dataset}...')
for shots in cfg_dataset['shots']:
print(f'...with {shots} shots...')
cfg = cfg_dataset.copy()
cfg.update(dataset_cfg)
cfg['dataset_name'] = dataset
cfg['shots'] = shots
loader = ICLGenomics(**cfg)
loader.setup()
for soft_tokens in cfg_tuning['soft_tokens']:
print(f'...and {soft_tokens} soft tokens...')
# print('Pretrained model...')
pretrained_model = load_model(
cfg_model=cfg_model,
ckpt_path=args.ckpt_path,
n_soft_tokens=soft_tokens,
soft_token_pdrop=cfg_tuning['soft_token_pdrop'],
max_length=cfg['max_length'] if shots>0 else None
)
pretrained_model.to(DEVICE)
if soft_tokens>0: # we only tune when using soft tokens!
print('...tuning...')
pretrained_model = tune_model(
pretrained_model, #deepcopy(pretrained_model).to(DEVICE),
loader,
cfg_tuning,
rng=rng
)
print('...evaluating...')
acc = eval_on_loaders(pretrained_model, {dataset: loader})[dataset]
df_results.append(
pd.DataFrame({
'dataset': dataset,
'model': 'pretrained',
'shots': shots,
'soft_tokens': soft_tokens,
'eval_acc': acc
}, index=[df_i])
)
df_i += 1
pd.concat(df_results).to_csv(
os.path.join(
args.results,
f'soft_prompting_performance_{dataset}.csv'
)
)
del pretrained_model
def load_model(
cfg_model: tp.Dict,
ckpt_path: str=None,
n_soft_tokens: int=0,
soft_token_pdrop: float=0.,
max_length: int=None
):
model = ConvLMHeadModel(**cfg_model)
if ckpt_path is not None:
state_dict = torch.load(ckpt_path, map_location='cpu')
        # loads a checkpoint trained with DDP into a single model by removing the "model." prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
# need to remove torchmetrics. to remove keys, need to convert to list first
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
model.load_state_dict(model_state_dict)
return LitModel(model, n_soft_tokens=n_soft_tokens, soft_token_pdrop=soft_token_pdrop, max_length=max_length)
class LitModel(pl.LightningModule):
def __init__(self,
model,
n_soft_tokens: int=0,
soft_token_pdrop: float=0.,
max_length: int=None
):
super().__init__()
self.model = model
requires_grad(self.model, False) # we only want to train soft tokens
self.max_length = max_length
d_model = self.model.lm_head.weight.shape[1]
self.n_soft_tokens = n_soft_tokens
soft_tokens = torch.nn.Parameter(torch.zeros(n_soft_tokens, d_model)) if n_soft_tokens>0 else None
if exists(soft_tokens):
torch.nn.init.normal_(soft_tokens, mean=0.0, std=0.02)
self.soft_tokens = soft_tokens
self.soft_tokens_drop = torch.nn.Dropout(soft_token_pdrop) if soft_token_pdrop>0 else torch.nn.Identity()
def forward(self, x: torch.Tensor):
# get embeddings
with torch.no_grad():
hidden_states = self.model.backbone.embeddings(x)
# attach soft tokens
if exists(self.soft_tokens):
hidden_states = torch.cat([
repeat(self.soft_tokens_drop(self.soft_tokens), 'n d -> b n d', b=hidden_states.shape[0]),
hidden_states
], dim=1)
# forward
residual = None
for layer in self.model.backbone.layers:
hidden_states, residual = layer(hidden_states, residual)
dropped = self.model.backbone.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.model.backbone.ln_f(residual.to(dtype=self.model.backbone.ln_f.weight.dtype))
return self.model.lm_head(hidden_states)
def step(self, batch: tp.Tuple[torch.Tensor], phase: str='train'):
# get ys
x, y = batch['x'].to(DEVICE), batch['y'].to(DEVICE)
labels_idx = x.shape[1]-1
if exists(self.max_length):
x = torch.cat([x, y], dim=1)
labels_idx = self.get_labels_idx(x)
y = x[:,labels_idx]
# forward
logits = self(x)
logits = logits[:,self.n_soft_tokens:] # we exclude soft tokens
logits = logits[:,labels_idx-1] # previous token predicts target
if logits.ndim>2:
logits = rearrange(logits, 'b n c -> (b n) c')
if y.ndim==2:
y = rearrange(y, 'b n -> (b n)')
# compute loss/acc
loss = F.cross_entropy(logits, y)
preds = logits.argmax(axis=-1)
acc = torch.mean((preds==y).to(torch.float32))
return {'loss': loss, 'acc': acc}
def get_labels_idx(self, x):
return np.concatenate([
[self.max_length+1],
np.arange((2*self.max_length)+4, x.shape[1], self.max_length+3)
])
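# Hedged shape sketch for the soft-prompt prepend in LitModel.forward (standalone helper with
# hypothetical sizes, not called anywhere): the learned soft tokens are broadcast across the batch
# and concatenated in front of the token embeddings, growing the sequence by n_soft_tokens.
def _soft_prompt_shape_example(batch=2, seq_len=5, d_model=8, n_soft_tokens=3):
    hidden_states = torch.zeros(batch, seq_len, d_model)
    soft_tokens = torch.zeros(n_soft_tokens, d_model)
    out = torch.cat([repeat(soft_tokens, 'n d -> b n d', b=batch), hidden_states], dim=1)
    assert out.shape == (batch, seq_len + n_soft_tokens, d_model)
    return out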
def tune_model(model, loader, cfg_tuning, verbose: bool=True, rng: np.random.RandomState=None):
rng = np.random.RandomState(0) if rng is None else rng
optimizer = torch.optim.AdamW(
model.parameters(),
weight_decay=float(cfg_tuning['weight_decay']),
lr=float(cfg_tuning['lr'])
)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=optimizer,
mode='min',
factor=0.1,
patience=0
)
best_model = deepcopy(model)
requires_grad(best_model, False)
step = 0
losses, accs, val_losses = [], [], []
for epoch in range(cfg_tuning['max_epochs']):
if verbose:
print(f'Epoch {epoch}...')
# train epoch:
model.train()
for i, (x,y) in enumerate(loader.train_dataloader()):
batch = {'x': x, 'y': y}
model.on_train_batch_start(batch=batch, batch_idx=step)
with torch.cuda.amp.autocast():
out = model.step(batch)
loss, acc = out['loss'], out['acc']
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg_tuning.get('gradient_clip_val', 1.0))
losses.append(loss.cpu().detach().numpy().mean())
accs.append(acc.cpu().detach().numpy())
# accumulate gradients of N batches
if (i + 1) % cfg_tuning['accumulate_grad_batches'] == 0:
optimizer.step()
optimizer.zero_grad()
# update_ema(ema, model, decay=cfg_tuning['ema_decay'])
step += 1
# eval epoch:
model.eval()
val_loss = []
with torch.no_grad():
for x, y in loader.val_dataloader():
batch = {'x': x, 'y': y}
model.on_train_batch_start(batch=batch, batch_idx=step)
out = model.step(batch)
loss, acc = out['loss'], out['acc']
val_loss.append(loss.cpu().detach().numpy())
val_losses.append(np.mean(val_loss))
if val_losses[-1]==np.min(val_losses): # also covers first epoch
update_ema(best_model, model, decay=0)
scheduler.step(val_losses[-1])
if verbose:
print(f'\tstep {step}; avg. val loss: {val_losses[-1]:1.4f}')
if (epoch > 0 and sum(val_losses[-1] >= val_losses[:-1])>1) or (epoch+1)>=cfg_tuning['max_epochs']:
break
best_model = best_model.to(DEVICE)
requires_grad(best_model, True) # we turn grads back on for completion, even though model will not be trained further...
return best_model #, ema
@torch.no_grad()
def update_ema(ema_model, model, decay=0.999):
ema_params = OrderedDict(ema_model.named_parameters())
model_params = OrderedDict(model.named_parameters())
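    # In-place update per parameter: ema_param <- decay * ema_param + (1 - decay) * param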
for name, param in model_params.items():
ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def eval_on_loaders(model, loaders):
results = {}
for name, loader in loaders.items():
print(f'Evaluating on {name} data...')
all_acc = []
val_loader = loader.val_dataloader()
for x,y in tqdm(val_loader):
x = x.to(DEVICE)
with torch.no_grad():
logits = model(x)
logits = logits[:, -1]
logits = logits.cpu().detach().numpy()
batch_preds = logits.argmax(axis=-1)
# batch_preds = np.array(batch_preds)
y = y.cpu().detach().numpy()
batch_preds = batch_preds.flatten()
y = y.flatten()
acc = (batch_preds == y).mean()
all_acc.append(acc)
results[name] = np.mean(all_acc)
print(f"{name}; full eval. accuracy: {results[name]:1.4f}")
return results
if __name__ == "__main__":
soft_prompting() | hyena-dna-main | evals/soft_prompting_genomics.py |
#!/usr/bin/env python3
import argparse
import yaml
from tqdm import tqdm
import typing as tp
import numpy as np
import pandas as pd
from copy import deepcopy
from collections import OrderedDict
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.nn.functional as F
import pytorch_lightning as pl
from einops import rearrange
import sys, os
FILEDIR = os.path.realpath(__file__)
sys.path.append(os.path.join(FILEDIR, '..'))
from src.models.sequence.long_conv_lm import ConvLMHeadModel
# from src.dataloaders.icl_genomics_dataloader import ICLGenomics
from src.dataloaders.genomics import ICLGenomics
# TODO:
# Make use of maximum long context: either put entire downstream dataset in context
# or add many tunable soft tokens (soft prompting)!
# -> just fill the context up one way or another and show what's possible!
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def instruction_tuned_ICL():
parser = argparse.ArgumentParser()
parser.add_argument("--ckpt_path", help="Path to pretrained model checkpoint")
parser.add_argument("--config", default='./configs/evals/instruction_tuned_genomics.yaml')
parser.add_argument("--results", default='./results/instruction_tuned_genomics')
args = parser.parse_args()
os.makedirs(args.results, exist_ok=True)
# load configs
config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
cfg_model = config['model'].copy()
cfg_dataset = config['dataset'].copy()
cfg_tuning = config['tuning'].copy()
np.random.seed(config['seed'])
torch.manual_seed(config['seed'])
rng = np.random.RandomState(config['seed'])
# dataset_name num_seqs num_classes median_len std
# dummy_mouse_enhancers_ensembl 1210 2 2381 984.4
# demo_coding_vs_intergenomic_seqs 100_000 2 200 0
# demo_human_or_worm 100_000 2 200 0
# human_enhancers_cohn 27791 2 500 0
# human_enhancers_ensembl 154842 2 269 122.6
# human_ensembl_regulatory 289061 3 401 184.3
# human_nontata_promoters 36131 2 251 0
# human_ocr_ensembl 174756 2 315 108.1
nuc_chars = list('ACGTN')
characters = nuc_chars # + chrom_names
label_to_token = {0: 'A', 1: 'N'}
datasets = {
'human_enhancers_cohn': {
'max_length': 502,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_nontata_promoters': {
'max_length': 251, #253
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_enhancers_ensembl': {
'max_length': 320,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_ensembl_regulatory': {
'max_length': 600,
'd_output': 3,
'characters': characters,
'label_to_token': {0: 'A', 1: 'G', 2: 'N'},
},
'human_ocr_ensembl': {
'max_length': 420,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
}
}
print('\n\nEvaluating instruction-tuned ICL performance... ')
df_results = []
df_i = 0
for tuning_samples in cfg_tuning['tuning_samples']:
print(f'...when tuning on {tuning_samples} samples...')
for shots in cfg_dataset['shots']:
print(f'...with {shots} shots...')
for dataset, dataset_cfg in datasets.items():
print(f'...from dataset {dataset}...')
print(f'Collecting tuning data...')
cfg = cfg_dataset.copy()
cfg.update(dataset_cfg)
cfg['dataset_name'] = dataset
cfg['shots'] = shots
loader = ICLGenomics(**cfg)
loader.setup()
# collect tuning samples
tuning_X = []
train_loader = iter(loader.train_dataloader())
samples_collected = 0
for x, y in tqdm(train_loader):
n = min(tuning_samples, x.shape[0])
tuning_X.append(torch.cat([x[:n], y[:n]], dim=1))
samples_collected += n
if samples_collected >= tuning_samples:
                        print(f'...stop because {tuning_samples} samples collected.')
break
tuning_X = torch.cat(tuning_X, dim=0)
if shots>0:
tuning_y_idx = np.concatenate([
[cfg['max_length']+1],
np.arange((2*cfg['max_length'])+4, tuning_X.shape[1], cfg['max_length']+3)
])
else:
tuning_y_idx = cfg['max_length']+1
tuning_y = tuning_X[:,tuning_y_idx]
tuning_loss_mask = tuning_y_idx-1 # prediction is always from previous token
print('Tuning pretrained model...')
pretrained_model = load_model(cfg_model, args.ckpt_path)
pretrained_model.to(DEVICE)
tuned_pretrained_model = tune_model(
deepcopy(pretrained_model).to(DEVICE),
tuning_X,
tuning_y,
cfg_tuning,
loss_mask=tuning_loss_mask,
rng=rng
)
# print('Tuning untrained model...')
# scratch_model = load_model(cfg_model)
# scratch_model.to(DEVICE)
# tuned_scratch_model = tune_model(
# scratch_model,
# tuning_X,
# tuning_y,
# cfg_tuning,
# loss_mask=tuning_loss_mask,
# rng=rng
# )
print('Evaluating ICL performance...')
for label, model in zip(
['tuned_pretrained'], #, 'scratchtrained'
[tuned_pretrained_model] # tuned_scratch_model
):
print(f'{label}:')
acc = eval_on_loaders(model, {dataset: loader})[dataset]
df_results.append(
pd.DataFrame({
'dataset': dataset,
'tuning_samples': tuning_samples,
'model': label,
'shots': shots,
'eval_acc': acc
}, index=[df_i])
)
df_i += 1
pd.concat(df_results).to_csv(
os.path.join(args.results, 'instruction_tuned_genomics.csv')
)
def load_model(cfg_model, ckpt_path: str=None):
model = ConvLMHeadModel(**cfg_model)
if ckpt_path is not None:
state_dict = torch.load(ckpt_path, map_location='cpu')
        # loads a checkpoint trained with DDP into a single model by removing the "model." prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
# need to remove torchmetrics. to remove keys, need to convert to list first
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
model.load_state_dict(model_state_dict)
return LitModel(model)
class LitModel(pl.LightningModule):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x: torch.Tensor):
return self.model(x)[0]
def step(self, batch: tp.Tuple[torch.Tensor], loss_mask: tp.Union[int, np.ndarray]=-1, phase: str='train'):
x, y = batch['x'].to(DEVICE), batch['y'].to(DEVICE)
loss_mask = -1 if loss_mask is None else loss_mask
out = self(x)
logits = out.logits[:,loss_mask]
if logits.ndim>2:
logits = rearrange(logits, 'b n c -> (b n) c')
if y.ndim==2:
y = rearrange(y, 'b n -> (b n)')
loss = F.cross_entropy(logits, y)
preds = logits.argmax(axis=-1)
acc = torch.mean((preds==y).to(torch.float32))
return {'loss': loss, 'acc': acc}
def tune_model(model, X, y, cfg_tuning, max_epochs: int=1, loss_mask=None, verbose: bool=True, rng: np.random.RandomState=None):
rng = np.random.RandomState(0) if rng is None else rng
    # # we use exponential moving average of model for downstream ICL...
# ema = deepcopy(model).to(DEVICE)
# requires_grad(ema, False)
# update_ema(ema, model, decay=0) # Ensure EMA is initialized with synced weights
# ema.eval()
optimizer = torch.optim.AdamW(
model.parameters(),
weight_decay=float(cfg_tuning['weight_decay']),
lr=float(cfg_tuning['lr'])
)
# split train/eval
n_samples = X.shape[0]
train_idx = np.arange(n_samples)
batch_size = min(len(train_idx), cfg_tuning['batch_size'])
epoch = 0
step = 0
losses, accs = [], []
stop_training = False
while not stop_training:
if verbose:
print(f'Epoch {epoch}...')
# train epoch:
model.train()
rng.shuffle(train_idx)
batch_i, batch_start = 0, 0
while batch_start+batch_size <= len(train_idx):
idx = train_idx[batch_start:batch_start+batch_size]
batch = {'x': X[idx], 'y': y[idx]}
model.on_train_batch_start(batch=batch, batch_idx=step)
out = model.step(batch, loss_mask=loss_mask)
loss, acc = out['loss'], out['acc']
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg_tuning.get('gradient_clip_val', 1.0))
losses.append(loss.cpu().detach().numpy().mean())
accs.append(acc.cpu().detach().numpy())
# accumulate gradients of N batches
if (batch_i + 1) % cfg_tuning['accumulate_grad_batches'] == 0:
optimizer.step()
optimizer.zero_grad()
# update_ema(ema, model, decay=cfg_tuning['ema_decay'])
step += 1
print(f'step: {step}; train loss: {losses[-1]}, acc: {accs[-1]}')
batch_start += batch_size
batch_i += 1
epoch += 1
if epoch>=max_epochs:
stop_training = True
return model #, ema
@torch.no_grad()
def update_ema(ema_model, model, decay=0.999):
ema_params = OrderedDict(ema_model.named_parameters())
model_params = OrderedDict(model.named_parameters())
for name, param in model_params.items():
ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def eval_on_loaders(model, loaders):
results = {}
for name, loader in loaders.items():
print(f'Evaluating on {name} data...')
all_acc = []
val_loader = loader.val_dataloader()
for batch in tqdm(val_loader):
x, y = batch
x = x.to(DEVICE)
with torch.no_grad():
out = model(x)
if type(out) == tuple: out = out[0]
logits = out.logits[:, -1]
logits = logits.cpu().detach().numpy()
batch_preds = logits.argmax(axis=-1)
# batch_preds = np.array(batch_preds)
y = y.cpu().detach().numpy()
batch_preds = batch_preds.flatten()
y = y.flatten()
acc = (batch_preds == y).mean()
all_acc.append(acc)
results[name] = np.mean(all_acc)
print(f"{name}; full eval. accuracy: {results[name]:1.4f}")
return results
if __name__ == "__main__":
instruction_tuned_ICL() | hyena-dna-main | evals/instruction_tuned_genomics.py |
import torch
import argparse
import os
import sys
import yaml
from tqdm import tqdm
import json
from src.models.sequence.long_conv_lm import DNAEmbeddingModel
from src.tasks.decoders import SequenceDecoder
from src.dataloaders import SequenceDataset
import numpy as np
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from src.dataloaders.genomic_bench_dataloader import GenomicBenchmark
from src.dataloaders.nucleotide_transformer_dataloader import NucleotideTransformer
try:
from tokenizers import Tokenizer
except:
pass
genomic_benchmark_datasets = ["dummy_mouse_enhancers_ensembl", "demo_coding_vs_intergenomic_seqs", "demo_human_or_worm", "human_enhancers_cohn", "human_enhancers_ensembl", "human_ensembl_regulatory", "human_nontata_promoters", "human_ocr_ensembl"]
nucleotide_datasets = [""]
class HG38Inference:
'''Model (backbone + decoder) inference, initially for enhancer model, but can be modified for other classification tasks as well.
model_cfg, dict: config for entire model, backbone and decoder head
ckpt_path, str: path to config
max_seq_len, int: max seq len of model (technically in the model_cfg already, but more explicit)
'''
def __init__(self, cfg, ckpt_path, max_seq_len, use_dataloader=False):
self.max_seq_len = max_seq_len
self.backbone, self.decoder, self.tokenizer = self.load_model(cfg, ckpt_path)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.backbone = self.backbone.to(self.device)
self.decoder = self.decoder.to(self.device)
# load dataloader if given
if use_dataloader:
self.loader = self.get_dataloader(cfg)
def get_dataloader(self, config):
cfg = yaml.load(open(config, 'r'), Loader=yaml.FullLoader)
dataset_name = cfg['dataset']["dataset_name"]
if dataset_name in genomic_benchmark_datasets:
loader = GenomicBenchmark(**cfg['dataset'])
else:
# assume the rest are in the nucleotide trans datasets
loader = NucleotideTransformer(**cfg['dataset'])
loader.setup()
return loader
def predict_on_list(self, seqs):
"""
        Makes predictions given a list of string sequences; handles tokenization and tensor conversion.
"""
preds = []
# sample code to loop thru each sample and tokenize first (char level)
for seq in tqdm(seqs):
if isinstance(self.tokenizer, Tokenizer):
seq = self.tokenizer.encode(seq).ids
else:
seq = self.tokenizer.encode(seq)
# can accept a batch, shape [B, seq_len, hidden_dim]
embeddings, _ = self.backbone(torch.tensor([seq]).to(device=self.device))
pred = self.decoder(embeddings)
preds.append(pred)
# we provide the predictions (you can pass back embeddings if you wish)
return preds
def predict_from_loader(self):
"""
Don't forget this returns a list of the labels too with the predictions
"""
all_preds = []
all_labels = []
# by default we'll use the test dataloader, but you can grab val_dataloader or train_dataloader too
for i, batch in enumerate(self.loader.test_dataloader()):
print('batch {}'.format(i))
x, y = batch
x = x.to(self.device)
# y = y.to(self.device)
# save the labels y
all_labels.append(y.cpu().detach().numpy())
embeddings, _ = self.backbone(x)
pred_batch = self.decoder(embeddings)
# take argmax of the predictions
pred_batch = torch.argmax(pred_batch, dim=1)
all_preds.append(pred_batch.cpu().detach().numpy())
# convert list to tensor
all_preds = np.concatenate(all_preds, axis=0)
all_labels = np.concatenate(all_labels, axis=0)
return all_preds, all_labels
def load_model(self, cfg, ckpt_path):
# get the configs
cfg = yaml.load(open(cfg, 'r'), Loader=yaml.FullLoader)
train_cfg = cfg['train'] # grab section `train` section of config
model_cfg = cfg['model'] # grab the `model` section of config
self.d_output = train_cfg['d_output'] # number of classes the head was trained on
# the state dict has both the backbone model and the decoder (normally as a Lightning module), but we need to instantiate both separately
# when not using Lightning.
# instantiate the model
backbone = DNAEmbeddingModel(**model_cfg) # instantiate the backbone separately from the decoder
# instantiate the decoder
decoder = SequenceDecoder(model_cfg['d_model'], d_output=self.d_output, l_output=0, mode='pool') # needs to know the d_model
state_dict = torch.load(ckpt_path, map_location='cpu') # has both backbone and decoder
        # loads a checkpoint trained with DDP into a single model by removing the "model." prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
# need to remove torchmetrics. to remove keys, need to convert to list first
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
        # the state_dict keys from the Lightning checkpoint don't exactly match the decoder's, so we remap them here
decoder_state_dict = {}
decoder_state_dict['output_transform.weight'] = model_state_dict.pop('decoder.0.output_transform.weight')
decoder_state_dict['output_transform.bias'] = model_state_dict.pop('decoder.0.output_transform.bias')
# now actually load the state dict to the decoder and backbone separately
decoder.load_state_dict(decoder_state_dict, strict=True)
backbone.load_state_dict(model_state_dict, strict=True)
# setup tokenizer
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_seq_len + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
return backbone, decoder, tokenizer
if __name__ == "__main__":
"""
    Example cmd for loading a pretrained model (that was fine-tuned). This checkpoint was trained on the 'human_nontata_promoters' dataset.
# (from safari-internal-inf root, note the -m and no '.py')
python -m evals.hg38_inference_decoder --config /home/workspace/eric/safari-internal/configs/evals/hg38_decoder.yaml \
--ckpt_path /home/workspace/eric/safari-internal/outputs/2023-04-14/04-32-17-578382/checkpoints/val/accuracy.ckpt
# enhancer (genomic benchmark)
python -m evals.hg38_inference_decoder --config /home/workspace/eric/safari-internal/configs/evals/hg38_decoder.yaml \
--ckpt_path /home/workspace/eric/safari-internal/outputs/2023-04-12/23-40-51-542457/checkpoints/val/mcc.ckpt --output_path /home/workspace/eric/safari-internal/outputs
# config is located here:
configs/evals/hg38_decoder.yaml
# download the checkpoints from google drive, and put it in the outputs/ dir
https://drive.google.com/drive/folders/11cDmLZgBHr3KkiCtS2V6sqI3Kf8lTW39?usp=share_link
# enhancer weights, from nucleotide transformer, binary classification
/home/workspace/eric/safari-internal/outputs/2023-04-12/23-40-51-542457/checkpoints/val/mcc.ckpt
https://drive.google.com/drive/folders/1wIijtwlqWwzNe_0d3meAXSk7oYJ2POMC?usp=share_link
# promoter tata weights
/home/workspace/eric/safari-internal/outputs/2023-05-01/04-13-05-495708/checkpoints/val/f1_macro.ckpt
note, this model is larger, 2 layers, d_model=256 (not 128!!), and d_inner=1024
https://drive.google.com/drive/folders/1tbIUYwScEox4SLFqeZIFp7Z4YvmIN0M3?usp=share_link
    # In general, you need to make sure the config has the same model settings as it was trained on.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
default=f"",
)
parser.add_argument(
"--ckpt_path",
default=f"",
help="Path to model state dict checkpoint"
)
parser.add_argument(
"--output_path",
default=f"",
help="Path to where to save npy file"
)
args = parser.parse_args()
task = HG38Inference(args.config, args.ckpt_path, max_seq_len=1024, use_dataloader=True)
# sample sequence, can pass a list of seqs (themselves a list of chars)
# seqs = ["ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT"]
# if you just have a list of sequences, as strings, you can use this function, returns list
# preds = task.predict_on_list(seqs) # return a list of predictions
# print(preds[0].shape) # shape is [batch, 2] for binary class prediction
# OR...
# or if you rather use the existing dataloader for the enhancer dataset, you can call this instead
# returns a np array
preds, labels = task.predict_from_loader()
# print(preds.shape) # shape is [batch, 2] for binary class prediction
# calculate accuracy of preds vs labels
acc = np.mean(preds.squeeze() == labels.squeeze())
print("Acc: ", acc)
breakpoint()
pred_path = os.path.join(args.output_path, "preds.npy")
label_path = os.path.join(args.output_path, "labels.npy")
# save as numpy arr
preds_np = np.array(preds)
labels_np = np.array(labels)
with open(pred_path, 'wb') as f:
np.save(f, preds_np)
with open(label_path, 'wb') as f:
np.save(f, labels_np)
| hyena-dna-main | evals/hg38_inference_decoder.py |
import torch
import argparse
import os
import sys
import yaml
from tqdm import tqdm
import json
sys.path.append(os.environ.get("SAFARI_PATH", "."))
from src.models.sequence.long_conv_lm import ConvLMHeadModel
# from transformers import AutoTokenizer, GPT2LMHeadModel
# from spacy.lang.en.stop_words import STOP_WORDS
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
try:
from tokenizers import Tokenizer
except:
pass
# https://github.com/openai/gpt-2/issues/131#issuecomment-492786058
# def preprocess(text):
# text = text.replace("“", '"')
# text = text.replace("”", '"')
# return '\n'+text.strip()
class HG38Encoder:
"Encoder inference for HG38 sequences"
def __init__(self, model_cfg, ckpt_path, max_seq_len):
self.max_seq_len = max_seq_len
self.model, self.tokenizer = self.load_model(model_cfg, ckpt_path)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = self.model.to(self.device)
def encode(self, seqs):
results = []
# sample code to loop thru each sample and tokenize first (char level)
for seq in tqdm(seqs):
if isinstance(self.tokenizer, Tokenizer):
tokenized_seq = self.tokenizer.encode(seq).ids
else:
tokenized_seq = self.tokenizer.encode(seq)
# can accept a batch, shape [B, seq_len, hidden_dim]
logits, __ = self.model(torch.tensor([tokenized_seq]).to(device=self.device))
# Using head, so just have logits
results.append(logits)
return results
def load_model(self, model_cfg, ckpt_path):
config = yaml.load(open(model_cfg, 'r'), Loader=yaml.FullLoader)
model = ConvLMHeadModel(**config['model_config'])
state_dict = torch.load(ckpt_path, map_location='cpu')
        # loads a checkpoint trained with DDP into a single model by removing the "model." prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
# need to remove torchmetrics. to remove keys, need to convert to list first
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
model.load_state_dict(state_dict["state_dict"])
# setup tokenizer
if config['tokenizer_name'] == 'char':
print("**Using Char-level tokenizer**")
# add to vocab
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_seq_len + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
print(tokenizer._vocab_str_to_int)
else:
raise NotImplementedError("You need to provide a custom tokenizer!")
return model, tokenizer
if __name__ == "__main__":
SAFARI_PATH = os.getenv('SAFARI_PATH', '.')
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_cfg",
default=f"{SAFARI_PATH}/configs/evals/hyena_small_150b.yaml",
)
parser.add_argument(
"--ckpt_path",
default=f"",
help="Path to model state dict checkpoint"
)
args = parser.parse_args()
task = HG38Encoder(args.model_cfg, args.ckpt_path, max_seq_len=1024)
# sample sequence, can pass a list of seqs (themselves a list of chars)
seqs = ["ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT"]
logits = task.encode(seqs)
print(logits)
print(logits[0].logits.shape)
breakpoint()
| hyena-dna-main | evals/hg38_inference.py |
import math
import torch
import torch.nn.functional as F
from functools import partial
import torchmetrics.functional as tm_f
import torch.distributions as dist
from sklearn.metrics import f1_score, roc_auc_score, matthews_corrcoef
from torchmetrics import Metric
from torchmetrics.classification import MulticlassRecall, MulticlassPrecision
class CorrectAggregatedMetric(Metric):
"""This is needed to calculate some metrics b/c small batch sizes cause aggregation via a simple
average to be off, as some classes might not be present in batch but will get penalized with a 0."""
def __init__(self, class_idx: int, dist_sync_on_step=False):
# call `self.add_state`for every internal state that is needed for the metrics computations
# dist_reduce_fx indicates the function that should be used to reduce
# state from multiple processes
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.class_idx = torch.tensor(class_idx)
self.add_state("numerator", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("denominator", default=torch.tensor(0.0), dist_reduce_fx="sum")
def _update(self, numerator, denominator, preds, y) -> tuple:
        raise NotImplementedError
def update(self, logits: torch.Tensor, y: torch.Tensor):
# update metric states
preds = torch.argmax(logits, dim=-1)
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
assert preds.shape == y.shape, f"preds shape {preds.shape} != y shape {y.shape}"
self.numerator, self.denominator = self._update(self.numerator, self.denominator, preds, y)
def compute(self):
# compute final result
value = self.numerator.float() / self.denominator if self.denominator > 0 else torch.tensor(0.0)
return value
def reset(self):
self.numerator = torch.tensor(0.0)
self.denominator = torch.tensor(0.0)
class AccuracyPerClass(CorrectAggregatedMetric):
"""Calculate per class accuracy, i.e. P(y_hat = class_idx AND y = class_idx OR y_hat != class_idx AND y != class_idx)
"""
def _update(self, numerator, denominator, preds, y) -> tuple:
# Filter down to the class of interest
class_idx = self.class_idx
relevant_idxs = (y == class_idx)
numerator += (preds[relevant_idxs] == class_idx).sum()
denominator += relevant_idxs.sum()
relevant_idxs = (y != class_idx)
numerator += (preds[relevant_idxs] != class_idx).sum()
denominator += relevant_idxs.sum()
return numerator, denominator
class PrecisionPerClass(CorrectAggregatedMetric):
"""Calculate per class precision, i.e. P(y_hat = y | y_hat = class_idx)
"""
def _update(self, numerator, denominator, preds, y) -> tuple:
# Filter down to the class of interest
class_idx = self.class_idx
relevant_idxs = (preds == class_idx)
numerator += (preds[relevant_idxs] == y[relevant_idxs]).sum()
denominator += relevant_idxs.sum()
return numerator, denominator
class RecallPerClass(CorrectAggregatedMetric):
"""Calculate per class recall, i.e. P(y_hat = y | y = class_idx)
"""
def _update(self, numerator, denominator, preds, y) -> tuple:
# Filter down to the class of interest
class_idx = self.class_idx
relevant_idxs = (y == class_idx)
numerator += (preds[relevant_idxs] == y[relevant_idxs]).sum()
denominator += relevant_idxs.sum()
return numerator, denominator
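# Hedged usage sketch for the per-class metrics above (hypothetical shapes, not wired into any
# training loop): numerator/denominator accumulate across update() calls, so classes that are
# missing from a small batch do not drag the aggregate toward zero.
def _per_class_metric_example(num_classes=3):
    logits = torch.randn(32, num_classes)          # already flattened to (num_tokens, num_classes)
    y = torch.randint(0, num_classes, (32,))       # integer targets
    recall_cls1 = RecallPerClass(class_idx=1)
    recall_cls1.update(logits, y)                  # call once per batch
    return recall_cls1.compute()                   # numerator / denominator over all updates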
def per_token_ppl(logits, y, ks, seq_len):
'''The only difference here (with standard ppl) is no averaging across the seq
logits: model preds, shape [B * seq_len, vocab_size]
y: targets, shape [B * seq_len]
ks: list[ints], of indexes for token of interest (to grab the ppl)
seq_len: int, sequence length
'''
# not averaged
ppl_per = torch.exp(F.cross_entropy(logits, y, reduction='none'))
ppl = ppl_per.view(-1, seq_len) # reshape to get seq len
k_tensor = torch.tensor(ks) - 1 # we need to convert to 0 index in the background
return torch.mean(ppl[:, k_tensor], dim=0).cpu().detach().numpy() # grab kth entry and average across batch
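# Hedged example sketch for per_token_ppl (hypothetical shapes, not called anywhere): with a batch
# of 4 sequences of length 8 flattened to (4*8, vocab), ks=[1, 8] returns the batch-averaged
# perplexity at the first and last positions.
def _per_token_ppl_example(vocab_size=16):
    logits = torch.randn(4 * 8, vocab_size)
    y = torch.randint(0, vocab_size, (4 * 8,))
    return per_token_ppl(logits, y, ks=[1, 8], seq_len=8)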
def mcc(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return matthews_corrcoef(y.cpu().numpy(), y_hat.cpu().numpy())
def multinomial_nll(logits, true_counts):
"""Compute the multinomial negative log-likelihood
Args:
logits: predicted logits values [B, 5000, 2]
true_counts: observed count values [B, 5000, 2]
"""
counts_per_example = torch.sum(true_counts, dim=-1) # sum across L, shape = [B, 1, 2]
# breakpoint()
# distr = dist.Multinomial(total_count=counts_per_example, logits=logits)
distr = dist.Multinomial(total_count=counts_per_example.max().item(), logits=logits)
return -torch.sum(distr.log_prob(true_counts)) / torch.tensor(true_counts.shape[0], dtype=torch.float16)
# counts_per_example = torch.sum(true_counts, dim=-1)
# dist = dist.Multinomial(total_count=counts_per_example,
# logits=logits)
# return (-torch.sum(dist.log_prob(true_counts)) /
# torch.tensor(true_counts.shape[0], dtype=torch.float32))
def splice_top_k_acc(logits, targets, pad_mask, k_class=1):
"""
Calculate top accuracy for kth class
By default, there are 3 classes for splice prediction
0 - no splice
1 - start splice
2 - end splice
logits: (batch_size, seq_len, num_classes)
targets: (batch_size, seq_len), --> note, they are not one-hot encoded yet
pad_mask: (batch_size, seq_len)
k_class: int, class we're calculating top k acc for
"""
num_classes = 3
# convert targets to one-hot
targets_onehot = F.one_hot(targets, num_classes=num_classes).float()
# retrieve the k_class logits and targets
logits_i = logits[:, :, k_class]
targets_i = targets_onehot[:, :, k_class] # shape is now (batch_size, seq_len)
pred_probs, labels = map(lambda x: x.view(-1), [logits_i, targets_i]) # Flatten
k = (labels == 1.0).sum().item() # grab positions in labels that are 1
_, top_k_indices = pred_probs.topk(k) # grab predictions at the k positions
correct = labels[top_k_indices] == 1.0 # compare if the predictions are correct (for this k_class only)
return correct.float().mean()
def bpnet_loss(logits, y):
'''Calculate loss for:
logits: (profile_logits, count_logits)
y: (y_profile, y_count)
1. profile logits and y_profile, using multinomial_nll loss
2. count logits and y_count, using MSE loss
and sum the two losses
'''
profile_logits, count_logits = logits
y_profile, y_count = y
# profile loss
profile_loss = multinomial_nll(profile_logits, y_profile)
# count loss
count_loss = mse(count_logits, y_count)
return profile_loss + count_loss
def last_k_ppl(logits, y, seq_len=1024, k=None):
'''
Calculate perplexity over the last k tokens of each sequence in the batch.
logits: (batch_size * seq_len, vocab_size), note: already flattened
y: (batch_size * seq_len), note: already flattened
seq_len: int, length of each sequence in the batch
k: if None, use all tokens in the sequence
returns: scalar ppl, averaged over the last k tokens of every sequence in the batch
'''
if k is None:
k = 0 # [-0:] slices the entire sequence
# reshape logits and y to (batch_size, seq_len, vocab_size) and (batch_size, seq_len) respectively
logits = logits.view(-1, seq_len, logits.shape[-1])
y = y.view(-1, seq_len)
# only use the last k values of seq dim in logits and y
logits = logits[:, -k:, :]
y = y[:, -k:]
# reshape to flatten the batch and seq_len dimensions
logits = logits.reshape(-1, logits.shape[-1])
y = y.reshape(-1)
# get avg and put on cpu
return F.cross_entropy(logits, y, reduction='none').view(y.shape[0], -1).mean().exp().cpu()
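# Usage sketch (illustrative, added for clarity): inputs arrive already flattened, and the result
# is a single scalar ppl over the last k tokens of every sequence.
def _sketch_last_k_ppl():
    B, L, V = 2, 8, 4
    logits = torch.randn(B * L, V)
    y = torch.randint(V, (B * L,))
    return last_k_ppl(logits, y, seq_len=L, k=2)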
def _student_t_map(mu, sigma, nu):
sigma = F.softplus(sigma)
nu = 2.0 + F.softplus(nu)
return mu.squeeze(axis=-1), sigma.squeeze(axis=-1), nu.squeeze(axis=-1)
def student_t_loss(outs, y):
mu, sigma, nu = outs[..., 0], outs[..., 1], outs[..., 2]
mu, sigma, nu = _student_t_map(mu, sigma, nu)
y = y.squeeze(axis=-1)
nup1_half = (nu + 1.0) / 2.0
part1 = 1.0 / nu * torch.square((y - mu) / sigma)
Z = (
torch.lgamma(nup1_half)
- torch.lgamma(nu / 2.0)
- 0.5 * torch.log(math.pi * nu)
- torch.log(sigma)
)
ll = Z - nup1_half * torch.log1p(part1)
return -ll.mean()
def gaussian_ll_loss(outs, y):
mu, sigma = outs[..., 0], outs[..., 1]
y = y.squeeze(axis=-1)
sigma = F.softplus(sigma)
ll = -1.0 * (
torch.log(sigma)
+ 0.5 * math.log(2 * math.pi)
+ 0.5 * torch.square((y - mu) / sigma)
)
return -ll.mean()
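# Sanity-check sketch (illustrative, not part of the original module): the closed form above
# should match the negative log-likelihood of a torch Normal with a softplus-transformed scale.
def _sketch_gaussian_ll_loss():
    outs = torch.randn(5, 2)                       # [..., 0] is mu, [..., 1] is the raw scale
    y = torch.randn(5, 1)
    loss = gaussian_ll_loss(outs, y)
    mu, sigma = outs[..., 0], F.softplus(outs[..., 1])
    ref = -torch.distributions.Normal(mu, sigma).log_prob(y.squeeze(-1)).mean()
    return loss, ref                               # the two values should agree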
def binary_cross_entropy(logits, y):
# BCE loss requires squeezing last dimension of logits so it has the same shape as y
# requires y to be float, since it's overloaded to represent a probability
return F.binary_cross_entropy_with_logits(logits.squeeze(-1), y.float())
def binary_accuracy(logits, y):
return torch.eq(logits.squeeze(-1) >= 0, y).float().mean()
def padded_cross_entropy(logits, y, pad_mask, pad_value=-1):
"""Will ignore the pad value in label (eg, -1)
logits: (batch_size, seq_len, vocab_size)
y: (batch_size, seq_len)
pad_mask: (batch_size, seq_len)
"""
# set padded positions of y to pad_value (assuming padded targets are 0 and pad_mask is 1 there)
y_pad = y + pad_mask * pad_value
logits = logits.view(-1, logits.shape[-1])
y_pad = y_pad.view(-1)
return F.cross_entropy(logits, y_pad, ignore_index=pad_value)
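# Usage sketch (illustrative; assumes the convention that padded targets are 0 and pad_mask is 1
# at padded positions, which is what the y + pad_mask * pad_value trick above relies on):
def _sketch_padded_cross_entropy():
    B, L, V = 2, 6, 4
    logits = torch.randn(B, L, V)
    y = torch.randint(V, (B, L))
    pad_mask = torch.zeros(B, L, dtype=torch.long)
    y[:, -2:] = 0
    pad_mask[:, -2:] = 1                           # last two positions are padding
    return padded_cross_entropy(logits, y, pad_mask, pad_value=-1)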
def cross_entropy(logits, y, ignore_index=-100):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
return F.cross_entropy(logits, y, ignore_index=ignore_index)
def soft_cross_entropy(logits, y, label_smoothing=0.0):
logits = logits.view(-1, logits.shape[-1])
# target is now 2d (no target flattening)
return F.cross_entropy(logits, y, label_smoothing=label_smoothing)
def accuracy(logits, y):
logits = logits.view(-1, logits.shape[-1])
preds = torch.argmax(logits, dim=-1)
if y.numel() > logits.shape[0]:
# Mixup leads to this case: use argmax class
y = y.argmax(dim=-1)
y = y.view(-1)
return torch.eq(preds, y).float().mean()
def accuracy_ignore_index(logits, y, ignore_index=-100):
num_classes = logits.shape[-1]
preds = torch.argmax(logits, dim=-1).view(-1) # flatten so preds matches the flattened y below
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
accuracy = tm_f.classification.accuracy(preds, y, 'multiclass', num_classes=num_classes, ignore_index=ignore_index, average='micro')
return accuracy
def accuracy_at_k(logits, y, k=1):
logits = logits.view(-1, logits.shape[-1])
if y.numel() > logits.shape[0]:
# Mixup leads to this case: use argmax class
y = y.argmax(dim=-1)
y = y.view(-1)
return torch.topk(logits, k, dim=-1)[1].eq(y.unsqueeze(-1)).any(dim=-1).float().mean()
def f1_binary(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="binary")
def f1_macro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="macro")
def f1_micro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="micro")
def roc_auc_macro(logits, y):
logits = logits.view(
-1, logits.shape[-1]
).detach() # KS: had to add detach to eval while training
y = y.view(-1)
return roc_auc_score(
y.cpu().numpy(), F.softmax(logits, dim=-1).cpu().numpy()[:, 1], average="macro"
)
def roc_auc_micro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
return roc_auc_score(
y.cpu().numpy(), F.softmax(logits, dim=-1).cpu().numpy()[:, 1], average="micro"
)
def mse(outs, y, len_batch=None):
# assert outs.shape[:-1] == y.shape and outs.shape[-1] == 1
# outs = outs.squeeze(-1)
if len(y.shape) < len(outs.shape):
assert outs.shape[-1] == 1
outs = outs.squeeze(-1)
if len_batch is None:
return F.mse_loss(outs, y)
else:
# Computes the loss of the first `lens` items in the batches
# TODO document the use case of this
mask = torch.zeros_like(outs, dtype=torch.bool)
for i, l in enumerate(len_batch):
mask[i, :l, :] = 1
outs_masked = torch.masked_select(outs, mask)
y_masked = torch.masked_select(y, mask)
return F.mse_loss(outs_masked, y_masked)
def forecast_rmse(outs, y, len_batch=None):
# TODO: generalize, currently for Monash dataset
return torch.sqrt(F.mse_loss(outs, y, reduction='none').mean(1)).mean()
def mae(outs, y, len_batch=None):
# assert outs.shape[:-1] == y.shape and outs.shape[-1] == 1
# outs = outs.squeeze(-1)
if len(y.shape) < len(outs.shape):
assert outs.shape[-1] == 1
outs = outs.squeeze(-1)
if len_batch is None:
return F.l1_loss(outs, y)
else:
# Computes the loss of the first `lens` items in the batches
mask = torch.zeros_like(outs, dtype=torch.bool)
for i, l in enumerate(len_batch):
mask[i, :l, :] = 1
outs_masked = torch.masked_select(outs, mask)
y_masked = torch.masked_select(y, mask)
return F.l1_loss(outs_masked, y_masked)
# Metrics that can depend on the loss
def loss(x, y, loss_fn):
""" This metric may be useful because the training loss may add extra regularization (e.g. weight decay implemented as L2 penalty), while adding this as a metric skips the additional losses """
return loss_fn(x, y)
def bpb(x, y, loss_fn):
""" bits per byte (image density estimation, speech generation, char LM) """
return loss_fn(x, y) / math.log(2)
def ppl(x, y, loss_fn):
return torch.exp(loss_fn(x, y))
# should have a better way to do this
output_metric_fns = {
"binary_cross_entropy": binary_cross_entropy,
"cross_entropy": cross_entropy,
"padded_cross_entropy": padded_cross_entropy,
"binary_accuracy": binary_accuracy,
"precision": MulticlassPrecision,
"precision_per_class": PrecisionPerClass,
"recall": MulticlassRecall,
"recall_per_class": RecallPerClass,
"accuracy": accuracy,
"accuracy_per_class": AccuracyPerClass,
"accuracy_ignore_index": accuracy_ignore_index,
'accuracy@3': partial(accuracy_at_k, k=3),
'accuracy@5': partial(accuracy_at_k, k=5),
'accuracy@10': partial(accuracy_at_k, k=10),
"eval_loss": loss,
"mcc": mcc,
"mse": mse,
"mae": mae,
"forecast_rmse": forecast_rmse,
"f1_binary": f1_binary,
"f1_macro": f1_macro,
"f1_micro": f1_micro,
"roc_auc_macro": roc_auc_macro,
"roc_auc_micro": roc_auc_micro,
"soft_cross_entropy": soft_cross_entropy, # only for pytorch 1.10+
"student_t": student_t_loss,
"gaussian_ll": gaussian_ll_loss,
"last_k_ppl": last_k_ppl,
"bpnet_loss": bpnet_loss,
"per_token_ppl": per_token_ppl,
'splice_top_1_acc': partial(splice_top_k_acc, k_class=1),
'splice_top_2_acc': partial(splice_top_k_acc, k_class=2),
}
try:
from segmentation_models_pytorch.utils.functional import iou
from segmentation_models_pytorch.losses.focal import focal_loss_with_logits
def iou_with_logits(pr, gt, eps=1e-7, threshold=None, ignore_channels=None):
return iou(pr.sigmoid(), gt, eps=eps, threshold=threshold, ignore_channels=ignore_channels)
output_metric_fns["iou"] = partial(iou, threshold=0.5)
output_metric_fns["iou_with_logits"] = partial(iou_with_logits, threshold=0.5)
output_metric_fns["focal_loss"] = focal_loss_with_logits
except ImportError:
pass
loss_metric_fns = {
"loss": loss,
"bpb": bpb,
"ppl": ppl,
}
metric_fns = {**output_metric_fns, **loss_metric_fns} # TODO py3.9
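# Lookup sketch (illustrative, not part of the original module): metric names in configs resolve
# to these callables; output metrics take (preds, targets), loss metrics also take the loss_fn.
def _sketch_metric_lookup(logits, y):
    acc = metric_fns["accuracy"](logits, y)
    bits = metric_fns["bpb"](logits, y, cross_entropy)   # cross entropy converted from nats to bits
    return acc, bits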
| hyena-dna-main | src/tasks/metrics.py |
# Inspired by https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/metrics/perplexity.py
# But we compute the perplexity correctly: exp(average(nll)), not average(exp(nll))
# Also adapted from https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/text/perplexity.py
# But we pass in the loss to avoid recomputation
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import Tensor
from torchmetrics import Metric
try:
from flash_attn.losses.cross_entropy import CrossEntropyLoss
except ImportError:
CrossEntropyLoss = torch.nn.CrossEntropyLoss
try:
from apex.transformer import parallel_state
except ImportError:
parallel_state = None
class Perplexity(Metric):
r"""
Perplexity measures how well a language model predicts a text sample. It is computed here as the
exponential of the average negative log-likelihood per token.
Args:
kwargs:
Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Examples:
>>> import torch
>>> preds = torch.rand(2, 8, 5, generator=torch.manual_seed(22))
>>> target = torch.randint(5, (2, 8), generator=torch.manual_seed(22))
>>> target[0, 6:] = -100
>>> metric = Perplexity(ignore_index=-100)
>>> metric(preds, target)
tensor(5.2545)
"""
is_differentiable = True
higher_is_better = False
full_state_update = False
total_log_probs: Tensor
count: Tensor
def __init__(self, **kwargs: Dict[str, Any]):
super().__init__(**kwargs)
self.add_state("total_log_probs", default=torch.tensor(0.0, dtype=torch.float64),
dist_reduce_fx="sum")
self.add_state("count", default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx="sum")
self.loss_fn = CrossEntropyLoss()
def update(self, preds: Tensor, target: Tensor, loss: Optional[Tensor] = None) -> None: # type: ignore
"""Compute and store intermediate statistics for Perplexity.
Args:
preds:
Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size].
target:
Ground truth values with a shape [batch_size, seq_len].
"""
count = target.numel()
if loss is None:
loss = self.loss_fn(preds, target)
self.total_log_probs += loss.double() * count
self.count += count
def compute(self) -> Tensor:
"""Compute the Perplexity.
Returns:
Perplexity
"""
return torch.exp(self.total_log_probs / self.count)
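# Usage sketch (illustrative, hypothetical helper): passing the already-computed loss avoids a
# second cross-entropy pass; compute() then returns exp of the count-weighted mean NLL.
def _sketch_perplexity(logits, target):
    metric = Perplexity()
    loss = F.cross_entropy(logits, target)         # logits: (N, vocab), target: (N,)
    metric.update(logits, target, loss=loss)
    return metric.compute()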
class NumTokens(Metric):
"""Keep track of how many tokens we've seen.
"""
# TODO: how do we prevent the reset between the epochs? The reset happens on the 1st batch
# of the next epoch.
# Right now the hack is that we override reset(), which would mess up the forward method.
# We then override forward to do the right thing.
is_differentiable = False
higher_is_better = False
full_state_update = False
count: Tensor
def __init__(self, **kwargs: Dict[str, Any]):
super().__init__(**kwargs)
self.add_state("count", default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx="sum",
persistent=True) # We want the count to be saved to state-dict
if parallel_state is not None and not parallel_state.is_unitialized():
self.tensor_parallel_world_size = parallel_state.get_tensor_model_parallel_world_size()
else:
self.tensor_parallel_world_size = 1
def update(self, preds: Tensor, target: Tensor, loss: Optional[Tensor] = None) -> None: # type: ignore
self.count += target.numel() // self.tensor_parallel_world_size
def compute(self) -> Tensor:
return self.count
def reset(self):
count = self.count
super().reset()
self.count = count
# Adapted from https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/metric.py
def _forward_reduce_state_update(self, *args: Any, **kwargs: Any) -> Any:
"""forward computation using single call to `update` to calculate the metric value on the current batch and
accumulate global state.
This can be done when the global metric state is a simple reduction of batch states.
"""
self.update(*args, **kwargs)
return self.compute()
torchmetric_fns = {
"perplexity": Perplexity,
"num_tokens": NumTokens,
} | hyena-dna-main | src/tasks/torchmetrics.py |
from typing import Optional, List, Tuple
import math
import functools
import collections
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from omegaconf import ListConfig
from src.models.nn.components import ReversibleInstanceNorm1dInput, ReversibleInstanceNorm1dOutput, \
TSNormalization, TSInverseNormalization
from src.models.nn.adaptive_softmax import AdaptiveEmbedding, ProjectedAdaptiveLogSoftmax
import src.tasks.metrics as M
from src.tasks.torchmetrics import torchmetric_fns as tm_mine
import src.models.nn.utils as U
import torchmetrics as tm
from src.utils.config import to_list, instantiate
from torchmetrics import MetricCollection
class BaseTask:
""" Abstract class that takes care of:
- loss function
- arbitrary metrics
- forward pass
- (optional) encoder module that interfaces with dataset (inputs) and model
- (optional) decoder module that interfaces with dataset (targets) and model
"""
encoder = None
decoder = None
def __init__(self, dataset=None, model=None, loss=None, loss_val=None, metrics=None, torchmetrics=None):
""" This class is allowed to grab attributes directly off a constructed dataset and model object """
self.dataset = dataset
self.model = model
if metrics is None: metrics = []
self.metric_names = to_list(metrics)
if torchmetrics is None: torchmetrics = []
self.torchmetric_names = to_list(torchmetrics)
self._tracked_torchmetrics = {}
# The decoder might pass through arguments that the loss needs (e.g. sequence lengths)
# but might also pass through extraneous arguments (e.g. sampling rate)
# Wrap loss and metrics so that they accept kwargs and
# Create loss function
self.loss = instantiate(M.output_metric_fns, loss, partial=True)
self.loss = U.discard_kwargs(self.loss)
if loss_val is not None:
self.loss_val = instantiate(M.output_metric_fns, loss_val, partial=True)
self.loss_val = U.discard_kwargs(self.loss_val)
torchmetrics = MetricCollection(self._init_torchmetrics())
self.train_torchmetrics = torchmetrics.clone(prefix='train/')
self.val_torchmetrics = torchmetrics.clone(prefix='val/')
self.test_torchmetrics = torchmetrics.clone(prefix='test/')
def _init_torchmetrics(self):
"""
Instantiate torchmetrics.
"""
tracked_torchmetrics = {}
for name in self.torchmetric_names:
if name in tm_mine:
tracked_torchmetrics[name] = tm_mine[name]().to('cuda')
elif name in ['AUROC', 'StatScores', 'Precision', 'Recall', 'F1', 'F1Score']:
tracked_torchmetrics[name] = getattr(tm, name)(average='macro', num_classes=self.dataset.d_output, compute_on_step=False).to('cuda')
elif '@' in name:
k = int(name.split('@')[1])
mname = name.split('@')[0]
tracked_torchmetrics[name] = getattr(tm, mname)(average='macro', num_classes=self.dataset.d_output, compute_on_step=False, top_k=k).to('cuda')
else:
tracked_torchmetrics[name] = getattr(tm, name)(compute_on_step=False).to('cuda')
return tracked_torchmetrics
def _reset_torchmetrics(self, prefix=None):
"""
Reset torchmetrics for a prefix
associated with a particular dataloader (e.g. train, val, test).
Generally do this at the start of an epoch.
"""
all_prefixes = [prefix] if prefix is not None else self._tracked_torchmetrics
for prefix in all_prefixes:
if prefix in self._tracked_torchmetrics:
self._tracked_torchmetrics[prefix].reset()
def get_torchmetrics(self, prefix):
"""
Compute torchmetrics for a prefix associated with
a particular dataloader (e.g. train, val, test).
Generally do this at the end of an epoch.
"""
return {name: self._tracked_torchmetrics[prefix][name].compute() for name in self.torchmetric_names}
def torchmetrics(self, x, y, prefix, loss=None):
"""
Update torchmetrics with new x, y .
Prefix corresponds to a particular dataloader (e.g. train, val, test).
Generally call this every batch.
"""
if prefix not in self._tracked_torchmetrics:
self._tracked_torchmetrics[prefix] = MetricCollection(self._init_torchmetrics())
self._tracked_torchmetrics[prefix](x, y, loss=loss)
# for name in self.torchmetric_names:
# if name.startswith('Accuracy'):
# if len(x.shape) > 2:
# # Multi-dimensional, multi-class
# self._tracked_torchmetrics[prefix][name].update(x.transpose(1, 2), y.squeeze())
# continue
# self._tracked_torchmetrics[prefix][name].update(x, y)
def get_torchmetrics(self, prefix):
return self._tracked_torchmetrics[prefix]
def metrics(self, x, y, **kwargs):
"""
Metrics are just functions
output metrics are a function of output and target
loss metrics are a function of loss (e.g. perplexity)
"""
output_metrics = {
name: U.discard_kwargs(M.output_metric_fns[name])(x, y, **kwargs)
for name in self.metric_names if name in M.output_metric_fns
}
loss_metrics = {
name: U.discard_kwargs(M.loss_metric_fns[name])(x, y, self.loss, **kwargs)
for name in self.metric_names if name in M.loss_metric_fns
}
return {**output_metrics, **loss_metrics}
def forward(self, batch, encoder, model, decoder, _state):
"""Passes a batch through the encoder, backbone, and decoder"""
# z holds arguments such as sequence length
x, y, *z = batch # z holds extra dataloader info such as resolution
if len(z) == 0:
z = {}
else:
assert len(z) == 1 and isinstance(z[0], dict), "Dataloader must return dictionary of extra arguments"
z = z[0]
x, w = encoder(x, **z) # w can model-specific constructions such as key_padding_mask for transformers or state for RNNs
x, state = model(x, **w, state=_state)
self._state = state
x, w = decoder(x, state=state, **z)
return x, y, w
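# Batch contract sketch (illustrative; `task`, `lengths`, etc. are hypothetical names): dataloaders
# return (x, y) or (x, y, extra_dict), and the dict is forwarded as kwargs to encoder and decoder.
def _sketch_task_forward(task, encoder, model, decoder, x, y, lengths):
    batch = (x, y, {"lengths": lengths})           # extra info must be packed into a single dict
    preds, targets, extra = task.forward(batch, encoder, model, decoder, _state=None)
    return task.loss(preds, targets, **extra), task.metrics(preds, targets)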
class Scalar(nn.Module):
def __init__(self, c=1):
super().__init__()
self.c = c
def forward(self, x):
return x * self.c
class LMTask(BaseTask):
def forward(self, batch, encoder, model, decoder, _state):
"""Passes a batch through the encoder, backbone, and decoder"""
# z holds arguments such as sequence length
x, y, *z = batch # z holds extra dataloader info such as resolution
if len(z) == 0:
z = {}
else:
assert len(z) == 1 and isinstance(z[0], dict), "Dataloader must return dictionary of extra arguments"
z = z[0]
x, w = encoder(x, **z) # w can model-specific constructions such as key_padding_mask for transformers or state for RNNs
x, state = model(x, **w, state=_state)
self._state = state
x, w = decoder(x, state=state, **z)
x = x.logits
x = rearrange(x, '... C -> (...) C')
y = rearrange(y, '... -> (...)')
return x, y, w
class SpliceTask(BaseTask):
def forward(self, batch, encoder, model, decoder, _state):
"""Passes a batch through the encoder, backbone, and decoder"""
# z holds arguments such as sequence length
x, y, pad_mask = batch # z holds extra dataloader info such as resolution
x, state = model(x, state=_state)
x, w = decoder(x, state=state)
# x = x.logits # if using dna_embedding model, logits already passed
return x, y, {"pad_mask": pad_mask} # expects a dict for the 3rd element
class MultiClass(BaseTask):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.continual_metrics = {}
for name in self.metric_names:
if name.endswith('_per_class'):
for spec_idx, spec in enumerate(self.dataset.species):
self.continual_metrics[name + '_' + spec] = M.output_metric_fns[name](spec_idx)
def metrics(self, x, y, **kwargs):
output_metrics = {}
for name in self.metric_names:
if name in M.output_metric_fns:
if name.endswith('_per_class'):
for spec_idx, spec in enumerate(self.dataset.species):
self.continual_metrics[name + '_' + spec] = self.continual_metrics[name + '_' + spec].to(x.device)
self.continual_metrics[name + '_' + spec].update(x, y)
output_metrics[name + '_' + spec] = self.continual_metrics[name + '_' + spec].compute()
elif name in ['precision', 'recall']:
self.continual_metrics[name] = self.continual_metrics[name].to(x.device)
output_metrics[name] = self.continual_metrics[name](x, y)
else:
output_metrics[name] = U.discard_kwargs(M.output_metric_fns[name])(x, y, **kwargs)
loss_metrics = {
name: U.discard_kwargs(M.loss_metric_fns[name])(x, y, self.loss, **kwargs)
for name in self.metric_names if name in M.loss_metric_fns
}
return {**output_metrics, **loss_metrics}
def _reset_torchmetrics(self, prefix=None):
super()._reset_torchmetrics(prefix)
for name in self.metric_names:
if name.endswith('_per_class'):
for spec_idx, spec in enumerate(self.dataset.species):
self.continual_metrics[name + '_' + spec].reset()
class HG38Task(LMTask):
def __init__(self, dataset=None, model=None, loss=None, loss_val=None, metrics=None, torchmetrics=None, last_k_ppl=None, per_token_ppl=None):
""" Extending LMTask to add custom metrics for HG38 task
last_k_ppl: config for custom ppl, with hparams to pass with it
per_token_ppl: config for per token ppl calc, with list of k (ppls) to track
"""
self.dataset = dataset
self.model = model
if metrics is None: metrics = []
self.metric_names = to_list(metrics)
self.last_k_ppl = last_k_ppl
self.per_token_ppl = per_token_ppl
if torchmetrics is None: torchmetrics = []
self.torchmetric_names = to_list(torchmetrics)
self._tracked_torchmetrics = {}
# The decoder might pass through arguments that the loss needs (e.g. sequence lengths)
# but might also pass through extraneous arguments (e.g. sampling rate)
# Wrap loss and metrics so that they accept kwargs and
# Create loss function
self.loss = instantiate(M.output_metric_fns, loss, partial=True)
self.loss = U.discard_kwargs(self.loss)
if loss_val is not None:
self.loss_val = instantiate(M.output_metric_fns, loss_val, partial=True)
self.loss_val = U.discard_kwargs(self.loss_val)
torchmetrics = MetricCollection(self._init_torchmetrics())
self.train_torchmetrics = torchmetrics.clone(prefix='train/')
self.val_torchmetrics = torchmetrics.clone(prefix='val/')
self.test_torchmetrics = torchmetrics.clone(prefix='test/')
# Create custom metrics for last k ppl
# last_k_ppl is a list of k values (ints), so loop through them
if self.last_k_ppl is not None:
self.custom_ppl_dict = {}
for k in self.last_k_ppl:
key_name = "last_" + str(k) + "_ppl"
# create config
custom_ppl_config = {"_name_": "last_k_ppl", "k": k, "seq_len": self.dataset.max_length}
k_ppl_fn = instantiate(M.output_metric_fns, custom_ppl_config, partial=True)
k_ppl_fn = U.discard_kwargs(k_ppl_fn)
self.custom_ppl_dict[key_name] = k_ppl_fn
# Create custom metric for per token ppl
if self.per_token_ppl is not None:
per_token_ppl_config = {"_name_": "per_token_ppl", "ks": self.per_token_ppl["ks"], "seq_len": self.dataset.max_length}
per_token_fn = instantiate(M.output_metric_fns, per_token_ppl_config, partial=True)
per_token_fn = U.discard_kwargs(per_token_fn)
self.per_token_fn = per_token_fn
def metrics(self, x, y, **kwargs):
"""
Need to modify metrics to include custom metrics
"""
output_metrics = {
name: U.discard_kwargs(M.output_metric_fns[name])(x, y, **kwargs)
for name in self.metric_names if name in M.output_metric_fns
}
loss_metrics = {
name: U.discard_kwargs(M.loss_metric_fns[name])(x, y, self.loss, **kwargs)
for name in self.metric_names if name in M.loss_metric_fns
}
# loop thru all custom ppls and add them to output_metrics
if self.last_k_ppl is not None:
for key_name, k_ppl_fn in self.custom_ppl_dict.items():
output_metrics[key_name] = k_ppl_fn(x, y, **kwargs)
# if configured, compute per-token ppl and add each tracked k to output_metrics
if self.per_token_ppl is not None:
# returns k ppl values, (averaged over batch)
per_k_ppl = self.per_token_fn(x, y, **kwargs)
# loop over ks to log metric
for ind, k in enumerate(self.per_token_ppl["ks"]):
key_name = "ppl_at_{}".format(k)
k = k-1 # 0 index in the background
output_metrics[key_name] = per_k_ppl[ind] # should be in order
return {**output_metrics, **loss_metrics}
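# Config sketch (hypothetical values, added for illustration; assumes `dataset` exposes
# `max_length` and `model` is a standard LM backbone): last_k_ppl is a list of k values and
# per_token_ppl is a dict with the 1-indexed positions to track under "ks"; both are optional.
def _sketch_hg38_task(dataset, model):
    return HG38Task(
        dataset=dataset, model=model, loss="cross_entropy",
        last_k_ppl=[128, 512],                     # logs last_128_ppl, last_512_ppl
        per_token_ppl={"ks": [1, 2, 4, 8]},        # logs ppl_at_1, ppl_at_2, ...
    )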
class BPNetTask(BaseTask):
# def __init__(self, dataset=None, model=None, loss=None, loss_val=None, metrics=None, torchmetrics=None):
# """ Extending LMTask to add custom metrics for HG38 task
# """
def forward(self, batch, encoder, model, decoder, _state):
"""Passes a batch through the encoder, backbone, and decoder"""
# z holds arguments such as sequence length
x, y, *z = batch # z holds extra dataloader info such as resolution
if len(z) == 0:
z = {}
else:
assert len(z) == 1 and isinstance(z[0], dict), "Dataloader must return dictionary of extra arguments"
z = z[0]
profile_x, count_x = model(x) # has 2 head outputs
return (profile_x, count_x), y, {} # y is a tuple too, and need to return empty dict for w
class ForecastingTask(BaseTask):
class DummyModule(nn.Module):
def forward(self, *args):
return args
def __init__(self, norm='mean', **kwargs):
super().__init__(**kwargs)
if norm == 'revnorm':
self.encoder = ReversibleInstanceNorm1dInput(self.dataset.d_input, transposed=False)
self.decoder = ReversibleInstanceNorm1dOutput(self.encoder)
elif norm == 'mean':
self.encoder = TSNormalization(method='mean', horizon=self.dataset.dataset_train.forecast_horizon)
self.decoder = TSInverseNormalization(method='mean', normalizer=self.encoder)
elif norm == 'last':
self.encoder = TSNormalization(method='last', horizon=self.dataset.dataset_train.forecast_horizon)
self.decoder = TSInverseNormalization(method='last', normalizer=self.encoder)
else:
self.encoder = None
self.decoder = None
try:
if hasattr(self.dataset.dataset_train, 'mean'):
self.mean = torch.tensor(self.dataset.dataset_train.mean)
self.std = torch.tensor(self.dataset.dataset_train.std)
elif hasattr(self.dataset.dataset_train, 'standardization'):
self.mean = torch.tensor(self.dataset.dataset_train.standardization['means'])
self.std = torch.tensor(self.dataset.dataset_train.standardization['stds'])
else:
self.mean = None
self.std = None
except AttributeError:
raise AttributeError('Dataset does not have mean/std attributes')
if hasattr(self.dataset.dataset_train, 'log_transform'):
self.log_transform = self.dataset.dataset_train.log_transform
else:
self.log_transform = False
print("Log Transform", self.log_transform)
def metrics(self, x, y, state=None, timestamps=None, ids=None): # Explicit about which arguments the decoder might pass through, but can future-proof with **kwargs
if self.mean is not None:
means = self.mean[ids].to(x.device)
stds = self.std[ids].to(x.device)
x_ = x * stds[:, None, None] + means[:, None, None]
y_ = y * stds[:, None, None] + means[:, None, None]
else:
x_ = x
y_ = y
if self.log_transform:
x_ = torch.exp(x_)
y_ = torch.exp(y_)
return super().metrics(x_, y_)
class VideoTask(BaseTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# self._y_to_logits = {}
self._vid_to_logits = {}
self._vid_to_label = {}
# TODO needed to extract the first element of y, which includes the video id; there should be a cleaner pattern for this
import copy
loss_fn = copy.deepcopy(self.loss)
self.loss = lambda x, y: loss_fn(x, y[0])
if hasattr(self, 'loss_val'):
loss_val_fn = copy.deepcopy(self.loss_val)
self.loss_val = lambda x, y: loss_val_fn(x, y[0])
def metrics(self, logits, y, **kwargs):
labels, vids = y
return super().metrics(logits, labels, **kwargs)
def torchmetrics(self, logits, y, prefix):
"""
logits: (batch, n_classes)
y = tuple of labels and video ids
labels: (batch)
vids: (batch)
"""
for _logits, _label, _vid in zip(logits, y[0], y[1]):
_vid = _vid.item()
# Check that labels are consistent per video id
assert self._vid_to_label[prefix].get(_vid, _label) == _label
self._vid_to_label[prefix][_vid] = _label
self._vid_to_logits[prefix][_vid].append(_logits)
def _reset_torchmetrics(self, prefix):
self._vid_to_logits[prefix] = collections.defaultdict(list)
self._vid_to_label[prefix] = {}
def get_torchmetrics(self, prefix):
vid_to_average_logits = {vid: torch.mean(torch.stack(logits, dim=0), dim=0) for vid, logits in self._vid_to_logits[prefix].items()}
# y is (label, vid) pair
all_labels = torch.stack(list(self._vid_to_label[prefix].values()), dim=0) # (n_videos)
all_logits = torch.stack(list(vid_to_average_logits.values()), dim=0) # (n_videos, n_classes)
m = M.accuracy(all_logits, all_labels)
return {'aggregate_accuracy': m}
class AdaptiveLMTask(BaseTask):
def __init__(
self,
div_val,
cutoffs : List[int],
tie_weights : bool,
tie_projs : List[bool],
init_scale=1.0,
bias_scale=0.0,
dropemb=0.0,
dropsoft=0.0,
**kwargs,
):
super().__init__(**kwargs)
n_tokens = self.dataset.n_tokens
d_model = self.model.d_model
d_output = self.model.d_output
encoder = AdaptiveEmbedding(
n_tokens,
d_model,
d_model,
cutoffs=cutoffs,
div_val=div_val,
init_scale=init_scale,
dropout=dropemb,
)
if tie_weights:
assert d_model == d_output
emb_layers = [i.weight for i in encoder.emb_layers]
else:
emb_layers = None
# Construct decoder/loss
emb_projs = encoder.emb_projs
loss = ProjectedAdaptiveLogSoftmax(
n_tokens, d_output, d_output,
cutoffs, div_val=div_val,
tie_projs=tie_projs,
out_projs=emb_projs,
out_layers_weights=emb_layers,
bias_scale=bias_scale,
dropout=dropsoft,
)
self.encoder = encoder
self.loss = loss
class ImageNetTask(BaseTask):
"""
Imagenet training uses mixup augmentations, which require a separate loss for train and val,
which we overide the base task here.
"""
def __init__(self, **kwargs):
import hydra
super().__init__(
dataset=kwargs.get("dataset", None),
model=kwargs.get("model", None),
loss=kwargs.get("loss", None), # we still create the base loss here, but may override it below
metrics=kwargs.get("metrics", None),
torchmetrics=kwargs.get("torchmetrics", None)
)
# if using mixup, override loss (train) and loss_val; otherwise
# we keep the single loss from the base task above
if "loss_val" in kwargs and "loss_train" in kwargs:
self.loss = hydra.utils.instantiate(kwargs.get("loss_train"))
self.loss_val = hydra.utils.instantiate(kwargs.get('loss_val'))
registry = {
'base': BaseTask,
'multiclass': MultiClass,
'lm': LMTask,
'imagenet': ImageNetTask,
'forecasting': ForecastingTask,
'video': VideoTask,
'hg38': HG38Task,
'bpnet': BPNetTask,
"splice": SpliceTask,
}
| hyena-dna-main | src/tasks/tasks.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce
import src.models.nn.utils as U
import src.utils as utils
import src.utils.config
import src.utils.train
log = src.utils.train.get_logger(__name__)
class Decoder(nn.Module):
"""This class doesn't do much but just signals the interface that Decoders are expected to adhere to
TODO: is there a way to enforce the signature of the forward method?
"""
def forward(self, x, **kwargs):
"""
x: (batch, length, dim) input tensor
state: additional state from the model backbone
*args, **kwargs: additional info from the dataset
Returns:
y: output tensor
*args: other arguments to pass into the loss function
"""
return x
def step(self, x):
"""
x: (batch, dim)
"""
return self.forward(x.unsqueeze(1)).squeeze(1)
class SequenceDecoder(Decoder):
def __init__(
self, d_model, d_output=None, l_output=None, use_lengths=False, mode="last"
):
super().__init__()
self.output_transform = nn.Identity() if d_output is None else nn.Linear(d_model, d_output)
if l_output is None:
self.l_output = None
self.squeeze = False
elif l_output == 0:
# Equivalent to getting an output of length 1 and then squeezing
self.l_output = 1
self.squeeze = True
else:
assert l_output > 0
self.l_output = l_output
self.squeeze = False
self.use_lengths = use_lengths
self.mode = mode
if mode == 'ragged':
assert not use_lengths
def forward(self, x, state=None, lengths=None, l_output=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
if self.l_output is None:
if l_output is not None:
assert isinstance(l_output, int) # Override by pass in
else:
# Grab entire output
l_output = x.size(-2)
squeeze = False
else:
l_output = self.l_output
squeeze = self.squeeze
if self.mode == "last":
restrict = lambda x: x[..., -l_output:, :]
elif self.mode == "first":
restrict = lambda x: x[..., :l_output, :]
elif self.mode == "pool":
restrict = lambda x: (
torch.cumsum(x, dim=-2)
/ torch.arange(
1, 1 + x.size(-2), device=x.device, dtype=x.dtype
).unsqueeze(-1)
)[..., -l_output:, :]
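# NOTE (added clarification): the def below overrides the lambda above with an equivalent computation of the same trailing prefix means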
def restrict(x):
L = x.size(-2)
s = x.sum(dim=-2, keepdim=True)
if l_output > 1:
c = torch.cumsum(x[..., -(l_output - 1) :, :].flip(-2), dim=-2)
c = F.pad(c, (0, 0, 1, 0))
s = s - c # (B, l_output, D)
s = s.flip(-2)
denom = torch.arange(
L - l_output + 1, L + 1, dtype=x.dtype, device=x.device
)
s = s / denom
return s
elif self.mode == "sum":
restrict = lambda x: torch.cumsum(x, dim=-2)[..., -l_output:, :]
# TODO use same restrict function as pool case
elif self.mode == 'ragged':
assert lengths is not None, "lengths must be provided for ragged mode"
# remove any additional padding (beyond max length of any sequence in the batch)
restrict = lambda x: x[..., : max(lengths), :]
else:
raise NotImplementedError(
"Mode must be ['last' | 'first' | 'pool' | 'sum' | 'ragged']"
)
# Restrict to actual length of sequence
if self.use_lengths:
assert lengths is not None
x = torch.stack(
[
restrict(out[..., :length, :])
for out, length in zip(torch.unbind(x, dim=0), lengths)
],
dim=0,
)
else:
x = restrict(x)
if squeeze:
assert x.size(-2) == 1
x = x.squeeze(-2)
x = self.output_transform(x)
return x
def step(self, x, state=None):
# Ignore all length logic
return self.output_transform(x)
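# Shape sketch (illustrative, not part of the original module): l_output=0 selects or pools down
# to a single position and squeezes it; other modes only change how the length is restricted.
def _sketch_sequence_decoder():
    dec = SequenceDecoder(d_model=16, d_output=4, l_output=0, mode="last")
    x = torch.randn(2, 100, 16)                    # (batch, length, d_model)
    return dec(x).shape                            # -> torch.Size([2, 4])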
class TokenDecoder(Decoder):
"""Decoder for token level classification"""
def __init__(
self, d_model, d_output=3
):
super().__init__()
self.output_transform = nn.Linear(d_model, d_output)
def forward(self, x, state=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
x = self.output_transform(x)
return x
class NDDecoder(Decoder):
"""Decoder for single target (e.g. classification or regression)"""
def __init__(
self, d_model, d_output=None, mode="pool"
):
super().__init__()
assert mode in ["pool", "full"]
self.output_transform = nn.Identity() if d_output is None else nn.Linear(d_model, d_output)
self.mode = mode
def forward(self, x, state=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
if self.mode == 'pool':
x = reduce(x, 'b ... h -> b h', 'mean')
x = self.output_transform(x)
return x
class StateDecoder(Decoder):
"""Use the output state to decode (useful for stateful models such as RNNs or perhaps Transformer-XL if it gets implemented"""
def __init__(self, d_model, state_to_tensor, d_output):
super().__init__()
self.output_transform = nn.Linear(d_model, d_output)
self.state_transform = state_to_tensor
def forward(self, x, state=None):
return self.output_transform(self.state_transform(state))
class RetrievalHead(nn.Module):
def __init__(self, d_input, d_model, n_classes, nli=True, activation="relu"):
super().__init__()
self.nli = nli
if activation == "relu":
activation_fn = nn.ReLU()
elif activation == "gelu":
activation_fn = nn.GELU()
else:
raise NotImplementedError
if (
self.nli
): # Architecture from https://github.com/mlpen/Nystromformer/blob/6539b895fa5f798ea0509d19f336d4be787b5708/reorganized_code/LRA/model_wrapper.py#L74
self.classifier = nn.Sequential(
nn.Linear(4 * d_input, d_model),
activation_fn,
nn.Linear(d_model, n_classes),
)
else: # Head from https://github.com/google-research/long-range-arena/blob/ad0ff01a5b3492ade621553a1caae383b347e0c1/lra_benchmarks/models/layers/common_layers.py#L232
self.classifier = nn.Sequential(
nn.Linear(2 * d_input, d_model),
activation_fn,
nn.Linear(d_model, d_model // 2),
activation_fn,
nn.Linear(d_model // 2, n_classes),
)
def forward(self, x):
"""
x: (2*batch, dim)
"""
outs = rearrange(x, "(z b) d -> z b d", z=2)
outs0, outs1 = outs[0], outs[1] # (n_batch, d_input)
if self.nli:
features = torch.cat(
[outs0, outs1, outs0 - outs1, outs0 * outs1], dim=-1
) # (batch, dim)
else:
features = torch.cat([outs0, outs1], dim=-1) # (batch, dim)
logits = self.classifier(features)
return logits
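# Input convention sketch (illustrative): the two sequences of each pair are stacked along the
# batch dimension, so x has shape (2 * batch, d_input) and logits come out one per pair.
def _sketch_retrieval_head():
    head = RetrievalHead(d_input=16, d_model=32, n_classes=2)
    x = torch.randn(2 * 4, 16)
    return head(x).shape                           # -> torch.Size([4, 2])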
class RetrievalDecoder(Decoder):
"""Combines the standard FeatureDecoder to extract a feature before passing through the RetrievalHead"""
def __init__(
self,
d_input,
n_classes,
d_model=None,
nli=True,
activation="relu",
*args,
**kwargs
):
super().__init__()
if d_model is None:
d_model = d_input
self.feature = SequenceDecoder(
d_input, d_output=None, l_output=0, *args, **kwargs
)
self.retrieval = RetrievalHead(
d_input, d_model, n_classes, nli=nli, activation=activation
)
def forward(self, x, state=None, **kwargs):
x = self.feature(x, state=state, **kwargs)
x = self.retrieval(x)
return x
class PackedDecoder(Decoder):
def forward(self, x, state=None):
x, _ = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
return x
# For every type of encoder/decoder, specify:
# - constructor class
# - list of attributes to grab from dataset
# - list of attributes to grab from model
registry = {
"stop": Decoder,
"id": nn.Identity,
"linear": nn.Linear,
"sequence": SequenceDecoder,
"nd": NDDecoder,
"retrieval": RetrievalDecoder,
"state": StateDecoder,
"pack": PackedDecoder,
"token": TokenDecoder,
}
model_attrs = {
"linear": ["d_output"],
"sequence": ["d_output"],
"nd": ["d_output"],
"retrieval": ["d_output"],
"state": ["d_state", "state_to_tensor"],
"forecast": ["d_output"],
"token": ["d_output"],
}
dataset_attrs = {
"linear": ["d_output"],
"sequence": ["d_output", "l_output"],
"nd": ["d_output"],
"retrieval": ["d_output"],
"state": ["d_output"],
"forecast": ["d_output", "l_output"],
"token": ["d_output"],
}
def _instantiate(decoder, model=None, dataset=None):
"""Instantiate a single decoder"""
if decoder is None:
return None
if isinstance(decoder, str):
name = decoder
else:
name = decoder["_name_"]
# Extract arguments from attribute names
dataset_args = utils.config.extract_attrs_from_obj(
dataset, *dataset_attrs.get(name, [])
)
model_args = utils.config.extract_attrs_from_obj(model, *model_attrs.get(name, []))
# Instantiate decoder
obj = utils.instantiate(registry, decoder, *model_args, *dataset_args)
return obj
def instantiate(decoder, model=None, dataset=None):
"""Instantiate a full decoder config, e.g. handle list of configs
Note that arguments are added in reverse order compared to encoder (model first, then dataset)
"""
decoder = utils.to_list(decoder)
return U.PassthroughSequential(
*[_instantiate(d, model=model, dataset=dataset) for d in decoder]
)
| hyena-dna-main | src/tasks/decoders.py |
import datetime
import math
from typing import ForwardRef
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat
import src.models.nn.utils as U
import src.utils as utils
import src.utils.config
from src.models.sequence.block import SequenceResidualBlock
from src.models.nn.components import Normalization
class Encoder(nn.Module):
"""Encoder abstraction
Accepts a tensor and optional kwargs. Outside of the main tensor, all other arguments should be kwargs.
Returns a tensor and optional kwargs.
Encoders are combined via U.PassthroughSequential which passes these kwargs through in a pipeline. The resulting kwargs are accumulated and passed into the model backbone.
"""
def forward(self, x, **kwargs):
"""
x: input tensor
*args: additional info from the dataset (e.g. sequence lengths)
Returns:
y: output tensor
*args: other arguments to pass into the model backbone
"""
return x, {}
class PositionalIDEncoder(Encoder):
def forward(self, x):
position_ids = torch.arange(x.shape[-1], dtype=torch.long, device=x.device)
position_ids = repeat(position_ids, 'l -> b l', b=x.shape[0])
return x, { 'position_ids': position_ids }
# Adapted from https://github.com/pytorch/examples/blob/master/word_language_model/model.py
class PositionalEncoder(Encoder):
r"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
.. math::
\text{PosEncoder}(pos, 2i) = \sin(pos/10000^{2i/d_{model}})
\text{PosEncoder}(pos, 2i+1) = \cos(pos/10000^{2i/d_{model}})
\text{where pos is the word position and i is the embed idx}
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoder(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=16384, pe_init=None):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
if pe_init is not None:
self.pe = nn.Parameter(torch.empty(max_len, 1, d_model))
nn.init.normal_(self.pe, 0, pe_init)
# self.pe = pe.unsqueeze(1)
else:
pe = torch.zeros(max_len, d_model)
position = torch.arange(0.0, max_len).unsqueeze(1)
div_term = torch.exp(
-math.log(10000.0) * torch.arange(0.0, d_model, 2.0) / d_model
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
self.register_buffer("pe", pe)
self.attn_mask = None
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
lens: actual lengths of sequences
Shape:
x: [l_sequence, n_batch, d_model]
Returns: [l_sequence, n_batch, d_model]
attn_mask: [l_sequence, l_sequence]
padding_mask:
"""
x = x + self.pe[: x.size(-2)]
return self.dropout(x)
class ClassEmbedding(Encoder):
# Should also be able to define this by subclassing Embedding
def __init__(self, n_classes, d_model):
super().__init__()
self.embedding = nn.Embedding(n_classes, d_model)
def forward(self, x, y):
x = x + self.embedding(y).unsqueeze(-2) # (B, L, D)
return x
class Conv1DEncoder(Encoder):
def __init__(self, d_input, d_model, kernel_size=25, stride=1, padding='same'):
super().__init__()
self.conv = nn.Conv1d(
in_channels=d_input,
out_channels=d_model,
kernel_size=kernel_size,
stride=stride,
padding=padding,
)
def forward(self, x):
# BLD -> BLD
x = self.conv(x.transpose(1, 2)).transpose(1, 2)
return x
class LayerEncoder(Encoder):
"""Use an arbitary SequenceModule layer"""
def __init__(self, d_model, prenorm=False, norm='layer', layer=None):
super().__init__()
# Simple stack of blocks
layer["transposed"] = False
self.layer = SequenceResidualBlock(
d_input=d_model,
prenorm=prenorm,
layer=layer,
residual='R',
norm=norm,
pool=None,
)
def forward(self, x):
x, _ = self.layer(x) # Discard state
return x
class TimestampEmbeddingEncoder(Encoder):
"""
General time encoder for Pandas Timestamp objects (encoded as torch tensors).
See MonashDataset for an example of how to return time features as 'z's.
"""
cardinalities = {
'day': (1, 31),
'hour': (0, 23),
'minute': (0, 59),
'second': (0, 59),
'month': (1, 12),
'year': (1950, 2010), # (1800, 3000) used to be (1970, datetime.datetime.now().year + 1) but was not enough for all datasets in monash
'dayofweek': (0, 6),
'dayofyear': (1, 366),
'quarter': (1, 4),
'week': (1, 53),
'is_month_start': (0, 1),
'is_month_end': (0, 1),
'is_quarter_start': (0, 1),
'is_quarter_end': (0, 1),
'is_year_start': (0, 1),
'is_year_end': (0, 1),
'is_leap_year': (0, 1),
}
def __init__(self, d_model, table=False, features=None):
super().__init__()
self.table = table
self.ranges = {k: max_val - min_val + 2 for k, (min_val, max_val) in self.cardinalities.items()} # padding for null included
if features is None:
pass
else:
self.cardinalities = {k: v for k, v in self.cardinalities.items() if k in features}
if table:
self.embedding = nn.ModuleDict({
attr: nn.Embedding(maxval - minval + 2, d_model, padding_idx=0)
for attr, (minval, maxval) in self.cardinalities.items()
})
else:
self.embedding = nn.ModuleDict({
attr: nn.Linear(1, d_model)
for attr in self.cardinalities
})
def forward(self, x, timestamps=None):
for attr in timestamps:
mask = timestamps[attr] == -1
timestamps[attr] = timestamps[attr] - self.cardinalities[attr][0]
timestamps[attr][mask] = 0
if self.table:
x = x + self.embedding[attr](timestamps[attr].to(torch.long))
else:
x = x + self.embedding[attr]((2 * timestamps[attr] / self.ranges[attr] - 1).unsqueeze(-1))
#x = x + self.embedding(timestamps[attr].to(torch.float)).unsqueeze(1)
return x
class TimeEncoder(Encoder):
def __init__(self, n_tokens_time, d_model, timeenc=0):
super().__init__()
self.timeenc = timeenc
if self.timeenc == 0:
self.encoders = nn.ModuleList(
[nn.Embedding(v, d_model) for v in n_tokens_time]
)
else:
self.encoders = nn.Linear(len(n_tokens_time), d_model)
self.mask_embed = nn.Embedding(2, d_model)
def forward(self, x, mark=None, mask=None):
assert mark is not None and mask is not None, "Extra arguments should be returned by collate function"
if self.timeenc == 0:
assert mark.size(-1) == len(self.encoders)
embeddings = [
embed(z) for embed, z in zip(self.encoders, torch.unbind(mark, dim=-1))
]
time_encode = torch.sum(torch.stack(embeddings), dim=0)
else:
time_encode = self.encoders(mark)
mask_encode = self.mask_embed(mask.squeeze(-1))
return x + time_encode + mask_encode # (B, L, d_model)
class PackedEncoder(Encoder):
def forward(self, x, len_batch=None):
assert len_batch is not None
x = nn.utils.rnn.pack_padded_sequence(
x, len_batch.cpu(), enforce_sorted=False, batch_first=True,
)
return x
class OneHotEncoder(Encoder):
def __init__(self, n_tokens, d_model):
super().__init__()
assert n_tokens <= d_model
self.d_model = d_model
def forward(self, x):
return F.one_hot(x.squeeze(-1), self.d_model).float()
class Conv2DPatchEncoder(Encoder):
"""
For encoding images into a sequence of patches.
"""
def __init__(self, d_input, d_model, filter_sizes, flat=False):
"""
d_input: dim of encoder input (data dimension)
d_model: dim of encoder output (model dimension)
filter_sizes: tuple with fh, fw
flat: if image is flattened from dataloader (like in cifar),
then we need to reshape back to 2D before conv
"""
fh, fw = filter_sizes
self.flat = flat
super().__init__()
assert len(filter_sizes) == 2
self.encoder = nn.Conv2d(d_input, d_model, kernel_size=(fh, fw), stride=(fh, fw))
def forward(self, x):
"""
x shape expected = [b, h, w, c]
returns tuple with x, with new shape = [b, seq_len, c_out]
"""
x = rearrange(x, 'b h w c -> b c h w')
x = self.encoder(x)
x = rearrange(x, 'b c h w -> b (h w) c')
return x
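# Shape sketch (illustrative, not part of the original module): a (b, h, w, c) image is cut into
# non-overlapping fh x fw patches, each projected to d_model, giving (h // fh) * (w // fw) tokens.
def _sketch_patch_encoder():
    enc = Conv2DPatchEncoder(d_input=3, d_model=64, filter_sizes=(8, 8))
    imgs = torch.randn(2, 32, 32, 3)
    return enc(imgs).shape                         # -> torch.Size([2, 16, 64])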
# For every type of encoder/decoder, specify:
# - constructor class
# - list of attributes to grab from dataset
# - list of attributes to grab from model
registry = {
"stop": Encoder,
"id": nn.Identity,
"embedding": nn.Embedding,
"linear": nn.Linear,
"position": PositionalEncoder,
"position_id": PositionalIDEncoder,
"class": ClassEmbedding,
"pack": PackedEncoder,
"time": TimeEncoder,
"onehot": OneHotEncoder,
"conv1d": Conv1DEncoder,
"patch2d": Conv2DPatchEncoder,
"timestamp_embedding": TimestampEmbeddingEncoder,
"layer": LayerEncoder,
}
dataset_attrs = {
"embedding": ["n_tokens"],
"linear": ["d_input"], # TODO make this d_data?
"class": ["n_classes"],
"time": ["n_tokens_time"],
"onehot": ["n_tokens"],
"conv1d": ["d_input"],
"patch2d": ["d_input"],
}
model_attrs = {
"embedding": ["d_model"],
"linear": ["d_model"],
"position": ["d_model"],
"class": ["d_model"],
"time": ["d_model"],
"onehot": ["d_model"],
"conv1d": ["d_model"],
"patch2d": ["d_model"],
"timestamp_embedding": ["d_model"],
"layer": ["d_model"],
}
def _instantiate(encoder, dataset=None, model=None):
"""Instantiate a single encoder"""
if encoder is None:
return None
if isinstance(encoder, str):
name = encoder
else:
name = encoder["_name_"]
# Extract dataset/model arguments from attribute names
dataset_args = utils.config.extract_attrs_from_obj(
dataset, *dataset_attrs.get(name, [])
)
model_args = utils.config.extract_attrs_from_obj(model, *model_attrs.get(name, []))
# Instantiate encoder
obj = utils.instantiate(registry, encoder, *dataset_args, *model_args)
return obj
def instantiate(encoder, dataset=None, model=None):
encoder = utils.to_list(encoder)
return U.PassthroughSequential(
*[_instantiate(e, dataset=dataset, model=model) for e in encoder]
)
| hyena-dna-main | src/tasks/encoders.py |
from typing import Any
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
class ParamsLog(pl.Callback):
""" Log the number of parameters of the model """
def __init__(
self,
total: bool = True,
trainable: bool = True,
fixed: bool = True,
):
super().__init__()
self._log_stats = AttributeDict(
{
'total_params_log': total,
'trainable_params_log': trainable,
'non_trainable_params_log': fixed,
}
)
@rank_zero_only
def on_fit_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
logs = {}
if self._log_stats.total_params_log:
logs["params/total"] = sum(p.numel() for p in pl_module.parameters())
if self._log_stats.trainable_params_log:
logs["params/trainable"] = sum(p.numel() for p in pl_module.parameters()
if p.requires_grad)
if self._log_stats.non_trainable_params_log:
logs["params/fixed"] = sum(p.numel() for p in pl_module.parameters()
if not p.requires_grad)
if trainer.logger:
trainer.logger.log_hyperparams(logs)
| hyena-dna-main | src/callbacks/params.py |
import torch
from pytorch_lightning import Callback, Trainer, LightningModule
import logging
log = logging.getLogger(__name__) # We want a logger for each process, not just the rank 0
def l2_promote():
import ctypes
_libcudart = ctypes.CDLL('libcudart.so')
# Set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
def set_affinity(trainer):
try:
from src.utils.gpu_affinity import set_affinity
nproc_per_node = torch.cuda.device_count()
affinity = set_affinity(trainer.local_rank, nproc_per_node, 'socket_unique_continuous')
log.info(f'{trainer.local_rank}: thread affinity: {affinity}')
# TD [2022-05-07] Somehow calling this causes GPU 0 to allocate extra ~800MB of memory per
# number of GPUs (e.g., 6.4GB of extra memory in a 8-GPU setup). H/t Dan.
# l2_promote()
except Exception:
pass # affinity setting is best-effort; ignore failures (e.g. missing dependencies)
class GpuAffinity(Callback):
"""Set GPU affinity and increase the L2 fetch granularity.
Adapted from https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/Transformer-XL
"""
def setup(self, trainer: Trainer, pl_module: LightningModule, stage=None) -> None:
set_affinity(trainer)
| hyena-dna-main | src/callbacks/gpu_affinity.py |
### https://github.com/HazyResearch/transformers/blob/master/src/callbacks/wandb_callbacks.py
import glob
import os
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from pytorch_lightning.utilities import rank_zero_only
from sklearn import metrics
from sklearn.metrics import f1_score, precision_score, recall_score
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
"""Safely get Weights&Biases logger from Trainer."""
if isinstance(trainer.logger, WandbLogger):
return trainer.logger
if isinstance(trainer.logger, LoggerCollection):
for logger in trainer.logger:
if isinstance(logger, WandbLogger):
return logger
raise Exception(
"You are using wandb related callback, but WandbLogger was not found for some reason..."
)
class WatchModel(Callback):
"""Make wandb watch model at the beginning of the run."""
def __init__(self, log: str = "gradients", log_freq: int = 100):
self.log = log
self.log_freq = log_freq
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)
class UploadCodeAsArtifact(Callback):
"""Upload all *.py files to wandb as an artifact, at the beginning of the run."""
def __init__(self, code_dir: str):
self.code_dir = code_dir
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
code = wandb.Artifact("project-source", type="code")
for path in glob.glob(os.path.join(self.code_dir, "**/*.py"), recursive=True):
code.add_file(path)
experiment.log_artifact(code)
class UploadCheckpointsAsArtifact(Callback):
"""Upload checkpoints to wandb as an artifact, at the end of run."""
def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False):
self.ckpt_dir = ckpt_dir
self.upload_best_only = upload_best_only
@rank_zero_only
def on_train_end(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints")
if self.upload_best_only:
ckpts.add_file(trainer.checkpoint_callback.best_model_path)
else:
for path in glob.glob(os.path.join(self.ckpt_dir, "**/*.ckpt"), recursive=True):
ckpts.add_file(path)
experiment.log_artifact(ckpts)
class LogConfusionMatrix(Callback):
"""Generate confusion matrix every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module) -> None:
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate confusion matrix."""
if self.ready:
logger = get_wandb_logger(trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
# set figure size
plt.figure(figsize=(14, 8))
# set labels size
sn.set(font_scale=1.4)
# set font size
sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 8}, fmt="g")
# names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"confusion_matrix/{experiment.name}": wandb.Image(plt)}, commit=False)
# according to wandb docs this should also work but it crashes
# experiment.log(f{"confusion_matrix/{experiment.name}": plt})
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogF1PrecRecHeatmap(Callback):
"""Generate f1, precision, recall heatmap every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self, class_names: List[str] = None):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate f1, precision and recall heatmap."""
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
f1 = f1_score(preds, targets, average=None)
r = recall_score(preds, targets, average=None)
p = precision_score(preds, targets, average=None)
data = [f1, p, r]
# set figure size
plt.figure(figsize=(14, 3))
# set labels size
sn.set(font_scale=1.2)
# set font size
sn.heatmap(
data,
annot=True,
annot_kws={"size": 10},
fmt=".3f",
yticklabels=["F1", "Precision", "Recall"],
)
# names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"f1_p_r_heatmap/{experiment.name}": wandb.Image(plt)}, commit=False)
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogImagePredictions(Callback):
"""Logs a validation batch and their predictions to wandb.
Example adapted from:
https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY
"""
def __init__(self, num_samples: int = 8):
super().__init__()
self.num_samples = num_samples
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_epoch_end(self, trainer, pl_module):
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
# get a validation batch from the validation data loader
val_samples = next(iter(trainer.datamodule.val_dataloader()))
val_imgs, val_labels = val_samples
# run the batch through the network
val_imgs = val_imgs.to(device=pl_module.device)
logits = pl_module(val_imgs)
preds = torch.argmax(logits, axis=-1)
# log the images as wandb Image
experiment.log(
{
f"Images/{experiment.name}": [
wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
for x, pred, y in zip(
val_imgs[: self.num_samples],
preds[: self.num_samples],
val_labels[: self.num_samples],
)
]
}
)
class LogDT(Callback):
""" Log the dt values (from NeurIPS 2021 LSSL submission) """
def on_train_epoch_end(self, trainer, pl_module):
log_dict = {}
for name, m in pl_module.model.named_modules():
if pl_module.hparams.train.get('log_dt', False) \
and hasattr(m, "log_dt"):
log_dict[f"{name}.log_dt"] = (
m.log_dt.detach().cpu().numpy().flatten()
)
log_dict[f"{name}.log_dt.image"] = wandb.Image(
m.log_dt.detach().cpu().numpy().flatten().reshape(1, -1)
)
log_dict[f"{name}.log_dt"] = wandb.Table(
dataframe=pd.DataFrame(
{"log_dt": m.log_dt.detach().cpu().numpy().flatten()}
)
)
if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
if trainer.logger is not None:
trainer.logger.experiment.log(log_dict)
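# Example usage (hedged sketch, not part of the original file): attaching these
# wandb callbacks to a Lightning Trainer. `MyModel` and `MyDataModule` are
# placeholder names, not defined in this repo.
# from pytorch_lightning import Trainer
# from pytorch_lightning.loggers import WandbLogger
# trainer = Trainer(
#     logger=WandbLogger(project="my-project"),
#     callbacks=[LogF1PrecRecHeatmap(), LogImagePredictions(num_samples=8), LogDT()],
# )
# trainer.fit(MyModel(), datamodule=MyDataModule())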
| hyena-dna-main | src/callbacks/wandb.py |
### https://github.com/HazyResearch/transformers/blob/master/src/callbacks/speed_monitor.py
# Adapted from https://pytorch-lightning.readthedocs.io/en/latest/_modules/pytorch_lightning/callbacks/gpu_stats_monitor.html#GPUStatsMonitor
# We only need the speed monitoring, not the GPU monitoring
import time
from typing import Any
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.types import STEP_OUTPUT
class Timer(Callback):
"""Monitor the speed of each step and each epoch.
"""
def __init__(
self,
step: bool = True,
inter_step: bool = True,
epoch: bool = True,
val: bool = True,
):
super().__init__()
self._log_stats = AttributeDict( {
'step_time': step,
'inter_step_time': inter_step,
'epoch_time': epoch,
'val_time': val,
})
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_epoch_time = None
def on_train_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_step_time = None
self._snap_inter_step_time = None
self._snap_epoch_time = time.time()
def on_train_batch_start(
self,
trainer: Trainer,
pl_module: LightningModule,
batch: Any,
batch_idx: int,
) -> None:
if self._log_stats.step_time:
self._snap_step_time = time.time()
if not self._should_log(trainer):
return
logs = {}
if self._log_stats.inter_step_time and self._snap_inter_step_time:
# First log at beginning of second step
logs["timer/inter_step"] = (time.time() - self._snap_inter_step_time) # * 1000
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
) -> None:
if self._log_stats.inter_step_time:
self._snap_inter_step_time = time.time()
if not self._should_log(trainer):
return
logs = {}
if self._log_stats.step_time and self._snap_step_time:
logs["timer/step"] = (time.time() - self._snap_step_time) # * 1000
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule,) -> None:
logs = {}
if self._log_stats.epoch_time and self._snap_epoch_time:
logs["timer/epoch"] = time.time() - self._snap_epoch_time
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
def on_validation_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_val_time = time.time()
@rank_zero_only
def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule,) -> None:
logs = {}
if self._log_stats.val_time and self._snap_val_time:
logs["timer/validation"] = time.time() - self._snap_val_time
if trainer.logger: trainer.logger.log_metrics(logs) # , step=trainer.global_step)
@staticmethod
def _should_log(trainer) -> bool:
return (trainer.global_step + 1) % trainer.log_every_n_steps == 0 or trainer.should_stop
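# Example usage (hedged sketch, not part of the original file): log step, epoch
# and validation timings, but skip inter-step timing.
# from pytorch_lightning import Trainer
# timer = Timer(step=True, inter_step=False, epoch=True, val=True)
# trainer = Trainer(callbacks=[timer], log_every_n_steps=50)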
| hyena-dna-main | src/callbacks/timer.py |
r"""
Sequence Length Warmup by Reloading
====================
Change sequence lengths according to a stage schedule. Each stage sets the sequence length
and batch size.
TODO (not yet supported):
If a batch size is not provided for a stage, derive one from the sequence length
(e.g. keep the total number of tokens per batch roughly constant).
"""
import numpy as np
from pytorch_lightning.callbacks import Callback
import src.utils as utils
from src.utils import registry
class SeqlenWarmupReload(Callback):
def __init__(self, stage_params: list):
"""
stage_params is a list of dicts
e.g. stage_params = [
{'seq_len': 128, 'epochs': 20},
{'seq_len': 256, 'epochs': 30},
{'seq_len': 512, 'epochs': 50},
]
"""
super().__init__()
assert len(stage_params) > 0, 'No stages specified'
assert all([{'seq_len', 'epochs'} <= set(stage.keys()) for stage in stage_params]), \
'stage_params must contain keys: seq_len and epochs'
self.stage_params = stage_params
self.stage_epochs_cume = np.cumsum([stage['epochs'] for stage in stage_params])
self._current_stage = 0
def _verify_stages(self, trainer, model):
# Double-check that stage parameters are correct, otherwise we'll fail in the middle of training
for stage in self.stage_params:
if 'scheduler' in stage:  # stage is a dict; hasattr on a dict key is always False
# Verify that we can actually create the scheduler when we need to update it in each stage
scheduler = utils.instantiate(registry.scheduler, {**model.hparams.scheduler, **stage['scheduler']}, trainer.optimizers[0])
del scheduler
def on_train_start(self, trainer, model) -> None:
# Verify all the stage parameters are correct
self._verify_stages(trainer, model)
print(f"Training starts at {trainer.current_epoch}")
if trainer.current_epoch == 0:
# Update the model to the first stage
self._update_to_current_stage(trainer, model)
else:
# Preemption or resumption of progressive resizing
# Update the stage to the current one
self._current_stage = int(np.searchsorted(self.stage_epochs_cume - 1, trainer.current_epoch))
self._starting_stage = np.any(trainer.current_epoch == self.stage_epochs_cume)
print("Seq Len Warmup: Restarting at Stage {}".format(self._current_stage))
if self._starting_stage:
self._update_lr_scheduler(trainer, model)
# Set the dataloader and model
self._update_dataloaders(trainer, model)
# self._update_model(trainer, model) # we don't need to update the model, yet
return super().on_train_start(trainer, model)
def _update_lr_scheduler(self, trainer, model):
if 'scheduler' not in self.stage_params[self._current_stage]:
# No scheduler specified, so don't update the current scheduler
return
assert len(trainer.lr_schedulers) == 1
# Reinitialize the scheduler
# We don't need to carry over information from the last scheduler e.g. the last_epoch property,
# because that will mess with the new scheduler when we step it
hparams = {**model.hparams.scheduler, **self.stage_params[self._current_stage]['scheduler']}
# Note that passing in the optimizer below is okay: the scheduler will be reinitialized and doesn't seem to inherit any current lr info from the optimizer
trainer.lr_schedulers[0]['scheduler'] = utils.instantiate(registry.scheduler, hparams, trainer.optimizers[0])
print("\tChanged scheduler to {}".format(hparams))
def _update_dataloaders(self, trainer, model):
# Set the train resolution and reset the dataloader
# set new seq len and reset the dataloader
# max_length should be set in the config of the dataloader
seq_len = self.stage_params[self._current_stage]['seq_len']
model.hparams.loader.max_length = seq_len
# we need to resize the batch size too
batch_size = self.stage_params[self._current_stage].get('batch_size', None)
# need to change the dataset params, and the set the phase, which reinits the dataset
model.dataset.max_length = seq_len # progressively update the seq len
# model.dataset.max_length_val = seq_len # we update the val len to be same as train
# model.dataset.max_length_test = seq_len # we don't change the test set, always the longest
model.dataset.batch_size = batch_size # need to adjust the batch size
# model.dataset.batch_size_eval = batch_size * 2 #
# model.dataset.dataset_train.max_length = seq_len
model.dataset.init_datasets() # reinit the datasets with new batch size and seq len
trainer.reset_train_dataloader(model) # tells PTL to use the new dataloaders/datasets
trainer.reset_val_dataloader(model)
print('\tAt epoch {}, changed Seq Len to {}, and batch size to {}'.format(trainer.current_epoch, seq_len, batch_size))
# def _update_model(self, trainer, model):
# if not hasattr(self.stage_params[self._current_stage], 'bandlimit'):
# return
# Update the bandlimit value for the model: this is a hack to make sure the model is updated
# Iterate over all the modules
# for module in model.modules():
# if hasattr(module, 'bandlimit'):
# module.bandlimit = self.stage_params[self._current_stage]['bandlimit']
# print('\tChanged bandlimit to {}'.format(self.stage_params[self._current_stage]['bandlimit']))
def _update_to_current_stage(self, trainer, model):
print("Seq Len Warmup: Moving to Stage {}".format(self._current_stage))
# Update the train dataloader, model and scheduler
self._update_dataloaders(trainer, model)
# self._update_model(trainer, model)
self._update_lr_scheduler(trainer, model)
def on_train_epoch_end(self, trainer, model):
"""
Check to see if new stage is reached for the next epoch, and if so, prepare the new stage by
changing the dataloader.
(We do next epoch so that the dataloader is prepared before the next epoch)
"""
next_epoch = trainer.current_epoch + 1
# Check if stage should be increased
if next_epoch >= self.stage_epochs_cume[self._current_stage] and self._current_stage < len(self.stage_params) - 1:
self._current_stage += 1
self._update_to_current_stage(trainer, model)
return super().on_train_epoch_end(trainer, model)
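# Example usage (hedged sketch, not part of the original file): a warmup schedule
# that doubles the sequence length each stage while shrinking the batch size, so
# the number of tokens per batch stays roughly constant.
# from pytorch_lightning import Trainer
# stages = [
#     {'seq_len': 1024, 'epochs': 2, 'batch_size': 64},
#     {'seq_len': 2048, 'epochs': 2, 'batch_size': 32},
#     {'seq_len': 4096, 'epochs': 4, 'batch_size': 16},
# ]
# trainer = Trainer(callbacks=[SeqlenWarmupReload(stage_params=stages)])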
| hyena-dna-main | src/callbacks/seqlen_warmup_reload.py |
import torch
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from omegaconf import OmegaConf
class TrackNorms(pl.Callback):
# TODO do callbacks happen before or after the method in the main LightningModule?
# @rank_zero_only # needed?
def on_after_training_step(self, batch, batch_idx, trainer: pl.Trainer, pl_module: pl.LightningModule):
# Log extra metrics
metrics = {}
if hasattr(pl_module, "_grad_norms"):
metrics.update(pl_module._grad_norms)
self.log_dict(
metrics,
on_step=True,
on_epoch=False,
prog_bar=False,
add_dataloader_idx=False,
sync_dist=True,
)
def on_after_backward(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
# example to inspect gradient information in tensorboard
if OmegaConf.select(trainer.hparams, 'trainer.track_grad_norms'): # TODO dot notation should work with omegaconf?
norms = {}
for name, p in pl_module.named_parameters():
if p.grad is None:
continue
# param_norm = float(p.grad.data.norm(norm_type))
param_norm = torch.mean(p.grad.data ** 2)
norms[f"grad_norm.{name}"] = param_norm
pl_module._grad_norms = norms
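# Example usage (hedged sketch, not part of the original file): the callback reads
# `trainer.hparams.trainer.track_grad_norms`, which the training script is assumed
# to attach to the trainer from the experiment config.
# from omegaconf import OmegaConf
# from pytorch_lightning import Trainer
# trainer = Trainer(callbacks=[TrackNorms()])
# trainer.hparams = OmegaConf.create({'trainer': {'track_grad_norms': True}})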
| hyena-dna-main | src/callbacks/norms.py |
import numpy as np
from pytorch_lightning.callbacks import Callback
import src.utils as utils
from src.utils import registry
class ProgressiveResizing(Callback):
def __init__(self, stage_params: list):
"""
stage_params is a list of dicts
e.g. stage_params = [
{'resolution': 4, 'epochs': 50}, # 32 x 32
{'resolution': 2, 'epochs': 30}, # 64 x 64
{'resolution': 1, 'epochs': 20}, # 128 x 128
]
"""
super().__init__()
assert len(stage_params) > 0, 'No stages specified'
assert all([{'resolution', 'epochs'} <= set(stage.keys()) for stage in stage_params]), \
'stage_params must contain keys: resolution and epochs'
self.stage_params = stage_params
self.stage_epochs_cume = np.cumsum([stage['epochs'] for stage in stage_params])
self._current_stage = 0
def _verify_stages(self, trainer, model):
# Double-check that stage parameters are correct, otherwise we'll fail in the middle of training
for stage in self.stage_params:
if 'scheduler' in stage:  # stage is a dict; hasattr on a dict key is always False
# Verify that we can actually create the scheduler when we need to update it in each stage
scheduler = utils.instantiate(registry.scheduler, {**model.hparams.scheduler, **stage['scheduler']}, trainer.optimizers[0])
del scheduler
def on_train_start(self, trainer, model) -> None:
# Verify all the stage parameters are correct
self._verify_stages(trainer, model)
print(f"Training starts at {trainer.current_epoch}")
if trainer.current_epoch == 0:
# Update the model to the first stage
self._update_to_current_stage(trainer, model)
else:
# Preemption or resumption of progressive resizing
# Update the stage to the current one
self._current_stage = int(np.searchsorted(self.stage_epochs_cume - 1, trainer.current_epoch))
self._starting_stage = np.any(trainer.current_epoch == self.stage_epochs_cume)
print("Progressive Resizing: Restarting at Stage {}".format(self._current_stage))
if self._starting_stage:
self._update_lr_scheduler(trainer, model)
# Set the dataloader and model
self._update_dataloaders(trainer, model)
self._update_model(trainer, model)
return super().on_train_start(trainer, model)
def _update_lr_scheduler(self, trainer, model):
if 'scheduler' not in self.stage_params[self._current_stage]:
# No scheduler specified, so don't update the current scheduler
return
assert len(trainer.lr_schedulers) == 1
# Reinitialize the scheduler
# We don't need to carry over information from the last scheduler e.g. the last_epoch property,
# because that will mess with the new scheduler when we step it
hparams = {**model.hparams.scheduler, **self.stage_params[self._current_stage]['scheduler']}
# Note that passing in the optimizer below is okay: the scheduler will be reinitialized and doesn't seem to inherit any current lr info from the optimizer
trainer.lr_schedulers[0]['scheduler'] = utils.instantiate(registry.scheduler, hparams, trainer.optimizers[0])
print("\tChanged scheduler to {}".format(hparams))
def _update_dataloaders(self, trainer, model):
# Set the train resolution and reset the dataloader
model.hparams.loader.train_resolution = self.stage_params[self._current_stage]['resolution']
trainer.reset_train_dataloader(model)
print('\tChanged resolution to {}'.format(self.stage_params[self._current_stage]['resolution']))
def _update_model(self, trainer, model):
if 'bandlimit' not in self.stage_params[self._current_stage]:
return
# Update the bandlimit value for the model: this is a hack to make sure the model is updated
# Iterate over all the modules
for module in model.modules():
if hasattr(module, 'bandlimit'):
module.bandlimit = self.stage_params[self._current_stage]['bandlimit']
print('\tChanged bandlimit to {}'.format(self.stage_params[self._current_stage]['bandlimit']))
def _update_to_current_stage(self, trainer, model):
print("Progressive Resizing: Moving to Stage {}".format(self._current_stage))
# Update the train dataloader, model and scheduler
self._update_dataloaders(trainer, model)
self._update_model(trainer, model)
self._update_lr_scheduler(trainer, model)
def on_train_epoch_end(self, trainer, model):
"""
Check to see if new stage is reached for the next epoch, and if so, prepare the new stage by
changing the dataloader.
(We do next epoch so that the dataloader is prepared before the next epoch)
"""
next_epoch = trainer.current_epoch + 1
# Check if stage should be increased
if next_epoch >= self.stage_epochs_cume[self._current_stage] and self._current_stage < len(self.stage_params) - 1:
self._current_stage += 1
self._update_to_current_stage(trainer, model)
return super().on_train_epoch_end(trainer, model)
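# Example usage (hedged sketch, not part of the original file): a coarse-to-fine
# schedule; the optional 'bandlimit' and 'scheduler' keys are applied per stage.
# from pytorch_lightning import Trainer
# stages = [
#     {'resolution': 4, 'epochs': 50},                    # 32 x 32
#     {'resolution': 2, 'epochs': 30},                    # 64 x 64
#     {'resolution': 1, 'epochs': 20, 'bandlimit': 0.5},  # 128 x 128
# ]
# trainer = Trainer(callbacks=[ProgressiveResizing(stage_params=stages)])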
| hyena-dna-main | src/callbacks/progressive_resizing.py |
"""
ET Dataset from Informer Paper.
Dataset: https://github.com/zhouhaoyi/ETDataset
Dataloader: https://github.com/zhouhaoyi/Informer2020
"""
from typing import List
import os
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
import torch
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
import warnings
warnings.filterwarnings("ignore")
from src.dataloaders.base import SequenceDataset, default_data_path
class TimeFeature:
def __init__(self):
pass
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
pass
def __repr__(self):
return self.__class__.__name__ + "()"
class SecondOfMinute(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.second / 59.0 - 0.5
class MinuteOfHour(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.minute / 59.0 - 0.5
class HourOfDay(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.hour / 23.0 - 0.5
class DayOfWeek(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.dayofweek / 6.0 - 0.5
class DayOfMonth(TimeFeature):
"""Day of month encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.day - 1) / 30.0 - 0.5
class DayOfYear(TimeFeature):
"""Day of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.dayofyear - 1) / 365.0 - 0.5
class MonthOfYear(TimeFeature):
"""Month of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.month - 1) / 11.0 - 0.5
class WeekOfYear(TimeFeature):
"""Week of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.isocalendar().week - 1) / 52.0 - 0.5
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
"""
Returns a list of time features that will be appropriate for the given frequency string.
Parameters
----------
freq_str
Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
"""
features_by_offsets = {
offsets.YearEnd: [],
offsets.QuarterEnd: [MonthOfYear],
offsets.MonthEnd: [MonthOfYear],
offsets.Week: [DayOfMonth, WeekOfYear],
offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
offsets.Minute: [
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
offsets.Second: [
SecondOfMinute,
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
}
offset = to_offset(freq_str)
for offset_type, feature_classes in features_by_offsets.items():
if isinstance(offset, offset_type):
return [cls() for cls in feature_classes]
supported_freq_msg = f"""
Unsupported frequency {freq_str}
The following frequencies are supported:
Y - yearly
alias: A
M - monthly
W - weekly
D - daily
B - business days
H - hourly
T - minutely
alias: min
S - secondly
"""
raise RuntimeError(supported_freq_msg)
def time_features(dates, timeenc=1, freq="h"):
"""
> `time_features` takes in a `dates` dataframe with a 'date' column and extracts the date down to `freq` where freq can be any of the following if `timeenc` is 0:
> * m - [month]
> * w - [month]
> * d - [month, day, weekday]
> * b - [month, day, weekday]
> * h - [month, day, weekday, hour]
> * t - [month, day, weekday, hour, *minute]
>
> If `timeenc` is 1, a similar, but different list of `freq` values are supported (all encoded between [-0.5 and 0.5]):
> * Q - [month]
> * M - [month]
> * W - [Day of month, week of year]
> * D - [Day of week, day of month, day of year]
> * B - [Day of week, day of month, day of year]
> * H - [Hour of day, day of week, day of month, day of year]
> * T - [Minute of hour*, hour of day, day of week, day of month, day of year]
> * S - [Second of minute, minute of hour, hour of day, day of week, day of month, day of year]
*minute returns a number from 0-3 corresponding to the 15 minute period it falls into.
"""
if timeenc == 0:
dates["month"] = dates.date.apply(lambda row: row.month, 1)
dates["day"] = dates.date.apply(lambda row: row.day, 1)
dates["weekday"] = dates.date.apply(lambda row: row.weekday(), 1)
dates["hour"] = dates.date.apply(lambda row: row.hour, 1)
dates["minute"] = dates.date.apply(lambda row: row.minute, 1)
dates["minute"] = dates.minute.map(lambda x: x // 15)
freq_map = {
"y": [],
"m": ["month"],
"w": ["month"],
"d": ["month", "day", "weekday"],
"b": ["month", "day", "weekday"],
"h": ["month", "day", "weekday", "hour"],
"t": ["month", "day", "weekday", "hour", "minute"],
}
return dates[freq_map[freq.lower()]].values
if timeenc == 1:
dates = pd.to_datetime(dates.date.values)
return np.vstack(
[feat(dates) for feat in time_features_from_frequency_str(freq)]
).transpose(1, 0)
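# Example usage (hedged sketch, not part of the original file): with timeenc=1 and
# hourly frequency, each timestamp is encoded as four features in [-0.5, 0.5].
# df = pd.DataFrame({"date": pd.date_range("2021-01-01", periods=4, freq="H")})
# feats = time_features(df, timeenc=1, freq="h")
# feats.shape  # (4, 4): hour-of-day, day-of-week, day-of-month, day-of-year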
class StandardScaler:
def __init__(self):
self.mean = 0.0
self.std = 1.0
def fit(self, data):
self.mean = data.mean(0)
self.std = data.std(0)
def transform(self, data):
mean = (
torch.from_numpy(self.mean).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.mean
)
std = (
torch.from_numpy(self.std).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.std
)
return (data - mean) / std
def inverse_transform(self, data):
mean = (
torch.from_numpy(self.mean).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.mean
)
std = (
torch.from_numpy(self.std).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.std
)
return (data * std) + mean
class InformerDataset(Dataset):
def __init__(
self,
root_path,
flag="train",
size=None,
features="S",
data_path="ETTh1.csv",
target="OT",
scale=True,
inverse=False,
timeenc=0,
freq="h",
cols=None,
eval_stamp=False,
eval_mask=False,
):
# size [seq_len, label_len, pred_len]
# info
if size is None:
self.seq_len = 24 * 4 * 4
self.label_len = 24 * 4
self.pred_len = 24 * 4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ["train", "test", "val"]
type_map = {"train": 0, "val": 1, "test": 2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols = cols
self.eval_stamp = eval_stamp
self.eval_mask = eval_mask
self.forecast_horizon = self.pred_len
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def _borders(self, df_raw):
num_train = int(len(df_raw) * 0.7)
num_test = int(len(df_raw) * 0.2)
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
border2s = [num_train, num_train + num_vali, len(df_raw)]
return border1s, border2s
def _process_columns(self, df_raw):
if self.cols:
cols = self.cols.copy()
cols.remove(self.target)
else:
cols = list(df_raw.columns)
cols.remove(self.target)
cols.remove("date")
return df_raw[["date"] + cols + [self.target]]
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
df_raw = self._process_columns(df_raw)
border1s, border2s = self._borders(df_raw)
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features == "M" or self.features == "MS":
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features == "S":
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0] : border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[["date"]][border1:border2]
df_stamp["date"] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
seq_x = np.concatenate(
[seq_x, np.zeros((self.pred_len, self.data_x.shape[-1]))], axis=0
)
if self.inverse:
seq_y = np.concatenate(
[
self.data_x[r_begin : r_begin + self.label_len],
self.data_y[r_begin + self.label_len : r_end],
],
0,
)
raise NotImplementedError
else:
# seq_y = self.data_y[r_begin:r_end] # OLD in Informer codebase
seq_y = self.data_y[s_end:r_end]
# OLD in Informer codebase
# seq_x_mark = self.data_stamp[s_begin:s_end]
# seq_y_mark = self.data_stamp[r_begin:r_end]
if self.eval_stamp:
mark = self.data_stamp[s_begin:r_end]
else:
mark = self.data_stamp[s_begin:s_end]
mark = np.concatenate([mark, np.zeros((self.pred_len, mark.shape[-1]))], axis=0)
if self.eval_mask:
mask = np.concatenate([np.zeros(self.seq_len), np.ones(self.pred_len)], axis=0)
else:
mask = np.concatenate([np.zeros(self.seq_len), np.zeros(self.pred_len)], axis=0)
mask = mask[:, None]
# Add the mask to the timestamps: # 480, 5
# mark = np.concatenate([mark, mask[:, np.newaxis]], axis=1)
seq_x = seq_x.astype(np.float32)
seq_y = seq_y.astype(np.float32)
if self.timeenc == 0:
mark = mark.astype(np.int64)
else:
mark = mark.astype(np.float32)
mask = mask.astype(np.int64)
return torch.tensor(seq_x), torch.tensor(seq_y), torch.tensor(mark), torch.tensor(mask)
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
@property
def d_input(self):
return self.data_x.shape[-1]
@property
def d_output(self):
if self.features in ["M", "S"]:
return self.data_x.shape[-1]
elif self.features == "MS":
return 1
else:
raise NotImplementedError
@property
def n_tokens_time(self):
if self.freq == 'h':
return [13, 32, 7, 24]
elif self.freq == 't':
return [13, 32, 7, 24, 4]
else:
raise NotImplementedError
class _Dataset_ETT_hour(InformerDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _borders(self, df_raw):
border1s = [
0,
12 * 30 * 24 - self.seq_len,
12 * 30 * 24 + 4 * 30 * 24 - self.seq_len,
]
border2s = [
12 * 30 * 24,
12 * 30 * 24 + 4 * 30 * 24,
12 * 30 * 24 + 8 * 30 * 24,
]
return border1s, border2s
def _process_columns(self, df_raw):
return df_raw
@property
def n_tokens_time(self):
assert self.freq == "h"
return [13, 32, 7, 24]
class _Dataset_ETT_minute(_Dataset_ETT_hour):
def __init__(self, data_path="ETTm1.csv", freq="t", **kwargs):
super().__init__(data_path=data_path, freq=freq, **kwargs)
def _borders(self, df_raw):
border1s = [
0,
12 * 30 * 24 * 4 - self.seq_len,
12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len,
]
border2s = [
12 * 30 * 24 * 4,
12 * 30 * 24 * 4 + 4 * 30 * 24 * 4,
12 * 30 * 24 * 4 + 8 * 30 * 24 * 4,
]
return border1s, border2s
@property
def n_tokens_time(self):
assert self.freq == "t"
return [13, 32, 7, 24, 4]
class _Dataset_Weather(InformerDataset):
def __init__(self, data_path="WTH.csv", target="WetBulbCelsius", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class _Dataset_ECL(InformerDataset):
def __init__(self, data_path="ECL.csv", target="MT_320", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class InformerSequenceDataset(SequenceDataset):
@property
def n_tokens_time(self):
# Shape of the dates: depends on `timeenc` and `freq`
return self.dataset_train.n_tokens_time # data_stamp.shape[-1]
@property
def d_input(self):
return self.dataset_train.d_input
@property
def d_output(self):
return self.dataset_train.d_output
@property
def l_output(self):
return self.dataset_train.pred_len
def _get_data_filename(self, variant):
return self.variants[variant]
_collate_arg_names = ["mark", "mask"] # Names of the two extra tensors that the InformerDataset returns
def setup(self):
self.data_dir = self.data_dir or default_data_path / 'informer' / self._name_
self.dataset_train = self._dataset_cls(
root_path=self.data_dir,
flag="train",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
self.dataset_val = self._dataset_cls(
root_path=self.data_dir,
flag="val",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
self.dataset_test = self._dataset_cls(
root_path=self.data_dir,
flag="test",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
class ETTHour(InformerSequenceDataset):
_name_ = "etth"
_dataset_cls = _Dataset_ETT_hour
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "ETTh1.csv",
1: "ETTh2.csv",
}
class ETTMinute(InformerSequenceDataset):
_name_ = "ettm"
_dataset_cls = _Dataset_ETT_minute
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "t",
"cols": None,
}
variants = {
0: "ETTm1.csv",
1: "ETTm2.csv",
}
class Weather(InformerSequenceDataset):
_name_ = "weather"
_dataset_cls = _Dataset_Weather
init_defaults = {
"size": None,
"features": "S",
"target": "WetBulbCelsius",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "WTH.csv",
}
class ECL(InformerSequenceDataset):
_name_ = "ecl"
_dataset_cls = _Dataset_ECL
init_defaults = {
"size": None,
"features": "S",
"target": "MT_320",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "ECL.csv",
}
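# Example usage (hedged sketch, not part of the original file): reading the ETTh1
# train split with the low-level dataset class; the root path is a placeholder.
# ds = _Dataset_ETT_hour(root_path="./data/informer/etth", flag="train",
#                        size=[384, 96, 96], features="S", target="OT")
# seq_x, seq_y, mark, mask = ds[0]  # seq_x: (384 + 96, 1) with the horizon zero-padded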
| hyena-dna-main | src/dataloaders/et.py |
from . import et, genomics
from .base import SequenceDataset
| hyena-dna-main | src/dataloaders/__init__.py |
# Adapted from https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py
# Adapted from https://github.com/HazyResearch/flash-attention/blob/main/training/src/datamodules/language_modeling_hf.py
from pathlib import Path
from typing import Any, List, Union
from torch.utils.data import DataLoader, Dataset
from transformers import AutoTokenizer
from src.dataloaders.base import SequenceDataset, default_data_path
from src.dataloaders.fault_tolerant_sampler import RandomFaultTolerantSampler
from src.dataloaders.fault_tolerant_sampler import FaultTolerantDistributedSampler
# genomics datasets
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from src.dataloaders.datasets.hg38_dataset import HG38Dataset
from src.dataloaders.datasets.genomic_bench_dataset import GenomicBenchmarkDataset
from src.dataloaders.datasets.nucleotide_transformer_dataset import NucleotideTransformerDataset
from src.dataloaders.datasets.chromatin_profile_dataset import ChromatinProfileDataset
from src.dataloaders.datasets.species_dataset import SpeciesDataset
from src.dataloaders.datasets.icl_genomics_dataset import ICLGenomicsDataset
from src.dataloaders.datasets.hg38_fixed_dataset import HG38FixedDataset
"""
Dataloaders for genomics datasets, including pretraining and downstream tasks. First works in HyenaDNA project, May 2023.
"""
class HG38(SequenceDataset):
"""
Base class, other dataloaders can inherit from this class.
You must implement the following functions:
- __init__
- setup
You can then use (already have access to) the following functions:
- train_dataloader
- val_dataloader
- test_dataloader
"""
###### very important to set this! ######
_name_ = "hg38" # this name is how the dataset config finds the right dataloader
#########################################
def __init__(self, bed_file, fasta_file, tokenizer_name=None, dataset_config_name=None, max_length=1024, d_output=2, rc_aug=False,
max_length_val=None, max_length_test=None, val_ratio=0.0005, val_split_seed=2357, use_fixed_len_val=False,
add_eos=True, detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None,
*args, **kwargs):
self.dataset_config_name = dataset_config_name
self.tokenizer_name = tokenizer_name
self.d_output = d_output
self.rc_aug = rc_aug # reverse complement augmentation
self.max_length = max_length
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
self.bed_file = bed_file
self.fasta_file = fasta_file
self.use_fixed_len_val = use_fixed_len_val
# handle if file paths are None (default paths)
if self.bed_file is None:
self.bed_file = default_data_path / self._name_ / 'human-sequences.bed'
if self.fasta_file is None:
self.fasta_file = default_data_path / self._name_ / 'hg38.ml.fa'
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
"""Set up the tokenizer and init the datasets."""
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
elif self.tokenizer_name == 'bpe':
print("**using pretrained AIRI tokenizer**")
self.tokenizer = AutoTokenizer.from_pretrained('AIRI-Institute/gena-lm-bert-base')
self.vocab_size = len(self.tokenizer)
self.init_datasets() # creates the datasets. You can also just create this inside the setup() here.
def init_datasets(self):
"""Init the datasets (separate from the tokenizer)"""
# delete old datasets to free memory
if hasattr(self, 'dataset_train'):
self.dataset_train.fasta.seqs.close()
del self.dataset_train.fasta.seqs
# delete old datasets to free memory
if hasattr(self, 'dataset_test'):
self.dataset_test.fasta.seqs.close()
del self.dataset_test.fasta.seqs
# Create all splits: torch datasets
self.dataset_train, self.dataset_val, self.dataset_test = [
HG38Dataset(split=split,
bed_file=self.bed_file,
fasta_file=self.fasta_file,
max_length=max_len,
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
add_eos=self.add_eos,
return_seq_indices=False,
shift_augs=None,
rc_aug=self.rc_aug,
return_augs=False)
for split, max_len in zip(['train', 'valid', 'test'], [self.max_length, self.max_length_val, self.max_length_test])
]
if self.use_fixed_len_val:
# we're placing the fixed test set in the val dataloader, for visualization!!!
# that means we should track the model with test loss, not val loss
# new option to use fixed val set
print("Using fixed length val set!")
# start end of chr14 and chrX grabbed from Enformer
chr_ranges = {'chr14': [19726402, 106677047],
'chrX': [2825622, 144342320],
}
self.dataset_val = HG38FixedDataset(
chr_ranges=chr_ranges,
fasta_file=self.fasta_file,
max_length=self.max_length,
pad_max_length=self.max_length,
tokenizer=self.tokenizer,
add_eos=True,
)
return
def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
""" The train dataloader """
if self.shuffle and self.fault_tolerant:
shuffle = False
# TD [2022-12-26]: We need the distributed_sampler_kwargs in case of model parallel:
# In that case the number of replicas and the data parallel rank are more complicated.
distributed_sampler_kwargs = self.trainer.distributed_sampler_kwargs
sampler = (FaultTolerantDistributedSampler(self.dataset_train,
**self.trainer.distributed_sampler_kwargs)
if self.ddp else RandomFaultTolerantSampler(self.dataset_train))
# TD [2022-08-06]: Only the DDP sampler supports fast-forwarding for now
# We assume that it's being resumed with the same number of GPUs
if self.ddp and self.fast_forward_epochs is not None and self.fast_forward_batches is not None:
sampler.load_state_dict({
'epoch': self.fast_forward_epochs,
'counter': self.fast_forward_batches * self.batch_size
})
else:
shuffle = self.shuffle
sampler = None
return self._data_loader(self.dataset_train, batch_size=self.batch_size,
shuffle=shuffle, sampler=sampler)
def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The val dataloader """
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval)
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader """
return self._data_loader(self.dataset_test, batch_size=self.batch_size_eval)
def _data_loader(self, dataset: Dataset, batch_size: int, shuffle: bool = False,
sampler=None) -> DataLoader:
return DataLoader(
dataset,
batch_size=batch_size,
num_workers=1, # Data is already in memory, we don't need many workers
shuffle=shuffle,
sampler=sampler,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
def load_state_dict(self, checkpoint):
if self.fault_tolerant:
self.fast_forward_epochs = checkpoint['loops']['fit_loop']['epoch_progress']['current']['completed']
# TD [2022-08-07] ['epoch_loop.batch_progress']['total']['completed'] is 1 iteration
# behind, so we're using the optimizer's progress. This is set correctly in seq.py.
self.fast_forward_batches = checkpoint['loops']['fit_loop']['epoch_loop.batch_progress']['current']['completed']
# At this point the train loader hasn't been constructed yet
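# Example usage (hedged sketch, not part of the original file): an hg38 pretraining
# datamodule with the character tokenizer; passing None for the bed/fasta paths
# falls back to the default data paths.
# dm = HG38(bed_file=None, fasta_file=None, tokenizer_name='char',
#           max_length=1024, batch_size=32, shuffle=True)
# dm.setup()
# batch = next(iter(dm.train_dataloader()))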
class GenomicBenchmark(HG38):
_name_ = "genomic_benchmark"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, dataset_name, dest_path=None, tokenizer_name='char', d_output=None, rc_aug=False,
max_length=1024, use_padding=True, max_length_val=None, max_length_test=None,
padding_side='left', val_ratio=0.0005, val_split_seed=2357, add_eos=False,
detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=True, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, *args, **kwargs):
self.dataset_name = dataset_name
self.dest_path = dest_path
self.tokenizer_name = tokenizer_name
self.d_output = d_output
self.rc_aug = rc_aug
self.max_length = max_length
self.use_padding = use_padding
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.padding_side = padding_side
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if self.dest_path is None:
self.dest_path = default_data_path / self._name_
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
padding_side=self.padding_side,
)
# Create all splits: torch datasets (only train/test in this benchmark)
self.dataset_train, self.dataset_val = [
GenomicBenchmarkDataset(split=split,
max_length=max_len,
dataset_name=self.dataset_name,
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
use_padding=self.use_padding,
d_output=self.d_output,
add_eos=self.add_eos,
dest_path=self.dest_path,
rc_aug=self.rc_aug,
return_augs=False)
for split, max_len in zip(['train', 'val'], [self.max_length, self.max_length_val])
]
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader, it's a dummy loader just to make the trainer happy, we don't use it."""
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval)
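# Example usage (hedged sketch, not part of the original file): a GenomicBenchmarks
# fine-tuning datamodule; the dataset name below is illustrative.
# dm = GenomicBenchmark(dataset_name='human_nontata_promoters', tokenizer_name='char',
#                       d_output=2, max_length=512, use_padding=True, batch_size=64)
# dm.setup()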
class NucleotideTransformer(HG38):
_name_ = "nucleotide_transformer"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, dataset_name, dest_path=None, tokenizer_name='char', d_output=None, rc_aug=False,
max_length=1024, use_padding=True, max_length_val=None, max_length_test=None,
padding_side='left', val_ratio=0.0005, val_split_seed=2357, add_eos=False,
detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=True, shuffle_eval=None, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, *args, **kwargs):
self.dataset_name = dataset_name
self.dest_path = dest_path
self.tokenizer_name = tokenizer_name
self.d_output = d_output
self.rc_aug = rc_aug
self.max_length = max_length
self.use_padding = use_padding
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.padding_side = padding_side
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.shuffle_eval = shuffle_eval if shuffle_eval is not None else shuffle # default is to use the same as train shuffle arg
self.pin_memory = pin_memory
self.drop_last = drop_last
if self.dest_path is None:
self.dest_path = default_data_path / self._name_
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
padding_side=self.padding_side,
)
# Create all splits: torch datasets (only train/test in this benchmark)
self.dataset_train, self.dataset_val = [
NucleotideTransformerDataset(split=split,
max_length=max_len,
tokenizer=self.tokenizer, # pass the tokenize wrapper
dataset_name = self.dataset_name,
tokenizer_name=self.tokenizer_name,
use_padding=self.use_padding,
d_output=self.d_output,
add_eos=self.add_eos,
dest_path=self.dest_path,
rc_aug=self.rc_aug,
return_augs=False)
for split, max_len in zip(['train', 'val'], [self.max_length, self.max_length_val])
]
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader, it's a dummy loader just to make the trainer happy, we don't use it."""
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval)
class ChromatinProfile(HG38):
_name_= 'chromatin_profile'
l_output = 0 # need to set this for decoder to work correctly for seq level
def __init__(self, data_path, ref_genome_path, ref_genome_version=None,
tokenizer_name=None, dataset_config_name=None,
max_length=1000, d_output=2, rc_aug=False, add_eos=True, val_only=False,
batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None,
*args, **kwargs):
self.data_path = data_path
self.ref_genome_path = ref_genome_path
self.ref_genome_version = ref_genome_version
self.dataset_config_name = dataset_config_name
self.tokenizer_name = tokenizer_name
self.d_output = d_output
self.rc_aug = rc_aug # reverse complement augmentation
self.max_length = max_length
self.add_eos = add_eos
self.val_only=val_only
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
elif self.tokenizer_name == 'bpe':
print("**using pretrained AIRI tokenizer**")
self.tokenizer = AutoTokenizer.from_pretrained('AIRI-Institute/gena-lm-bert-base')
self.vocab_size = len(self.tokenizer)
# Create all splits: torch datasets
if self.val_only:
splits=['val']*3
else:
splits=['train','val','test']
self.dataset_train, self.dataset_val, self.dataset_test = [
ChromatinProfileDataset(
max_length=self.max_length,
ref_genome_path = self.ref_genome_path,
ref_genome_version = self.ref_genome_version,
coords_target_path = f'{self.data_path}/{split}_{self.ref_genome_version}_coords_targets.csv',
tokenizer=self.tokenizer,
tokenizer_name=self.tokenizer_name,
use_padding=True,
)
for split in splits
]
class Species(HG38):
_name_ = "species"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, species: list, species_dir: str, tokenizer_name=None, dataset_config_name=None, d_output=None, max_length=1024, rc_aug=False,
max_length_val=None, max_length_test=None, cache_dir=None, val_ratio=0.0005, val_split_seed=2357,
add_eos=True, detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, chromosome_weights='uniform', species_weights='uniform',
total_size=None, task='species_classification', remove_tail_ends=False, cutoff_train=0.1, cutoff_test=0.2,
*args, **kwargs):
self.dataset_config_name = dataset_config_name
self.tokenizer_name = tokenizer_name
self.rc_aug = rc_aug # reverse complement augmentation
self.cache_dir = None if cache_dir is None else Path(cache_dir).expanduser()
self.max_length = max_length
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
self.species = species # list of species to load
self.species_dir = species_dir
self.chromosome_weights = chromosome_weights
self.species_weights = species_weights
self.total_size = total_size
self.task = task
self.remove_tail_ends = remove_tail_ends
self.cutoff_train = cutoff_train
self.cutoff_test = cutoff_test
self.d_output = len(self.species)
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
elif self.tokenizer_name == 'bpe':
print("**using pretrained AIRI tokenizer**")
self.tokenizer = AutoTokenizer.from_pretrained('AIRI-Institute/gena-lm-bert-base')
else:
raise ValueError(f"Invalid tokenizer name: {self.tokenizer_name}")
self.vocab_size = len(self.tokenizer)
# Create datasets
self.init_datasets()
def init_datasets(self):
# delete old datasets
# NOTE: For some reason only works to close files for train
if hasattr(self, 'dataset_train'):
for spec in list(self.dataset_train.fastas.keys()):
for chromosome in list(self.dataset_train.fastas[spec].keys()):
self.dataset_train.fastas[spec][chromosome].close()
del self.dataset_train.fastas[spec][chromosome]
if hasattr(self, 'dataset_val'):
pass
if hasattr(self, 'dataset_test'):
pass
# Create all splits: torch datasets
self.dataset_train, self.dataset_val, self.dataset_test = [
SpeciesDataset(species=self.species,
species_dir=self.species_dir,
split=split,
max_length=max_len,
total_size=self.total_size * (1 if split == 'test' else (self.max_length_test + 2) // max_len), # See the same # of tokens every epoch across train/val/test
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
add_eos=self.add_eos,
rc_aug=self.rc_aug,
chromosome_weights=self.chromosome_weights,
species_weights=self.species_weights,
task=self.task,
remove_tail_ends=self.remove_tail_ends,
cutoff_train=self.cutoff_train,
cutoff_test=self.cutoff_test,
)
for split, max_len in zip(['train', 'valid', 'test'], [self.max_length, self.max_length_val, self.max_length_test])
]
return
class ICLGenomics(HG38):
_name_ = "icl_genomics"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, dataset_name, dest_path=None, tokenizer_name='char', d_output=None, rc_aug=False,
max_length=1024, use_padding=True, max_length_val=None, max_length_test=None, shots=1, label_to_token=None,
add_eos=True, characters=None, padding_side='left', val_ratio=0.0005, val_split_seed=2357,
detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=0,
shuffle=True, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None,
use_shmem=True, *args, **kwargs):
self.dataset_name = dataset_name
self.dest_path = dest_path
self.tokenizer_name = tokenizer_name
self.d_output = d_output
self.rc_aug = rc_aug
self.max_length = max_length
self.use_padding = use_padding
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.padding_side = padding_side
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.shots = shots # num shots in ICL sample
self.label_to_token = label_to_token # this maps the label to a token in the vocab already, arbitrary
self.add_eos = add_eos
self.characters = list('ACTGN') if characters is None else characters
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
self.use_shmem = use_shmem
# if self.use_shmem:
# assert cache_dir is not None
def setup(self, stage=None):
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=self.characters,
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
self.vocab_size = len(self.tokenizer)
# Create all splits: torch datasets
self.dataset_train, self.dataset_val = [
ICLGenomicsDataset(
dataset_name=self.dataset_name,
split=split,
shots=self.shots,
use_padding=self.use_padding,
d_output=self.d_output,
max_length=max_len,
dest_path=self.dest_path,
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
label_to_token=self.label_to_token,
rc_aug=self.rc_aug,
add_eos=self.add_eos,
)
for split, max_len in zip(['train', 'val'], [self.max_length, self.max_length_val])
]
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader, it's a dummy loader just to make the trainer happy, we don't use it."""
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval)
class HG38Fixed(HG38):
_name_ = "hg38_fixed"
"""Just used for testing a fixed length, *non-overlapping* dataset for HG38."""
def __init__(self, fasta_file=None, chr_ranges=None, pad_max_length=None, batch_size=32,
max_length=None, num_workers=1, add_eos=True,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, *args, **kwargs):
self.fasta_file = fasta_file
self.chr_ranges = chr_ranges
self.max_length = max_length
self.pad_max_length = pad_max_length
self.add_eos = add_eos
self.batch_size = batch_size
self.batch_size_eval = batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
if self.fasta_file is None:
self.fasta_file = default_data_path / "hg38" / 'hg38.ml.fa'
if self.chr_ranges is None:
# start end of chr14 and chrX grabbed from Enformer
self.chr_ranges = {'chr14': [19726402, 106677047],
'chrX': [2825622, 144342320],
}
def setup(self, stage=None):
# Create tokenizer
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length= self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
# we only need one
self.dataset_train = HG38FixedDataset(
fasta_file=self.fasta_file,
chr_ranges=self.chr_ranges, # a dict of chr: (start, end) to use for test set
max_length=self.max_length,
pad_max_length=self.pad_max_length,
tokenizer=tokenizer,
add_eos=self.add_eos,
)
self.dataset_val = self.dataset_train
self.dataset_test = self.dataset_train
# if __name__ == '__main__':
# """Quick test using dataloader. Can't call from here though."""
# loader = HG38(
# bed_file='/home/exnx/enformer-pytorch/data/basenji/human-sequences.bed',
# fasta_file='/home/exnx/enformer-pytorch/data/basenji/hg38.ml.fa',
# tokenizer_name='char_level', max_length=2000
# )
# breakpoint()
# it = iter(ds)
# elem = next(it)
# print(len(elem))
# breakpoint()
| hyena-dna-main | src/dataloaders/genomics.py |
# Adapted from https://github.com/Lightning-AI/lightning/blob/2845e7565dbe6b765ae32870e7d2bc456529c30a/tests/tests_pytorch/utilities/test_auto_restart.py#L1397
from typing import Iterator
import math
import torch
from torch.utils.data import RandomSampler, DistributedSampler
class RandomFaultTolerantSampler(RandomSampler):
def __init__(self, *args, generator=None, **kwargs):
# generator = torch.Generator().manual_seed(seed)
# super().__init__(*args, generator=generator, **kwargs)
# TD [2022-07-17]: We don't force the seed to be zero. We generate random seed,
# which should be reproducible if pl.seed_everything was called before hand.
# This means that changing the seed of the experiment will also change the
# sampling order.
if generator is None:
seed = int(torch.empty((), dtype=torch.int64).random_().item())
generator = torch.Generator().manual_seed(seed)
super().__init__(*args, generator=generator, **kwargs)
self.counter = 0
# self.start_counter = 0
self.restarting = False
def state_dict(self):
return {"random_state": self.state, "counter": self.counter}
def load_state_dict(self, state_dict):
self.generator.set_state(state_dict.get("random_state"))
self.counter = state_dict["counter"]
# self.start_counter = self.counter
self.restarting = True
# TD [2022-08-28] Setting the len will cause PL to think there are only a few batches left per
# epoch, and subsequent epoch will have very few batches.
# def __len__(self):
# # We need a separate self.start_counter because PL seems to call len repeatedly.
# # If we use len(self.data_source) - self.counter then PL will think the epoch ends
# # when we're only half way through.
# return len(self.data_source) - self.start_counter
def __iter__(self) -> Iterator[int]:
n = len(self.data_source)
self.state = self.generator.get_state()
indices = torch.randperm(n, generator=self.generator).tolist()
if not self.restarting:
self.counter = 0
else:
indices = indices[self.counter:]
self.restarting = False
# self.start_counter = self.counter
for index in indices:
self.counter += 1
yield index
self.counter = 0
# self.start_counter = self.counter
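# Added usage sketch (not part of the original file): how a trainer can checkpoint and
# resume this sampler mid-epoch. The variable names and numbers below are illustrative.
#
#   sampler = RandomFaultTolerantSampler(data_source=range(10))
#   it = iter(sampler)
#   consumed = [next(it) for _ in range(4)]
#   ckpt = sampler.state_dict()              # {'random_state': <generator state>, 'counter': 4}
#   resumed = RandomFaultTolerantSampler(data_source=range(10))
#   resumed.load_state_dict(ckpt)            # restores the RNG state and sets restarting=True
#   remaining = list(resumed)                # yields the 6 indices that were not yet consumed, in order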
class FaultTolerantDistributedSampler(DistributedSampler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.counter = 0
# self.start_counter = 0
self.restarting = False
def state_dict(self):
return {"epoch": self.epoch, "counter": self.counter}
def load_state_dict(self, state_dict):
self.epoch = state_dict["epoch"]
self.counter = state_dict["counter"]
# self.start_counter = self.counter
self.restarting = True
# TD [2022-08-28] Setting the len will cause PL to think there are only a few batches left per
# epoch, and subsequent epoch will have very few batches.
# def __len__(self) -> int:
# return self.num_samples - self.start_counter
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type]
else:
indices = list(range(len(self.dataset))) # type: ignore[arg-type]
if not self.drop_last:
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size <= len(indices):
indices += indices[:padding_size]
else:
indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
else:
# remove tail of data to make it evenly divisible.
indices = indices[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
if not self.restarting:
self.counter = 0
else:
indices = indices[self.counter:]
self.restarting = False
# self.start_counter = self.counter
for index in indices:
self.counter += 1
yield index
self.counter = 0
# self.start_counter = self.counter | hyena-dna-main | src/dataloaders/fault_tolerant_sampler.py |
""" Datasets for core experimental results """
import os
import pickle
from functools import partial
from pathlib import Path
import numpy as np
import torch
import torchvision
from einops import rearrange
from einops.layers.torch import Rearrange
from src.utils import is_list, permutations
from torch.nn import functional as F
def deprecated(cls_or_func):
def _deprecated(*args, **kwargs):
print(f"{cls_or_func} is deprecated")
return cls_or_func(*args, **kwargs)
return _deprecated
# Default data path is environment variable or hippo/data
if (default_data_path := os.getenv("DATA_PATH")) is None:
default_data_path = Path(__file__).parent.parent.parent.absolute()
default_data_path = default_data_path / "data"
else:
default_data_path = Path(default_data_path).absolute()
class DefaultCollateMixin:
"""Controls collating in the DataLoader
The CollateMixin classes instantiate a dataloader by separating collate arguments with the rest of the dataloader arguments. Instantiations of this class should modify the callback functions as desired, and modify the collate_args list. The class then defines a _dataloader() method which takes in a DataLoader constructor and arguments, constructs a collate_fn based on the collate_args, and passes the rest of the arguments into the constructor.
"""
@classmethod
def _collate_callback(cls, x, *args, **kwargs):
"""
Modify the behavior of the default _collate method.
"""
return x
_collate_arg_names = []
@classmethod
def _return_callback(cls, return_value, *args, **kwargs):
"""
Modify the return value of the collate_fn.
Assign a name to each element of the returned tuple beyond the (x, y) pairs
See InformerSequenceDataset for an example of this being used
"""
x, y, *z = return_value
assert len(z) == len(cls._collate_arg_names), "Specify a name for each auxiliary data item returned by dataset"
return x, y, {k: v for k, v in zip(cls._collate_arg_names, z)}
@classmethod
def _collate(cls, batch, *args, **kwargs):
# From https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py
elem = batch[0]
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum(x.numel() for x in batch)
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
x = torch.stack(batch, dim=0, out=out)
# Insert custom functionality into the collate_fn
x = cls._collate_callback(x, *args, **kwargs)
return x
else:
return torch.tensor(batch)
@classmethod
def _collate_fn(cls, batch, *args, **kwargs):
"""
Default collate function.
Generally accessed by the dataloader() methods to pass into torch DataLoader
Arguments:
batch: list of (x, y) pairs
args, kwargs: extra arguments that get passed into the _collate_callback and _return_callback
"""
x, y, *z = zip(*batch)
x = cls._collate(x, *args, **kwargs)
y = cls._collate(y)
z = [cls._collate(z_) for z_ in z]
return_value = (x, y, *z)
return cls._return_callback(return_value, *args, **kwargs)
# List of loader arguments to pass into collate_fn
collate_args = []
def _dataloader(self, dataset, **loader_args):
collate_args = {k: loader_args[k] for k in loader_args if k in self.collate_args}
loader_args = {k: loader_args[k] for k in loader_args if k not in self.collate_args}
loader_cls = loader_registry[loader_args.pop("_name_", None)]
return loader_cls(
dataset=dataset,
collate_fn=partial(self._collate_fn, **collate_args),
**loader_args,
)
class SequenceResolutionCollateMixin(DefaultCollateMixin):
"""self.collate_fn(resolution) produces a collate function that subsamples elements of the sequence"""
@classmethod
def _collate_callback(cls, x, resolution=None):
if resolution is None:
pass
else:
# Assume x is (B, L_0, L_1, ..., L_k, C) for x.ndim > 2 and (B, L) for x.ndim = 2
assert x.ndim >= 2
n_resaxes = max(1, x.ndim - 2) # [AG 22/07/02] this line looks suspicious... are there cases with 2 axes?
# rearrange: b (l_0 res_0) (l_1 res_1) ... (l_k res_k) ... -> res_0 res_1 .. res_k b l_0 l_1 ...
lhs = "b " + " ".join([f"(l{i} res{i})" for i in range(n_resaxes)]) + " ..."
rhs = " ".join([f"res{i}" for i in range(n_resaxes)]) + " b " + " ".join([f"l{i}" for i in range(n_resaxes)]) + " ..."
x = rearrange(x, lhs + " -> " + rhs, **{f'res{i}': resolution for i in range(n_resaxes)})
x = x[tuple([0] * n_resaxes)]
return x
@classmethod
def _return_callback(cls, return_value, resolution=None):
return *return_value, {"rate": resolution}
collate_args = ['resolution']
class ImageResolutionCollateMixin(SequenceResolutionCollateMixin):
"""self.collate_fn(resolution, img_size) produces a collate function that resizes inputs to size img_size/resolution"""
_interpolation = torchvision.transforms.InterpolationMode.BILINEAR
_antialias = True
@classmethod
def _collate_callback(cls, x, resolution=None, img_size=None, channels_last=True):
if x.ndim < 4:
return super()._collate_callback(x, resolution=resolution)
if img_size is None:
x = super()._collate_callback(x, resolution=resolution)
else:
x = rearrange(x, 'b ... c -> b c ...') if channels_last else x
_size = round(img_size/resolution)
x = torchvision.transforms.functional.resize(
x,
size=[_size, _size],
interpolation=cls._interpolation,
antialias=cls._antialias,
)
x = rearrange(x, 'b c ... -> b ... c') if channels_last else x
return x
@classmethod
def _return_callback(cls, return_value, resolution=None, img_size=None, channels_last=True):
return *return_value, {"rate": resolution}
collate_args = ['resolution', 'img_size', 'channels_last']
# class SequenceDataset(LightningDataModule):
# [21-09-10 AG] Subclassing LightningDataModule fails due to trying to access _has_setup_fit. No idea why. So we just provide our own class with the same core methods as LightningDataModule (e.g. setup)
class SequenceDataset(DefaultCollateMixin):
registry = {}
_name_ = NotImplementedError("Dataset must have shorthand name")
# Since subclasses do not specify __init__ which is instead handled by this class
# Subclasses can provide a list of default arguments which are automatically registered as attributes
# TODO it might be possible to write this as a @dataclass, but it seems tricky to separate from the other features of this class such as the _name_ and d_input/d_output
@property
def init_defaults(self):
return {}
# https://www.python.org/dev/peps/pep-0487/#subclass-registration
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.registry[cls._name_] = cls
def __init__(self, _name_, data_dir=None, **dataset_cfg):
assert _name_ == self._name_
self.data_dir = Path(data_dir).absolute() if data_dir is not None else None
# Add all arguments to self
init_args = self.init_defaults.copy()
init_args.update(dataset_cfg)
for k, v in init_args.items():
setattr(self, k, v)
# The train, val, test datasets must be set by `setup()`
self.dataset_train = self.dataset_val = self.dataset_test = None
self.init()
def init(self):
"""Hook called at end of __init__, override this instead of __init__"""
pass
def setup(self):
"""This method should set self.dataset_train, self.dataset_val, and self.dataset_test."""
raise NotImplementedError
def split_train_val(self, val_split):
"""
Randomly split self.dataset_train into a new (self.dataset_train, self.dataset_val) pair.
"""
train_len = int(len(self.dataset_train) * (1.0 - val_split))
self.dataset_train, self.dataset_val = torch.utils.data.random_split(
self.dataset_train,
(train_len, len(self.dataset_train) - train_len),
generator=torch.Generator().manual_seed(
getattr(self, "seed", 42)
), # PL is supposed to have a way to handle seeds properly, but doesn't seem to work for us
)
def train_dataloader(self, **kwargs):
return self._train_dataloader(self.dataset_train, **kwargs)
def _train_dataloader(self, dataset, **kwargs):
if dataset is None: return
kwargs['shuffle'] = 'sampler' not in kwargs # shuffle can't be True if we have a custom sampler
return self._dataloader(dataset, **kwargs)
def val_dataloader(self, **kwargs):
return self._eval_dataloader(self.dataset_val, **kwargs)
def test_dataloader(self, **kwargs):
return self._eval_dataloader(self.dataset_test, **kwargs)
def _eval_dataloader(self, dataset, **kwargs):
if dataset is None: return
# Note that shuffle=False by default
return self._dataloader(dataset, **kwargs)
def __str__(self):
return self._name_
class ResolutionSequenceDataset(SequenceDataset, SequenceResolutionCollateMixin):
def _train_dataloader(self, dataset, train_resolution=None, eval_resolutions=None, **kwargs):
if train_resolution is None: train_resolution = [1]
if not is_list(train_resolution): train_resolution = [train_resolution]
assert len(train_resolution) == 1, "Only one train resolution supported for now."
return super()._train_dataloader(dataset, resolution=train_resolution[0], **kwargs)
def _eval_dataloader(self, dataset, train_resolution=None, eval_resolutions=None, **kwargs):
if dataset is None: return
if eval_resolutions is None: eval_resolutions = [1]
if not is_list(eval_resolutions): eval_resolutions = [eval_resolutions]
dataloaders = []
for resolution in eval_resolutions:
dataloaders.append(super()._eval_dataloader(dataset, resolution=resolution, **kwargs))
return (
{
None if res == 1 else str(res): dl
for res, dl in zip(eval_resolutions, dataloaders)
}
if dataloaders is not None else None
)
class ImageResolutionSequenceDataset(ResolutionSequenceDataset, ImageResolutionCollateMixin):
pass
# Registry for dataloader class
loader_registry = {
None: torch.utils.data.DataLoader, # default case
}
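if __name__ == "__main__":
    # Added sketch (not from the original repo): shows how subclass registration,
    # init_defaults, and the default collate path fit together. "DummySequenceDataset"
    # and its random tensors are illustrative assumptions, not a real dataloader here.
    # Run as a module, e.g. `python -m src.dataloaders.base`.
    class DummySequenceDataset(SequenceDataset):
        _name_ = "dummy"
        @property
        def init_defaults(self):
            return {"n_samples": 8, "seq_len": 4}
        def setup(self):
            x = torch.randn(self.n_samples, self.seq_len)
            y = torch.randint(0, 2, (self.n_samples,))
            self.dataset_train = torch.utils.data.TensorDataset(x, y)
            self.split_train_val(val_split=0.25)
            self.dataset_test = self.dataset_val
    dm = SequenceDataset.registry["dummy"]("dummy")
    dm.setup()
    xb, yb, _ = next(iter(dm.train_dataloader(batch_size=4)))
    print(xb.shape, yb.shape)  # expected: torch.Size([4, 4]) torch.Size([4])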
| hyena-dna-main | src/dataloaders/base.py |
import torch
import csv
import pandas as pd
import numpy as np
from tqdm import tqdm
import liftover
from pathlib import Path
from pyfaidx import Fasta
from random import randrange, random
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
class FastaInterval():
def __init__(
self,
*,
fasta_file,
# max_length = None,
return_seq_indices = False,
shift_augs = None,
rc_aug = False
):
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file))
self.return_seq_indices = return_seq_indices
# self.max_length = max_length # -1 for adding sos or eos token
self.shift_augs = shift_augs
self.rc_aug = rc_aug
# calc len of each chromosome in fasta file, store in dict
self.chr_lens = {}
for chr_name in self.seqs.keys():
# remove tail end, might be gibberish code
# truncate_len = int(len(self.seqs[chr_name]) * 0.9)
# self.chr_lens[chr_name] = truncate_len
self.chr_lens[chr_name] = len(self.seqs[chr_name])
def __call__(self, chr_name, start, end, max_length, return_augs = False):
"""
max_length passed from dataset, not from init
"""
interval_length = end - start
chromosome = self.seqs[chr_name]
# chromosome_length = len(chromosome)
chromosome_length = self.chr_lens[chr_name]
if exists(self.shift_augs):
min_shift, max_shift = self.shift_augs
max_shift += 1
min_shift = max(start + min_shift, 0) - start
max_shift = min(end + max_shift, chromosome_length) - end
rand_shift = randrange(min_shift, max_shift)
start += rand_shift
end += rand_shift
left_padding = right_padding = 0
# checks if not enough sequence to fill up the start to end
if interval_length < max_length:
extra_seq = max_length - interval_length
extra_left_seq = extra_seq // 2
extra_right_seq = extra_seq - extra_left_seq
start -= extra_left_seq
end += extra_right_seq
if start < 0:
left_padding = -start
start = 0
if end > chromosome_length:
right_padding = end - chromosome_length
end = chromosome_length
# Added support! need to allow shorter seqs
if interval_length > max_length:
end = start + max_length
seq = str(chromosome[start:end])
if self.rc_aug and coin_flip():
seq = string_reverse_complement(seq)
seq = ('.' * left_padding) + seq + ('.' * right_padding)
return seq
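# Added usage sketch (not part of the original file): FastaInterval centers short intervals
# inside a window of `max_length` bp and pads any overhang past the chromosome ends with '.'.
# The fasta path below is an assumption.
#
#   fasta = FastaInterval(fasta_file='hg38.ml.fa')
#   seq = fasta('chr1', 10_000, 10_200, max_length=1000)   # 200 bp interval -> 1000 bp window
#   assert len(seq) == 1000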
class ChromatinProfileDataset(torch.utils.data.Dataset):
'''
Recreation of chromatin profile prediction benchmark from BigBird paper https://arxiv.org/abs/2007.14062
Original sequence coordinates and target labels are provided via a csv.
Original sequences have a length of 1000. This is changed to be max_length on the fly.
Target labels are read into a LongTensor. Coordinates are read into a DataFrame with columns "Chr_No" (0-based), "Start" and "End".
Original coordinates are in hg19 format named as train_hg19_coords_targets.csv etc.
Hg19 coordinates will be translated to hg38 if ref_genome_version=='hg38'.
The translated coordinates can be saved to a new file, e.g. train_hg38_coords_targets.csv, so this only needs to be done once.
Each item is a (tokenized sequence, multi-label target) pair.
'''
def __init__(
self,
max_length,
ref_genome_path=None,
ref_genome_version=None,
coords_target_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=False,
return_seq_indices=False,
shift_augs=None,
rc_aug=False,
return_augs=False,
save_liftover=False,
):
self.max_length = max_length
assert max_length%2==0 # check window is divisible by 2
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.rc_aug = rc_aug
self.ref_genome_version = ref_genome_version
# self.ref_genome = FastaInterval(fasta_file=ref_genome_path, max_length=self.max_length)
self.ref_genome = FastaInterval(fasta_file=ref_genome_path)
# Original data coordinates are from hg19.
# If ref genome is hg38 and original coordinates are provided these must be translated by liftover.
# Conversion only needs to be done once so save liftover coordinates to file optionally.
if self.ref_genome_version=='hg19':
if 'hg19' in coords_target_path.split('/')[-1]:
self.load_csv_data(coords_target_path)
else:
raise ValueError('Make sure data coordinates are in hg19 format (and put "hg19" in filename)')
elif self.ref_genome_version=='hg38':
if 'hg38' in coords_target_path.split('/')[-1]:
self.load_csv_data(coords_target_path)
elif 'hg19' in coords_target_path.split('/')[-1]:
self.load_csv_data(coords_target_path)
print('ref_genome_version = "hg38" but target coordinates are labelled "hg19"')
self.convert_coordinates(coords_target_path, save_liftover)
else:
raise ValueError('Make sure data coordinates have correct hg19/hg38 in filename')
else:
raise ValueError('ref_genome_version must be "hg19" or "hg38"')
# Move start/end to new window
# Window = 1000 used in raw coordinate data
self.coords['Start'] = self.coords['Start']-int((max_length-1000)/2)
self.coords['End'] = self.coords['End']+int((max_length-1000)/2)
def load_csv_data(self, coords_target_path):
# Grab sequence coordinates from csv
self.coords = pd.read_csv(
coords_target_path,
usecols=['Chr_No','Start','End'],
dtype={'Chr_No':np.int64,'Start':np.int64,'End':np.int64}
).reset_index(drop=True) # Note Chr_No is zero-based
# Quickly grab target column names
with open(coords_target_path, "r") as f:
reader = csv.reader(f)
header = next(reader)
self.target_columns = [col for col in header if col[:2]=='y_' ]
# Grab targets from csv and convert to torch long format
self.targets = torch.from_numpy(
pd.read_csv(
coords_target_path,
usecols=self.target_columns,
dtype={k:bool for k in self.target_columns}
).to_numpy()
).long()
def __len__(self):
return len(self.coords)
def __getitem__(self, idx):
y = self.targets[idx]
coord = self.coords.iloc[idx]
seq = self.ref_genome(
'chr{}'.format(coord['Chr_No']+1), # Make chromosome id 1-based
coord['Start'],
coord['End'],
max_length=self.max_length,
)
# # apply rc_aug here if using
# if self.rc_aug and coin_flip():
# seq = string_reverse_complement(seq)
if self.tokenizer==None:
return seq, y
x = self.tokenizer(seq.upper()) # Apply upper() in case the ref genome is soft-masked
x = torch.LongTensor(x["input_ids"]) # Grab input ids and convert to LongTensor
return x, y
def convert_coordinates(self, coords_target_path, save_liftover):
'''
Loop through coordinates and translate from hg19 to hg38.
Filter entries where liftover fails.
Save this to file so we only have to do it once.
'''
converter = liftover.get_lifter('hg19', 'hg38')
print("Translating coordinates from hg19 to hg38:")
for i in tqdm(range(len(self.coords))):
row = self.coords.iloc[i]
new_start = converter['chr{}'.format(row['Chr_No']+1)][row['Start']]
new_end = converter['chr{}'.format(row['Chr_No']+1)][row['End']]
if (len(new_start) == 0) or (len(new_end) == 0):
# If liftover fails set -999 for filtering
self.coords.loc[i, 'Start'] = -999  # use .loc; chained iloc assignment writes to a copy
else:
self.coords.loc[i, 'Start'] = new_start[0][1]
self.coords.loc[i, 'End'] = new_end[0][1]
# Filter unmapped coordinates
n_before = len(self.coords)
self.coords = self.coords.query('Start!=-999')
n_after = len(self.coords)
print('Filtered {} unmapped coordinates. There are {} samples remaining'.format(n_before-n_after, n_after))
# Filter incorrect window sizes
n_before=n_after
self.coords = self.coords.query('End-Start==1000')
n_after = len(self.coords)
print('Filtered {} incorrect window sizes. There are {} samples remaining'.format(n_before-n_after, n_after))
# Reindex targets based on filtered coordinates and reset coordinate index
self.targets = self.targets[self.coords.index.to_numpy()]
self.coords.reset_index(inplace=True, names=['filter_index'])
assert len(self.targets) == len(self.coords) # Sanity check
if save_liftover: # save liftover coords in original format and change filename accordingly
hg38_coords_targets = pd.concat([self.coords, pd.DataFrame(columns=self.target_columns, data=self.targets)], axis=1)
print('Saving translated and filtered data to {}'.format(coords_target_path.replace('hg19','hg38')))
hg38_coords_targets.to_csv(coords_target_path.replace('hg19','hg38'))
del hg38_coords_targets | hyena-dna-main | src/dataloaders/datasets/chromatin_profile_dataset.py |
from pyfaidx import Fasta
import torch
from random import random
from pathlib import Path
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
class NucleotideTransformerDataset(torch.utils.data.Dataset):
'''
Loops through the split's fasta file of a Nucleotide Transformer benchmark task.
Each item is a (tokenized sequence, label) pair.
'''
def __init__(
self,
split,
max_length,
dataset_name=None,
d_output=2, # default binary classification
dest_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=False,
rc_aug=False,
return_augs=False
):
self.max_length = max_length
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.d_output = d_output # needed for decoder to grab
self.rc_aug = rc_aug
# change "val" split to "test". No val available, just test
if split == "val":
split = "test"
# use Path object
base_path = Path(dest_path) / dataset_name
assert base_path.exists(), 'path to fasta file must exist'
for file in (base_path.iterdir()):
if str(file).endswith('.fasta') and split in str(file):
self.seqs = Fasta(str(file), read_long_names=True)
self.label_mapper = {}
for i, key in enumerate(self.seqs.keys()):
self.label_mapper[i] = (key, int(key.rstrip()[-1]))
def __len__(self):
return len(self.seqs.keys())
def __getitem__(self, idx):
seq_id = self.label_mapper[idx][0]
x = self.seqs[seq_id][:].seq # only one sequence
y = self.label_mapper[idx][1] # 0 or 1 for binary classification
# apply rc_aug here if using
if self.rc_aug and coin_flip():
x = string_reverse_complement(x)
seq = self.tokenizer(x,
add_special_tokens=False,
padding="max_length" if self.use_padding else None,
max_length=self.max_length,
truncation=True,
) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
# wrap the scalar label so the decoder gets a 1-element tensor
target = torch.LongTensor([y])
return seq, target
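if __name__ == '__main__':
    """Added quick-test sketch, mirroring genomic_bench_dataset.py.
    Assumes the benchmark fastas live under data/nucleotide_transformer/<dataset_name>/
    with train/test splits; the dataset name below is an assumption.
    example: python -m src.dataloaders.datasets.nucleotide_transformer_dataset
    """
    max_length = 512
    tokenizer = CharacterTokenizer(
        characters=['A', 'C', 'G', 'T', 'N'],
        model_max_length=max_length + 2,  # room for the special tokens, cropped later
        add_special_tokens=False,
        padding_side='left',
    )
    ds = NucleotideTransformerDataset(
        split='train',
        max_length=max_length,
        dataset_name='enhancers',
        dest_path='data/nucleotide_transformer/',
        tokenizer=tokenizer,
        tokenizer_name='char',
        use_padding=True,
    )
    seq, target = ds[0]
    print(seq.shape, target.shape)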
| hyena-dna-main | src/dataloaders/datasets/nucleotide_transformer_dataset.py |
from itertools import islice
from functools import partial
import os
import functools
# import json
# from pathlib import Path
# from pyfaidx import Fasta
# import polars as pl
# import pandas as pd
import torch
from torch.nn import functional as F  # needed by seq_indices_to_one_hot below
from random import randrange, random
import numpy as np
from pathlib import Path
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from genomic_benchmarks.data_check import info
from genomic_benchmarks.data_check import list_datasets
from genomic_benchmarks.loc2seq import download_dataset
from genomic_benchmarks.dataset_getters import pytorch_datasets
from genomic_benchmarks.data_check import is_downloaded
from src.dataloaders.base import default_data_path
"""
Genomic Benchmarks Dataset, from:
https://github.com/ML-Bioinfo-CEITEC/genomic_benchmarks
"""
# helper functions
def exists(val):
return val is not None
def identity(t):
return t
def cast_list(t):
return t if isinstance(t, list) else [t]
def coin_flip():
return random() > 0.5
# genomic function transforms
seq_indices_embed = torch.zeros(256).long()
seq_indices_embed[ord('a')] = 0
seq_indices_embed[ord('c')] = 1
seq_indices_embed[ord('g')] = 2
seq_indices_embed[ord('t')] = 3
seq_indices_embed[ord('n')] = 4
seq_indices_embed[ord('A')] = 0
seq_indices_embed[ord('C')] = 1
seq_indices_embed[ord('G')] = 2
seq_indices_embed[ord('T')] = 3
seq_indices_embed[ord('N')] = 4
seq_indices_embed[ord('.')] = -1
one_hot_embed = torch.zeros(256, 4)
one_hot_embed[ord('a')] = torch.Tensor([1., 0., 0., 0.])
one_hot_embed[ord('c')] = torch.Tensor([0., 1., 0., 0.])
one_hot_embed[ord('g')] = torch.Tensor([0., 0., 1., 0.])
one_hot_embed[ord('t')] = torch.Tensor([0., 0., 0., 1.])
one_hot_embed[ord('n')] = torch.Tensor([0., 0., 0., 0.])
one_hot_embed[ord('A')] = torch.Tensor([1., 0., 0., 0.])
one_hot_embed[ord('C')] = torch.Tensor([0., 1., 0., 0.])
one_hot_embed[ord('G')] = torch.Tensor([0., 0., 1., 0.])
one_hot_embed[ord('T')] = torch.Tensor([0., 0., 0., 1.])
one_hot_embed[ord('N')] = torch.Tensor([0., 0., 0., 0.])
one_hot_embed[ord('.')] = torch.Tensor([0.25, 0.25, 0.25, 0.25])
reverse_complement_map = torch.Tensor([3, 2, 1, 0, 4]).long()
def torch_fromstring(seq_strs):
batched = not isinstance(seq_strs, str)
seq_strs = cast_list(seq_strs)
np_seq_chrs = list(map(lambda t: np.fromstring(t, dtype = np.uint8), seq_strs))
seq_chrs = list(map(torch.from_numpy, np_seq_chrs))
return torch.stack(seq_chrs) if batched else seq_chrs[0]
def str_to_seq_indices(seq_strs):
seq_chrs = torch_fromstring(seq_strs)
return seq_indices_embed[seq_chrs.long()]
def str_to_one_hot(seq_strs):
seq_chrs = torch_fromstring(seq_strs)
return one_hot_embed[seq_chrs.long()]
def seq_indices_to_one_hot(t, padding = -1):
is_padding = t == padding
t = t.clamp(min = 0)
one_hot = F.one_hot(t, num_classes = 5)
out = one_hot[..., :4].float()
out = out.masked_fill(is_padding[..., None], 0.25)
return out
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
def seq_indices_reverse_complement(seq_indices):
complement = reverse_complement_map[seq_indices.long()]
return torch.flip(complement, dims = (-1,))
def one_hot_reverse_complement(one_hot):
*_, n, d = one_hot.shape
assert d == 4, 'must be one hot encoding with last dimension equal to 4'
return torch.flip(one_hot, (-1, -2))
class GenomicBenchmarkDataset(torch.utils.data.Dataset):
'''
Loads a Genomic Benchmarks task from its per-class text files (downloaded on first use).
Each item is a (tokenized sequence, label) pair.
'''
def __init__(
self,
split,
max_length,
dataset_name="human_nontata_promoters",
d_output=2, # default binary classification
dest_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=False,
rc_aug=False,
return_augs=False
):
self.max_length = max_length
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.d_output = d_output # needed for decoder to grab
self.rc_aug = rc_aug
if not is_downloaded(dataset_name, cache_path=dest_path):
print("downloading {} to {}".format(dataset_name, dest_path))
download_dataset(dataset_name, version=0, dest_path=dest_path)
else:
print("already downloaded {}-{}".format(split, dataset_name))
# change "val" split to "test". No val available, just test
if split == "val":
split = "test"
# use Path object
base_path = Path(dest_path) / dataset_name / split
self.all_paths = []
self.all_labels = []
label_mapper = {}
for i, x in enumerate(base_path.iterdir()):
label_mapper[x.stem] = i
for label_type in label_mapper.keys():
for x in (base_path / label_type).iterdir():
self.all_paths.append(x)
self.all_labels.append(label_mapper[label_type])
def __len__(self):
return len(self.all_paths)
def __getitem__(self, idx):
txt_path = self.all_paths[idx]
with open(txt_path, "r") as f:
content = f.read()
x = content
y = self.all_labels[idx]
# apply rc_aug here if using
if self.rc_aug and coin_flip():
x = string_reverse_complement(x)
seq = self.tokenizer(x,
add_special_tokens=False,
padding="max_length" if self.use_padding else None,
max_length=self.max_length,
truncation=True,
) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
# wrap the scalar label so the decoder gets a 1-element tensor
target = torch.LongTensor([y])
return seq, target
if __name__ == '__main__':
"""Quick test loading dataset.
example
python -m src.dataloaders.datasets.genomic_bench_dataset
"""
max_length = 300 # max len of seq grabbed
use_padding = True
dest_path = "data/genomic_benchmark/"
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
# not sure why tokenizer needs max len
model_max_length=max_length + 2, # add 2 since the default adds cls/eos tokens, cropped later
add_special_tokens=False,
padding_side='left',
)
ds = GenomicBenchmarkDataset(
max_length = max_length,
use_padding = use_padding,
split = 'train', #
tokenizer=tokenizer,
tokenizer_name='char',
dest_path=dest_path,
# add_eos=False,
)
# it = iter(ds)
# elem = next(it)
# print('elem[0].shape', elem[0].shape)
# print(elem)
# breakpoint() | hyena-dna-main | src/dataloaders/datasets/genomic_bench_dataset.py |
"""
From: https://github.com/dariush-bahrami/character-tokenizer/blob/master/charactertokenizer/core.py
CharacterTokenizer for Hugging Face Transformers.
This is heavily inspired from CanineTokenizer in transformers package.
"""
import json
import os
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Union
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
class CharacterTokenizer(PreTrainedTokenizer):
def __init__(self, characters: Sequence[str], model_max_length: int, padding_side: str='left', **kwargs):
"""Character tokenizer for Hugging Face transformers.
Args:
characters (Sequence[str]): List of desired characters. Any character which
is not included in this list will be replaced by a special token called
[UNK] with id=6. The following is a list of all of the special tokens with
their corresponding ids:
"[CLS]": 0
"[SEP]": 1
"[BOS]": 2
"[MASK]": 3
"[PAD]": 4
"[RESERVED]": 5
"[UNK]": 6
an id (starting at 7) will be assigned to each character.
model_max_length (int): Model maximum sequence length.
"""
self.characters = characters
self.model_max_length = model_max_length
bos_token = AddedToken("[BOS]", lstrip=False, rstrip=False)
eos_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
sep_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
cls_token = AddedToken("[CLS]", lstrip=False, rstrip=False)
pad_token = AddedToken("[PAD]", lstrip=False, rstrip=False)
unk_token = AddedToken("[UNK]", lstrip=False, rstrip=False)
mask_token = AddedToken("[MASK]", lstrip=True, rstrip=False)
super().__init__(
bos_token=bos_token,
eos_token=sep_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
unk_token=unk_token,
add_prefix_space=False,
model_max_length=model_max_length,
padding_side=padding_side,
**kwargs,
)
self._vocab_str_to_int = {
"[CLS]": 0,
"[SEP]": 1,
"[BOS]": 2,
"[MASK]": 3,
"[PAD]": 4,
"[RESERVED]": 5,
"[UNK]": 6,
**{ch: i + 7 for i, ch in enumerate(characters)},
}
self._vocab_int_to_str = {v: k for k, v in self._vocab_str_to_int.items()}
@property
def vocab_size(self) -> int:
return len(self._vocab_str_to_int)
def _tokenize(self, text: str) -> List[str]:
return list(text)
def _convert_token_to_id(self, token: str) -> int:
return self._vocab_str_to_int.get(token, self._vocab_str_to_int["[UNK]"])
def _convert_id_to_token(self, index: int) -> str:
return self._vocab_int_to_str[index]
def convert_tokens_to_string(self, tokens):
return "".join(tokens)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = cls + token_ids_0 + sep
if token_ids_1 is not None:
result += token_ids_1 + sep
return result
def get_special_tokens_mask(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False,
) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0,
token_ids_1=token_ids_1,
already_has_special_tokens=True,
)
result = [1] + ([0] * len(token_ids_0)) + [1]
if token_ids_1 is not None:
result += ([0] * len(token_ids_1)) + [1]
return result
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = len(cls + token_ids_0 + sep) * [0]
if token_ids_1 is not None:
result += len(token_ids_1 + sep) * [1]
return result
def get_config(self) -> Dict:
return {
"char_ords": [ord(ch) for ch in self.characters],
"model_max_length": self.model_max_length,
}
@classmethod
def from_config(cls, config: Dict) -> "CharacterTokenizer":
cfg = {}
cfg["characters"] = [chr(i) for i in config["char_ords"]]
cfg["model_max_length"] = config["model_max_length"]
return cls(**cfg)
def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
cfg = self.get_config()
with open(cfg_file, "w") as f:
json.dump(cfg, f, indent=4)
@classmethod
def from_pretrained(cls, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
with open(cfg_file) as f:
cfg = json.load(f)
return cls.from_config(cfg) | hyena-dna-main | src/dataloaders/datasets/hg38_char_tokenizer.py |
from pathlib import Path
from pyfaidx import Fasta
import polars as pl
import pandas as pd
import torch
from random import randrange, random
import numpy as np
"""
Dataset for sampling arbitrary intervals from the human genome.
"""
# helper functions
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
class FastaInterval():
def __init__(
self,
*,
fasta_file,
# max_length = None,
return_seq_indices = False,
shift_augs = None,
rc_aug = False
):
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file))
self.return_seq_indices = return_seq_indices
# self.max_length = max_length # -1 for adding sos or eos token
self.shift_augs = shift_augs
self.rc_aug = rc_aug
# calc len of each chromosome in fasta file, store in dict
self.chr_lens = {}
for chr_name in self.seqs.keys():
# remove tail end, might be gibberish code
# truncate_len = int(len(self.seqs[chr_name]) * 0.9)
# self.chr_lens[chr_name] = truncate_len
self.chr_lens[chr_name] = len(self.seqs[chr_name])
def __call__(self, chr_name, start, end, max_length, return_augs = False):
"""
max_length passed from dataset, not from init
"""
interval_length = end - start
chromosome = self.seqs[chr_name]
# chromosome_length = len(chromosome)
chromosome_length = self.chr_lens[chr_name]
if exists(self.shift_augs):
min_shift, max_shift = self.shift_augs
max_shift += 1
min_shift = max(start + min_shift, 0) - start
max_shift = min(end + max_shift, chromosome_length) - end
rand_shift = randrange(min_shift, max_shift)
start += rand_shift
end += rand_shift
# checks if not enough sequence to fill up the start to end
if interval_length < max_length:
extra_seq = max_length - interval_length
extra_left_seq = extra_seq // 2
extra_right_seq = extra_seq - extra_left_seq
start -= extra_left_seq
end += extra_right_seq
if start < 0:
start = 0
if end > chromosome_length:
end = chromosome_length
# Added support! need to allow shorter seqs
if interval_length > max_length:
end = start + max_length
seq = str(chromosome[start:end])
if self.rc_aug and coin_flip():
seq = string_reverse_complement(seq)
return seq
class HG38Dataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
'''
def __init__(
self,
split,
bed_file,
fasta_file,
max_length,
pad_max_length=None,
tokenizer=None,
tokenizer_name=None,
add_eos=False,
return_seq_indices=False,
shift_augs=None,
rc_aug=False,
return_augs=False
):
self.max_length = max_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
bed_path = Path(bed_file)
assert bed_path.exists(), 'path to .bed file must exist'
# read bed file
df_raw = pd.read_csv(str(bed_path), sep = '\t', names=['chr_name', 'start', 'end', 'split'])
# select only split df
self.df = df_raw[df_raw['split'] == split]
self.fasta = FastaInterval(
fasta_file = fasta_file,
# max_length = max_length,
return_seq_indices = return_seq_indices,
shift_augs = shift_augs,
rc_aug = rc_aug
)
def __len__(self):
return len(self.df)
def replace_value(self, x, old_value, new_value):
return torch.where(x == old_value, new_value, x)
def __getitem__(self, idx):
"""Returns a sequence of specified len"""
# sample a random row from df
row = self.df.iloc[idx]
# row = (chr, start, end, split)
chr_name, start, end = (row[0], row[1], row[2])
seq = self.fasta(chr_name, start, end, max_length=self.max_length, return_augs=self.return_augs)
if self.tokenizer_name == 'char':
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
add_special_tokens=False) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
elif self.tokenizer_name == 'bpe':
seq = self.tokenizer(seq,
# add_special_tokens=False,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
)
# get input_ids
if self.add_eos:
seq = seq["input_ids"][1:] # remove the bos, keep the eos token
else:
seq = seq["input_ids"][1:-1] # remove both special tokens
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
# replace N token with a pad token, so we can ignore it in the loss
seq = self.replace_value(seq, self.tokenizer._vocab_str_to_int['N'], self.tokenizer.pad_token_id)
data = seq[:-1].clone() # remove eos
target = seq[1:].clone() # offset by 1, includes eos
return data, target
| hyena-dna-main | src/dataloaders/datasets/hg38_dataset.py |
# Inspired by https://github.com/NVIDIA/Megatron-LM/blob/main/tasks/zeroshot_gpt/datasets.py
# Except we don't pad the last block and don't use overlapping eval
# And we return both the input and the target
import math
import numpy as np
import torch
class LMDataset(torch.utils.data.Dataset):
def __init__(self, tokens, seq_len, drop_last=True):
"""tokens should be a numpy array
"""
self.seq_len = seq_len
ntokens = len(tokens)
if drop_last:
ntokens = ((ntokens - 1) // seq_len) * seq_len + 1
self.ntokens = ntokens
# We're careful not to slice tokens, since it could be a memmap'ed array or H5 dataset,
# and slicing would load it to memory.
self.tokens = tokens
self.total_sequences = math.ceil((self.ntokens - 1) / self.seq_len)
def __len__(self):
return self.total_sequences
def __getitem__(self, idx):
start_idx = idx * self.seq_len
seq_len = min(self.seq_len, self.ntokens - 1 - start_idx)
data = torch.as_tensor(self.tokens[start_idx:(start_idx + seq_len + 1)].astype(np.int64))
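# data holds seq_len + 1 tokens: the model input drops the last token and the target
# drops the first, giving the standard next-token-prediction shift.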
return data[:-1], data[1:].clone() | hyena-dna-main | src/dataloaders/datasets/lm_dataset.py |
import torch
from random import random, randint
import numpy as np
from pathlib import Path
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from genomic_benchmarks.loc2seq import download_dataset
from genomic_benchmarks.data_check import is_downloaded
"""
In-Context learning version of Genomic Benchmarks Dataset
"""
# helper functions
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
class ICLGenomicsDataset(torch.utils.data.Dataset):
'''
Builds k-shot in-context-learning prompts from a Genomic Benchmarks task (per-class text files, downloaded on first use).
Each item concatenates `shots` labeled demonstrations per class with an unlabeled test sequence; the test label is returned separately.
'''
def __init__(
self,
split: str,
shots: int,
max_length: int,
label_to_token: dict=None,
dataset_name="human_nontata_promoters",
d_output=2, # default binary classification
dest_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=True, # need this for current ICL setup
eos_token=None, # end of sequence token (None defaults to tokenizer.sep_token)
rc_aug=False,
):
self.shots = shots
self.label_to_token = {0: 'A', 1: 'N'} if label_to_token is None else label_to_token
self.max_length = max_length
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.add_eos = add_eos
self.eos_token = eos_token
self.d_output = d_output # needed for decoder to grab
self.rc_aug = rc_aug
if not is_downloaded(dataset_name, cache_path=dest_path):
print("downloading {} to {}".format(dataset_name, dest_path))
download_dataset(dataset_name, version=0, dest_path=dest_path)
else:
print("already downloaded {}-{}".format(split, dataset_name))
# change "val" split to "test". No val available, just test
if split == "val":
split = "test"
# use Path object
base_path = Path(dest_path) / dataset_name / split
self.all_paths = []
self.all_labels = []
label_mapper = {}
for i, x in enumerate(base_path.iterdir()):
label_mapper[x.stem] = i
for label_type in label_mapper.keys():
for x in (base_path / label_type).iterdir():
self.all_paths.append(x)
self.all_labels.append(label_mapper[label_type])
self.unique_labels = label_mapper.values()
self.n_samples = len(self.all_paths)
def __len__(self):
return self.n_samples
def get_sample_from_idx(self, idx):
txt_path = self.all_paths[idx]
with open(txt_path, "r") as f:
content = f.read()
x = content
y = self.all_labels[idx]
# apply rc_aug here if using
if self.rc_aug and coin_flip():
x = string_reverse_complement(x)
seq = self.tokenizer(x,
add_special_tokens=False,
padding="max_length" if self.use_padding else None,
max_length=self.max_length,
truncation=True,
) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
if len(self.label_to_token[y])>1:
# multi-character label strings can't go through self.tokenizer (it would split them
# into separate chars), so look up the label string directly in the vocab dict,
# falling back to [UNK] if it is not found
target = [self.tokenizer._vocab_str_to_int.get(self.label_to_token[y], self.tokenizer._vocab_str_to_int["[UNK]"])]
else:
target = self.tokenizer(self.label_to_token[y], add_special_tokens=False)['input_ids']
# need to handle eos here
eos_token = [self.tokenizer.sep_token_id] if not exists(self.eos_token) else self.tokenizer(self.eos_token, add_special_tokens=False)['input_ids']
if self.add_eos:
seq = seq + eos_token
if self.add_eos:
target = target + eos_token
# convert to tensor
seq = torch.LongTensor(seq)
target = torch.LongTensor(target)
return seq, target
def __getitem__(self, idx):
test_seq, test_target = self.get_sample_from_idx(idx)
test_target = test_target[0].unsqueeze(0)
if self.shots==0:
return test_seq, test_target
shot_indices = {}
for label in self.unique_labels:
label_indices = np.where(np.array(self.all_labels)==label)[0]
label_indices = np.array([i for i in label_indices if i!=idx])
shot_indices[label] = np.random.choice(label_indices, size=self.shots, replace=False)
shots = []
for shot in range(self.shots):
for label in shot_indices:
seq, target = self.get_sample_from_idx(shot_indices[label][shot])
shots.append(torch.cat([seq, target],dim=0))
# lets shuffle the shots to avoid always having the same order
np.random.shuffle(shots)
shots = torch.cat([torch.cat(shots, dim=0), test_seq], dim=0)
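# Final prompt layout: each demonstration is (sequence [+ eos]) followed by its label
# token [+ eos]; the shuffled demonstrations are concatenated and the unlabeled test
# sequence is appended, so the model is expected to produce the held-out test label next.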
return shots, test_target | hyena-dna-main | src/dataloaders/datasets/icl_genomics_dataset.py |
from itertools import islice
from functools import partial
# import tensorflow as tf
import os
import functools
import json
from pathlib import Path
from pyfaidx import Fasta
import polars as pl
import pandas as pd
import torch
from random import randrange, random, randint
import numpy as np
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
"""
Modifying the hg38 pretraining dataset to include the chromosome token as a class token at the end. This
helps introduce the concept of appending a class token, which is used downstream for in-context learning (ICL).
"""
# helper functions
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
# if bp not complement map, use the same bp
else:
rev_comp += base
return rev_comp
class FastaInterval():
def __init__(
self,
*,
fasta_file,
max_length = None,
return_seq_indices = False,
shift_augs = None,
rc_aug = False
):
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file), sequence_always_upper=True)
self.return_seq_indices = return_seq_indices
self.max_length = max_length # -1 for adding sos or eos token
self.shift_augs = shift_augs
self.rc_aug = rc_aug
# calc len of each chromosome in fasta file, store in dict
self.chr_lens = {}
for chr_name in self.seqs.keys():
self.chr_lens[chr_name] = len(self.seqs[chr_name])
def __call__(self, chr_name, start, end, return_augs = False):
interval_length = end - start
chromosome = self.seqs[chr_name]
chromosome_length = self.chr_lens[chr_name]
if exists(self.shift_augs):
min_shift, max_shift = self.shift_augs
max_shift += 1
min_shift = max(start + min_shift, 0) - start
max_shift = min(end + max_shift, chromosome_length) - end
rand_shift = randrange(min_shift, max_shift)
start += rand_shift
end += rand_shift
left_padding = right_padding = 0
# checks if not enough sequence to fill up the start to end
if exists(self.max_length) and interval_length < self.max_length:
extra_seq = self.max_length - interval_length
extra_left_seq = extra_seq // 2
extra_right_seq = extra_seq - extra_left_seq
start -= extra_left_seq
end += extra_right_seq
if start < 0:
left_padding = -start
start = 0
if end > chromosome_length:
right_padding = end - chromosome_length
end = chromosome_length
# Added support! need to allow shorter seqs
if interval_length > self.max_length:
end = start + self.max_length
seq = str(chromosome[start:end])
if self.rc_aug and coin_flip():
seq = string_reverse_complement(seq)
seq = ('.' * left_padding) + seq + ('.' * right_padding)
return seq
class ICL_HG38Dataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
Each item is a (data, target) pair whose final target token is the chromosome class token.
'''
def __init__(
self,
split,
bed_file,
fasta_file,
max_length,
min_length=None,
variable_length=False, # if you want a var length between min and max length, else len = max_length always
pad_max_length=None,
tokenizer=None,
tokenizer_name=None,
add_eos=False,
return_seq_indices=False,
shift_augs=None,
rc_aug=False,
return_augs=False
):
self.min_length = min_length if min_length is not None else 0.25 * max_length
self.max_length = max_length
self.variable_length = variable_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
bed_path = Path(bed_file)
assert bed_path.exists(), 'path to .bed file must exist'
# read bed file
df_raw = pd.read_csv(str(bed_path), sep = '\t', names=['chr_name', 'start', 'end', 'split'])
# select only split df
self.df = df_raw[df_raw['split'] == split]
self.fasta = FastaInterval(
fasta_file = fasta_file,
max_length = max_length,
return_seq_indices = return_seq_indices,
shift_augs = shift_augs,
rc_aug = rc_aug,
)
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
"""Returns a sequence of specified len"""
# sample a random row from df
row = self.df.iloc[idx]
# row = (chr, start, end, split)
chr_name, start, end = (row[0], row[1], row[2])
seq = self.fasta(chr_name, start, end, return_augs=self.return_augs)
if self.variable_length:
# sample a random len between min and max
seq_len = randint(self.min_length, self.max_length)
seq = seq[:seq_len]
if self.variable_length:
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.max_length,
truncation=True,
add_special_tokens=False,
)
else:
# fixed size each time
seq = self.tokenizer(seq,
add_special_tokens=False,
max_length=self.pad_max_length
)
seq = seq["input_ids"] # get input_ids
sep_token = self.tokenizer.sep_token_id
# to get cls token, we can't use the normal self.tokenizer, which will split into separate chars,
# we need to lookup the vocab dict directly, while using UNK by default if not found
# use the chr_name as the cls token
cls_token = self.tokenizer._vocab_str_to_int.get(chr_name, self.tokenizer._vocab_str_to_int["[UNK]"])
# build token ICL sample structure
# x = seq[1:] + sep + cls
# remove 1 from left side (pad side) so that we can add an extra sep_token between, and still have max_length seq
# need to wrap single tokens in a list to be able to add this way
seq_sample = seq[1:] + [sep_token] + [cls_token]
# convert to tensor
seq_sample = torch.LongTensor(seq_sample)
data = seq_sample[:-1].clone() # remove cls token in data, (or input x)
target = seq_sample[1:].clone() # offset by 1, includes cls token
return data, target | hyena-dna-main | src/dataloaders/datasets/hg38_icl_dataset.py |
import os
from pathlib import Path
from pyfaidx import Fasta
import torch
import shutil
import gzip
import random
from typing import Optional, Union, Dict, List
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
import collections
"""
Dataset that randomly samples sequences of length (X) from a species' whole genome.
Given a specific species, it will...
1. Randomly sample a chromosome from that species
2. Randomly sample a sequence of length X from that chromosome
All sampled sequences will be the same size.
If a sequence is truncated by the end of a chromosome, it will be padded with 'N'
Char sequences (not one hots yet)
No augmentations yet.
"""
# Determine chromosomes to use for train/test split
SPECIES_CHROMOSOME_SPLITS = {
'human' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'lemur' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'goat' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'sheep' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'pig' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'mouse' : {
'train' : [ '2', '4', '6', '8', '14', '15', '16', '17', '18', '19', 'X', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'gorilla' : {
'train' : [ '2A', '2B', '4', '6', '8', '14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'orangutan' : {
'train' : [ '2A', '2B', '4', '6', '8', '14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'chimpanzee' : {
'train' : [ '2A', '2B', '4', '6', '8', '14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'hippo' : {
'train' : [ '2', '4', '6', '8', '14', '15', '16', '17', 'X', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
}
}
class SpeciesDataset(torch.utils.data.Dataset):
'''
Loop thru fasta files (separated by chromosome) and return a sequence of length `max_length` from a random chromosome.
'''
def __init__(
self,
species: list,
species_dir: str,
split: str,
max_length,
total_size,
pad_max_length=None,
tokenizer=None,
tokenizer_name=None,
add_eos=False,
rc_aug=False,
return_augs=False,
chromosome_weights: Optional[Union[Dict[str, List[float]], str]]='uniform',
species_weights: Optional[Union[List[float], str]]='uniform',
task='species_classification|next_token_pred',
remove_tail_ends=False,
cutoff_train=0.1,
cutoff_test=0.2,
):
"""
`chromosome_weights` => can be either...
- String of form 'uniform|weighted_by_bp', in which case every species' chromosomes will be sampled accordingly
- Dict of form {species: [chromosome weight1, chromosome weight 2, ...]
`species_weights` => can be either...
- String of form 'uniform|weighted_by_bp'
- List of form [ species weight1, species weight2, ... ]
"""
self.max_length = max_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.species = species
self.species_dir = species_dir
self.split = split
self.total_size = total_size
self.task = task
self.d_output = len(self.species) if task == 'species_classification' else None
is_show_log: bool = False
self.remove_tail_ends = remove_tail_ends
self.cutoff_train = cutoff_train
self.cutoff_test = cutoff_test
if task == 'species_classification' and self.d_output < 2:
print(f'Note that `d_output` should be >= 2 for task `{task}`, otherwise you are only predicting one class. Got {self.d_output}')
# Store FASTAs for each species
self.fastas: Dict[str, Dict[str, Fasta]] = collections.defaultdict(dict) # [key] = species -> dict where [key] = chromosome, [value] = Fasta object
self.chromosomes: Dict[str, List[str]] = {} # [key] = species, [value] = list of chromosomes in this split
self.chromosome_weights: Dict[str, List[float]] = {} # [key] = species, [value] = list where [idx] = self.chromosomes[species][idx], [value] = weight
self.species_weights: List[float] = [] # [idx] = self.species[idx], [value] = weight
# For every species in `self.species`, load all chromosomes belonging to `split`
for spec in self.species:
species_path = Path(self.species_dir) / spec
assert species_path.exists(), f'The path `{species_path}` does not exist for species `{spec}`. Please point to a valid directory containing your species fna.gz files.'
# Select chromosomes for this split
assert spec in SPECIES_CHROMOSOME_SPLITS, f'Unrecognized species `{spec}`. Valid species are: {list(SPECIES_CHROMOSOME_SPLITS.keys())}.'
self.chromosomes[spec] = SPECIES_CHROMOSOME_SPLITS[spec][split]
# Load all .fna files of chromosomes in this split
for chromosome in self.chromosomes[spec]:
# Unzip if necessary
gz_file_path = os.path.join(species_path, f'chr{chromosome}.fna.gz')
if os.path.exists(gz_file_path) and not (
os.path.exists(os.path.join(species_path, f'chr{chromosome}.fna')) or
os.path.exists(os.path.join(species_path, f'chr{chromosome}.fa'))
):
if is_show_log:
print(f"Unzipping {gz_file_path}...")
with gzip.open(gz_file_path, 'rb') as f_in:
with open(os.path.join(species_path, f'chr{chromosome}.fna'), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
# Read .fna or .fa file, whichever we can find
file_paths = [ os.path.join(species_path, x) for x in [ f'chr{chromosome}.fna', f'chr{chromosome}.fa' ] ]
is_file_found: bool = False
for file_path in file_paths:
if os.path.exists(file_path):
if chromosome not in self.fastas[spec]:
self.fastas[spec][chromosome] = Fasta(file_path, sequence_always_upper=True)
is_file_found = True
if not is_file_found:
raise FileNotFoundError(f'Could not find any of these files: `{file_paths}`. Please point to a valid directory containing all .fna files for species `{spec}`.\nExpected chromosomes: {self.chromosomes[spec]}.')
if is_show_log:
print(f"Species: {spec}")
print(f"Split: {split}")
print(f"Chromosomes: {self.chromosomes[spec]}")
print(f"Loaded {len(self.fastas[spec])} FASTA files from {species_path}: {list(self.fastas[spec].keys())}")
# Set chromosome weights for sampling
if isinstance(chromosome_weights, dict):
assert len(chromosome_weights) == len(self.species), f"`chromosome_weights` must have a weight for each species. Expected {len(self.species)} weights, instead got {len(chromosome_weights)}."
self.chromosome_weights = chromosome_weights
elif chromosome_weights == 'uniform':
self.chromosome_weights = {
spec: 'uniform'
for spec in self.species
}
elif chromosome_weights == 'weighted_by_bp':
self.chromosome_weights = {
spec: 'weighted_by_bp'
for spec in self.species
}
else:
raise ValueError(f"Invalid chromosome_weights: {chromosome_weights}. Must be 'uniform', 'weighted_by_bp', or a dict of species -> chromosome weights.")
for spec, strategy_or_weights in self.chromosome_weights.items():
if isinstance(strategy_or_weights, str):
if strategy_or_weights == 'uniform':
# Uniform weights
self.chromosome_weights[spec] = [1] * len(self.chromosomes[spec])
elif strategy_or_weights == 'weighted_by_bp':
# Weight by number of base pairs in each chromosome
self.chromosome_weights[spec] = [
len(self.fastas[spec][chromosome])
for chromosome in self.chromosomes[spec]
]
self.chromosome_weights[spec] = [w / sum(self.chromosome_weights[spec]) for w in self.chromosome_weights[spec]]
else:
raise ValueError(f"Invalid chromosome_weights strategy: {strategy_or_weights}. Must be 'uniform' or 'weighted_by_bp'.")
elif isinstance(strategy_or_weights, list):
                # Check that every chromosome gets a weight; weights are positional, matching self.chromosomes[spec]
                assert len(strategy_or_weights) == len(self.chromosomes[spec]), f"`chromosome_weights` must have one weight per chromosome. Expected {len(self.chromosomes[spec])} weights for chromosomes {self.chromosomes[spec]}, instead got {len(strategy_or_weights)}."
self.chromosome_weights[spec] = strategy_or_weights
else:
raise ValueError(f"Invalid chromosome_weights: {chromosome_weights}. Must be 'uniform', 'weighted_by_bp', or a dict of species -> chromosome weights.")
# Set species weights for sampling
if isinstance(species_weights, list):
assert len(species_weights) == len(self.species), f"`species_weights` must have a weight for each species. Expected {len(self.species)} weights, instead got {len(species_weights)}."
self.species_weights = species_weights
elif species_weights == 'uniform':
# Uniform weights
self.species_weights = [1] * len(self.species)
elif species_weights == 'weighted_by_bp':
# Weight by number of base pairs in each chromosome
self.species_weights = [
sum([
len(fasta)
for fasta in self.fastas[spec].values()
])
for spec in self.species
]
self.species_weights = [w / sum(self.species_weights) for w in self.species_weights]
else:
raise ValueError(f"Invalid species_weights: {species_weights}. Must be 'uniform', 'weighted_by_bp', or a dict of species -> chromosome weights.")
if is_show_log:
print(f"Species weights: {list(zip(self.species, self.species_weights))}")
print(f"Chromosome weights: {self.chromosome_weights}")
def __len__(self):
assert self.total_size is not None, "Must set the `total_size` kwarg when you initialize `SpeciesDataset` before calling `__len__`."
return self.total_size
def __getitem__(self, idx):
"""Returns a sequence of length `max_length` from a random chromosome of a random species."""
is_show_log: bool = False
# sample a random species (according to weighting)
# rand = random.Random() # maps idx -> random seed, without affecting global random state
# rand.seed(idx)
spec: str = random.choices(self.species, weights=self.species_weights, k=1)[0]
# sample a random chromosome (according to weighting)
# rand = random.Random() # maps idx -> random seed, without affecting global random state
# rand.seed(idx + 1)
chromosome = random.choices(self.chromosomes[spec], weights=self.chromosome_weights[spec], k=1)[0]
# sample a random sequence of length `self.max_length` from this chromosome
# print("****", spec, chromosome, self.fastas[spec].keys(), idx)
fasta = self.fastas[spec][chromosome][0] # idx into 0 b/c only one fasta per chromosome
chromosome_length: int = len(fasta)
# rand = random.Random() # maps idx -> random seed, without affecting global random state
# rand.seed(idx + 2)
if self.remove_tail_ends:
if self.split == 'train':
cutoff = self.cutoff_train
else:
cutoff = self.cutoff_test
            # trim the first `cutoff` fraction of the chromosome to avoid repeat-rich ends
            left = int(chromosome_length * cutoff)
            # trim the last `cutoff` fraction of the chromosome for the same reason
            right = int(chromosome_length * (1 - cutoff))
else:
left = 0
right = chromosome_length - self.max_length
start: int = random.randint(left, right)
end: int = start + self.max_length
seq = str(fasta[start:min(end, right)])
# pad with Ns if necessary
seq = seq.rjust(end - start, "N")
assert len(seq) == self.max_length, f'Length of sequence ({len(seq)}) from interval ({start}, {end}) of chromosome {chromosome} (len={chromosome_length}) is not equal to `self.max_length` ({self.max_length})'
if is_show_log:
print(f"Sampled species: {spec}")
print(f"Sampled chromosome: {chromosome}")
print(f"Sampled sequence ({start}, {end}) of len={len(seq)}: {seq[:10]}...{seq[-10:]}")
assert self.tokenizer is not None, f"Tokenizer cannot be `None`."
if self.tokenizer_name == 'char':
seq = self.tokenizer(seq, add_special_tokens=False) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
elif self.tokenizer_name == 'bpe':
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
) # add cls and eos token (+2)
# get input_ids
if self.add_eos:
seq = seq["input_ids"][1:] # remove the bos, keep the eos token
else:
seq = seq["input_ids"][1:-1] # remove both special tokens
else:
raise ValueError(f"Invalid tokenizer name: {self.tokenizer_name}")
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
data = seq[:-1].clone() # remove eos
if self.task == 'next_token_pred':
target = seq[1:].clone() # offset by 1, includes eos
elif self.task == 'species_classification':
target = self.species.index(spec)
else:
raise ValueError(f"Invalid task: {self.task}")
if is_show_log:
print(f"Sampled tokens of len={len(seq)}: {seq[:10]}...{seq[-10:]}")
print(f"Sampled target: {target}")
return data, target
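if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition, not part of the original module). It assumes
    # `data/species/{mouse,hippo}/chr*.fna(.gz)` files exist on disk -- the directory is hypothetical.
    # The stand-in tokenizer only mimics the interface __getitem__ expects from the repo's
    # character-level tokenizer: __call__ -> {"input_ids": [...]} plus a `sep_token_id`.
    from torch.utils.data import DataLoader

    class _StandInCharTokenizer:
        sep_token_id = 1
        _vocab = {c: i for i, c in enumerate('ACGTN', start=2)}

        def __call__(self, seq, add_special_tokens=False):
            return {"input_ids": [self._vocab.get(c, self._vocab['N']) for c in seq]}

    dataset = SpeciesDataset(
        species=['mouse', 'hippo'],
        species_dir='data/species',  # hypothetical path
        split='train',
        max_length=1024,
        total_size=1000,
        tokenizer=_StandInCharTokenizer(),
        tokenizer_name='char',
        task='species_classification',
    )
    loader = DataLoader(dataset, batch_size=4)
    data, target = next(iter(loader))  # data: (4, 1023) token ids, target: (4,) species indices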
| hyena-dna-main | src/dataloaders/datasets/species_dataset.py |
from pathlib import Path
from pyfaidx import Fasta
import torch
"""
Just a fixed length dataset for 2 test chromosomes, to ensure the test set is the same.
"""
# helper functions
def exists(val):
return val is not None
class HG38FixedDataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
fasta_file,
chr_ranges, # a dict of chr: (start, end) to use for test set
max_length,
pad_max_length=None,
tokenizer=None,
add_eos=False,
rc_aug=False, # not yet implemented
):
self.max_length = max_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer = tokenizer
self.add_eos = add_eos
# create a list of intervals from chr_ranges, from start to end of size max_length
self.intervals = self.create_fixed_intervals(chr_ranges, self.max_length)
# open fasta file
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file), sequence_always_upper=True)
def create_fixed_intervals(self, chr_ranges, max_length):
"""
        This creates a list of non-overlapping (chr, start, end) intervals of max_length, which ensures that the test set is the same every epoch.
        It loops through each chr and its start/end range and chunks it into samples of max_length.
"""
print("creating new test set with fixed intervals of max_length...")
intervals = []
# loop thru each chr in chr_ranges, and create intervals of max_length from start to end
for chr_name, (start, end) in chr_ranges.items():
# create a list of intervals from start to end of size max_length
for i in range(start, end, max_length):
interval_end = min(i + max_length, end)
intervals.append((chr_name, i, interval_end))
return intervals
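    # Worked example (illustrative): chr_ranges={'chr14': (0, 250)} with max_length=100 yields
    # [('chr14', 0, 100), ('chr14', 100, 200), ('chr14', 200, 250)]; the final, shorter interval
    # is padded back up to `pad_max_length` by the tokenizer call in __getitem__.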
def __len__(self):
return len(self.intervals)
def replace_value(self, x, old_value, new_value):
return torch.where(x == old_value, new_value, x)
def __getitem__(self, idx):
"""Returns a sequence of specified len"""
row = self.intervals[idx]
chr_name, start, end = (row[0], row[1], row[2])
seq = str(self.seqs[chr_name][start:end])
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
add_special_tokens=False) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# # remove first token
# seq = seq[1:]
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
# replace N token with a pad token, so we can ignore it in the loss
seq = self.replace_value(seq, 11, self.tokenizer.pad_token_id)
data = seq[:-1].clone() # remove eos
target = seq[1:].clone() # offset by 1, includes eos
return data, target | hyena-dna-main | src/dataloaders/datasets/hg38_fixed_dataset.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
from collections import Counter
from collections import OrderedDict
import torch
import src.utils as utils
import src.utils.distributed  # ensure the `distributed` submodule is importable as utils.distributed
class Vocab(object):
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=True,
delimiter=None, vocab_file=None):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = line.strip()
# convert to lower case
if self.lower_case:
line = line.lower()
# empty delimiter '' will evaluate False
if self.delimiter == '':
symbols = line
else:
symbols = line.split(self.delimiter)
if add_double_eos: # lm1b
return ['<S>'] + symbols + ['<S>']
elif add_eos:
return symbols + ['<eos>']
else:
return symbols
def count_file(self, path, verbose=False, add_eos=False):
if verbose:
print('counting file {} ...'.format(path))
assert os.path.exists(path)
sents = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose:
print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
self.unk_idx = self.sym2idx['<UNK>']
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq:
break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose:
print('encoding file {} ...'.format(path))
assert os.path.exists(path)
encoded = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose:
print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
# print('encounter unk {}'.format(sym))
assert '<eos>' not in sym
assert hasattr(self, 'unk_idx')
return self.sym2idx.get(sym, self.unk_idx)
def get_symbols(self, indices):
return [self.get_sym(idx) for idx in indices]
def get_indices(self, symbols):
return [self.get_idx(sym) for sym in symbols]
def convert_to_tensor(self, symbols):
return torch.LongTensor(self.get_indices(symbols))
def convert_to_sent(self, indices, exclude=None):
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
def __len__(self):
return len(self.idx2sym)
# Class OpenAIVocab has been adapted from
# https://github.com/cybertronai/transformer-xl/blob/master/utils/vocabulary.py
class OpenAIVocab(Vocab):
def __init__(self, max_size=None, vocab_file=None):
from transformers import GPT2Tokenizer
self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
self.EOT = self.tokenizer.encoder['<|endoftext|>']
self.max_size = max_size
self.vocab_file = vocab_file
pad = 8
vocab_size = len(self.tokenizer)
padded_vocab_size = (vocab_size + pad - 1) // pad * pad
for i in range(0, padded_vocab_size - vocab_size):
token = f'madeupword{i:09d}'
self.tokenizer.add_tokens([token])
def __len__(self):
return len(self.tokenizer)
def count_file(self, path, verbose=False, add_eos=False):
# TODO: train from scratch, respect self.max_size
pass
def build_vocab(self):
pass
def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False) -> torch.LongTensor:
cached = path + '.bpe'
if os.path.exists(cached):
return torch.load(cached)
print(f'encoding file {path} ...')
assert os.path.exists(path), f"{path} doesn't exist"
with open(path, encoding='utf-8') as f:
# Suppress warnings about length.
with open(os.devnull, "w") as devnull, contextlib.redirect_stderr(devnull):
out = torch.LongTensor(self.tokenizer.encode(f.read()) + [self.EOT])
with utils.distributed.sync_workers() as rank:
if rank == 0:
torch.save(out, cached)
return out
def tokenize(self, line, add_eos=False, add_double_eos=False):
return self.tokenizer.encode(line)
def convert_to_tensor(self, symbols):
return torch.LongTensor(symbols)
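if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition). Builds a tiny in-memory vocab without a file;
    # `count_file` / `encode_file` follow the same pattern but read from disk.
    vocab = Vocab(special=['<eos>'])
    sents = [vocab.tokenize(line, add_eos=True) for line in ["Hello world", "hello again"]]
    vocab.count_sents(sents)
    vocab.build_vocab()
    encoded = vocab.encode_sents(sents, ordered=True)  # 1-D LongTensor of token ids
    print(vocab.convert_to_sent(encoded.tolist()))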
| hyena-dna-main | src/dataloaders/utils/vocabulary.py |
"""Utilities for special optimizer hyperparameters.
group_parameters_for_optimizer is a modification of timm's optimizer grouping logic and is currently unused.
add_optimizer_hooks is an improved version that uses this codebase's per-parameter `_optim` dictionary.
"""
import inspect
import torch.nn as nn
import hydra
def add_optimizer_hooks(
model,
bias_weight_decay=False,
normalization_weight_decay=False,
):
"""Set weight_decay=0.0 for parameters in model.no_weight_decay, for parameters with
attribute _no_weight_decay==True, for bias parameters if bias_weight_decay==False, for
normalization parameters if normalization_weight_decay==False
"""
# Separate out all parameters to those that will and won't experience regularizing weight decay
blacklist_weight_modules = (nn.Embedding, )
if not normalization_weight_decay:
blacklist_weight_modules += (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
# Not compatible with Pytorch 1.8.1
# nn.LazyBatchNorm1d, nn.LazyBatchNorm2d, nn.LazyBatchNorm3d,
nn.GroupNorm, nn.SyncBatchNorm,
nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
nn.LayerNorm, nn.LocalResponseNorm)
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
if (not bias_weight_decay and pn.endswith('bias')) \
or getattr(p, '_no_weight_decay', False) \
or isinstance(m, blacklist_weight_modules):
setattr(p, "_optim", {"weight_decay": 0.0})
def group_parameters_for_optimizer(
model,
optimizer_cfg,
bias_weight_decay=False,
normalization_weight_decay=False,
):
"""Set weight_decay=0.0 for parameters in model.no_weight_decay, for parameters with
attribute _no_weight_decay==True, for bias parameters if bias_weight_decay==False, for
normalization parameters if normalization_weight_decay==False
"""
# Get the weight decay from the config, or from the default value of the optimizer constructor
# if it's not specified in the config.
if 'weight_decay' in optimizer_cfg:
weight_decay = optimizer_cfg.weight_decay
else:
# https://stackoverflow.com/questions/12627118/get-a-function-arguments-default-value
signature = inspect.signature(hydra.utils.get_class(optimizer_cfg._target_))
if 'weight_decay' in signature.parameters:
weight_decay = signature.parameters['weight_decay'].default
if weight_decay is inspect.Parameter.empty:
weight_decay = 0.0
else:
weight_decay = 0.0
# If none of the parameters have weight decay anyway, and there are no parameters with special
# optimization params
if weight_decay == 0.0 and not any(hasattr(p, '_optim') for p in model.parameters()):
return model.parameters()
skip = model.no_weight_decay() if hasattr(model, 'no_weight_decay') else set()
skip_keywords = (model.no_weight_decay_keywords() if hasattr(model, 'no_weight_decay_keywords')
else set())
# Adapted from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py#L134
"""
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
    We then return the parameter groups to be passed to the PyTorch optimizer.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
special = set()
whitelist_weight_modules = (nn.Linear, )
blacklist_weight_modules = (nn.Embedding, )
if not normalization_weight_decay:
blacklist_weight_modules += (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
# Not compatible with Pytorch 1.8.1
# nn.LazyBatchNorm1d, nn.LazyBatchNorm2d, nn.LazyBatchNorm3d,
nn.GroupNorm, nn.SyncBatchNorm,
nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
nn.LayerNorm, nn.LocalResponseNorm)
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if not p.requires_grad:
continue # frozen weights
if hasattr(p, '_optim'):
special.add(fpn)
elif fpn in skip or any(skip_keyword in fpn for skip_keyword in skip_keywords):
no_decay.add(fpn)
elif getattr(p, '_no_weight_decay', False):
no_decay.add(fpn)
elif not bias_weight_decay and pn.endswith('bias'):
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
param_dict = {pn: p for pn, p in model.named_parameters() if p.requires_grad}
# special case the position embedding parameter in the root GPT module as not decayed
if 'pos_emb' in param_dict:
no_decay.add('pos_emb')
# In case of parameter sharing, some parameters show up in decay but are not in param_dict.keys()
decay &= param_dict.keys()
decay |= (param_dict.keys() - no_decay - special)
# validate that we considered every parameter
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, f"Parameters {str(inter_params)} made it into both decay/no_decay sets!"
    assert len(param_dict.keys() - special - union_params) == 0, f"parameters {str(param_dict.keys() - special - union_params)} were not separated into either decay/no_decay set!"
if weight_decay == 0.0 or not no_decay:
param_groups = [{"params": [param_dict[pn] for pn in sorted(list(no_decay | decay))],
"weight_decay": weight_decay}]
else:
param_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
# Add parameters with special hyperparameters
# Unique dicts
hps = [dict(s) for s in set(frozenset(param_dict[pn]._optim.items()) for pn in special)]
for hp in hps:
params = [param_dict[pn] for pn in sorted(list(special)) if param_dict[pn]._optim == hp]
param_groups.append({"params": params, **hp})
return param_groups
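if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition). The per-parameter `_optim` dict written by
    # `add_optimizer_hooks` (or by OptimModule.register elsewhere in this repo) is what
    # `group_parameters_for_optimizer` turns into extra parameter groups.
    import torch
    from omegaconf import OmegaConf

    model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8), nn.Linear(8, 2))
    add_optimizer_hooks(model)  # biases and LayerNorm weights get weight_decay=0.0
    cfg = OmegaConf.create({'_target_': 'torch.optim.AdamW', 'lr': 1e-3, 'weight_decay': 0.1})
    param_groups = group_parameters_for_optimizer(model, cfg)
    optimizer = torch.optim.AdamW(param_groups, lr=cfg.lr)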
| hyena-dna-main | src/utils/optim_groups.py |
""" Utilities for dealing with collection objects (lists, dicts) and configs """
from typing import Sequence, Mapping, Optional, Callable
import functools
import hydra
from omegaconf import ListConfig, DictConfig
# TODO this is usually used in a pattern where it's turned into a list, so can just do that here
def is_list(x):
return isinstance(x, Sequence) and not isinstance(x, str)
def is_dict(x):
return isinstance(x, Mapping)
def to_dict(x, recursive=True):
"""Convert Sequence or Mapping object to dict
lists get converted to {0: x[0], 1: x[1], ...}
"""
if is_list(x):
x = {i: v for i, v in enumerate(x)}
if is_dict(x):
if recursive:
return {k: to_dict(v, recursive=recursive) for k, v in x.items()}
else:
return dict(x)
else:
return x
def to_list(x, recursive=False):
"""Convert an object to list.
If Sequence (e.g. list, tuple, Listconfig): just return it
Special case: If non-recursive and not a list, wrap in list
"""
if is_list(x):
if recursive:
return [to_list(_x) for _x in x]
else:
return list(x)
else:
if recursive:
return x
else:
return [x]
def extract_attrs_from_obj(obj, *attrs):
if obj is None:
assert len(attrs) == 0
return []
return [getattr(obj, attr, None) for attr in attrs]
def auto_assign_attrs(cls, **kwargs):
for k, v in kwargs.items():
setattr(cls, k, v)
def instantiate(registry, config, *args, partial=False, wrap=None, **kwargs):
"""
registry: Dictionary mapping names to functions or target paths (e.g. {'model': 'models.SequenceModel'})
config: Dictionary with a '_name_' key indicating which element of the registry to grab, and kwargs to be passed into the target constructor
wrap: wrap the target class (e.g. ema optimizer or tasks.wrap)
*args, **kwargs: additional arguments to override the config to pass into the target constructor
"""
# Case 1: no config
if config is None:
return None
# Case 2a: string means _name_ was overloaded
if isinstance(config, str):
_name_ = None
_target_ = registry[config]
config = {}
# Case 2b: grab the desired callable from name
else:
_name_ = config.pop("_name_")
_target_ = registry[_name_]
# Retrieve the right constructor automatically based on type
if isinstance(_target_, str):
fn = hydra.utils.get_method(path=_target_)
elif isinstance(_target_, Callable):
fn = _target_
else:
raise NotImplementedError("instantiate target must be string or callable")
# Instantiate object
if wrap is not None:
fn = wrap(fn)
obj = functools.partial(fn, *args, **config, **kwargs)
# Restore _name_
if _name_ is not None:
config["_name_"] = _name_
if partial:
return obj
else:
return obj()
def get_class(registry, _name_):
return hydra.utils.get_class(path=registry[_name_])
def omegaconf_filter_keys(d, fn=None):
"""Only keep keys where fn(key) is True. Support nested DictConfig.
# TODO can make this inplace?
"""
if fn is None:
fn = lambda _: True
if is_list(d):
return ListConfig([omegaconf_filter_keys(v, fn) for v in d])
elif is_dict(d):
return DictConfig(
{k: omegaconf_filter_keys(v, fn) for k, v in d.items() if fn(k)}
)
else:
return d
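if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition) with a toy registry; the real registries live
    # in src.utils.registry and map short names to import paths or callables in the same way.
    toy_registry = {'adam': 'torch.optim.Adam', 'relu': 'torch.nn.ReLU'}
    act = instantiate(toy_registry, {'_name_': 'relu'})  # -> torch.nn.ReLU()
    make_opt = instantiate(toy_registry, {'_name_': 'adam', 'lr': 1e-3}, partial=True)
    # `make_opt` is a functools.partial; call it later with the model parameters:
    # optimizer = make_opt(model.parameters())
    print(act, make_opt)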
| hyena-dna-main | src/utils/config.py |
optimizer = {
"adam": "torch.optim.Adam",
"adamw": "torch.optim.AdamW",
"rmsprop": "torch.optim.RMSprop",
"sgd": "torch.optim.SGD",
"lamb": "src.utils.optim.lamb.JITLamb",
}
scheduler = {
"constant": "transformers.get_constant_schedule",
"plateau": "torch.optim.lr_scheduler.ReduceLROnPlateau",
"step": "torch.optim.lr_scheduler.StepLR",
"multistep": "torch.optim.lr_scheduler.MultiStepLR",
"cosine": "torch.optim.lr_scheduler.CosineAnnealingLR",
"constant_warmup": "transformers.get_constant_schedule_with_warmup",
"linear_warmup": "transformers.get_linear_schedule_with_warmup",
"cosine_warmup": "transformers.get_cosine_schedule_with_warmup",
"cosine_warmup_timm": "src.utils.optim.schedulers.TimmCosineLRScheduler",
}
model = {
# Backbones from this repo
"model": "src.models.sequence.SequenceModel",
"lm": "src.models.sequence.long_conv_lm.ConvLMHeadModel",
"lm_simple": "src.models.sequence.simple_lm.SimpleLMHeadModel",
"vit_b_16": "src.models.baselines.vit_all.vit_base_patch16_224",
"dna_embedding": "src.models.sequence.dna_embedding.DNAEmbeddingModel",
"bpnet": "src.models.sequence.hyena_bpnet.HyenaBPNet"
}
layer = {
"id": "src.models.sequence.base.SequenceIdentity",
"ff": "src.models.sequence.ff.FF",
"mha": "src.models.sequence.mha.MultiheadAttention",
"s4d": "src.models.sequence.ssm.s4d.S4D",
"s4_simple": "src.models.sequence.ssm.s4_simple.SimpleS4Wrapper",
"long-conv": "src.models.sequence.long_conv.LongConv",
"h3": "src.models.sequence.h3.H3",
"h3-conv": "src.models.sequence.h3_conv.H3Conv",
"hyena": "src.models.sequence.hyena.HyenaOperator",
"hyena-filter": "src.models.sequence.hyena.HyenaFilter",
"vit": "src.models.sequence.mha.VitAttention",
}
callbacks = {
"timer": "src.callbacks.timer.Timer",
"params": "src.callbacks.params.ParamsLog",
"learning_rate_monitor": "pytorch_lightning.callbacks.LearningRateMonitor",
"model_checkpoint": "pytorch_lightning.callbacks.ModelCheckpoint",
"early_stopping": "pytorch_lightning.callbacks.EarlyStopping",
"swa": "pytorch_lightning.callbacks.StochasticWeightAveraging",
"rich_model_summary": "pytorch_lightning.callbacks.RichModelSummary",
"rich_progress_bar": "pytorch_lightning.callbacks.RichProgressBar",
"progressive_resizing": "src.callbacks.progressive_resizing.ProgressiveResizing",
"seqlen_warmup": "src.callbacks.seqlen_warmup.SeqlenWarmup",
"seqlen_warmup_reload": "src.callbacks.seqlen_warmup_reload.SeqlenWarmupReload",
"gpu_affinity": "src.callbacks.gpu_affinity.GpuAffinity"
}
model_state_hook = {
'load_backbone': 'src.models.sequence.long_conv_lm.load_backbone',
}
| hyena-dna-main | src/utils/registry.py |
from .config import is_list, is_dict, to_list, to_dict, get_class, instantiate
| hyena-dna-main | src/utils/__init__.py |
import math
import numpy as np
import torch
### Bit reversal permutation
def bitreversal_po2(n):
m = int(math.log(n)/math.log(2))
perm = np.arange(n).reshape(n,1)
for i in range(m):
n1 = perm.shape[0]//2
perm = np.hstack((perm[:n1],perm[n1:]))
return perm.squeeze(0)
def bitreversal_permutation(n):
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
perm = bitreversal_po2(N)
return np.extract(perm < n, perm)
def transpose_permutation(h, w):
indices = np.arange(h*w)
indices = indices.reshape((h, w))
indices = indices.T
indices = indices.reshape(h*w)
return indices
def snake_permutation(h, w):
indices = np.arange(h*w)
indices = indices.reshape((h, w))
indices[1::2, :] = indices[1::2, ::-1]
indices = indices.reshape(h*w)
return indices
def hilbert_permutation(n):
m = int(math.log2(n))
assert n == 2**m
inds = decode(list(range(n*n)), 2, m)
ind_x, ind_y = inds.T
indices = np.arange(n*n).reshape((n, n))
indices = indices[ind_x, ind_y]
return(indices)
""" Hilbert curve utilities taken from https://github.com/PrincetonLIPS/numpy-hilbert-curve """
def decode(hilberts, num_dims, num_bits):
''' Decode an array of Hilbert integers into locations in a hypercube.
This is a vectorized-ish version of the Hilbert curve implementation by John
Skilling as described in:
Skilling, J. (2004, April). Programming the Hilbert curve. In AIP Conference
Proceedings (Vol. 707, No. 1, pp. 381-387). American Institute of Physics.
Params:
-------
hilberts - An ndarray of Hilbert integers. Must be an integer dtype and
cannot have fewer bits than num_dims * num_bits.
num_dims - The dimensionality of the hypercube. Integer.
num_bits - The number of bits for each dimension. Integer.
Returns:
--------
The output is an ndarray of unsigned integers with the same shape as hilberts
but with an additional dimension of size num_dims.
'''
if num_dims*num_bits > 64:
raise ValueError(
'''
num_dims=%d and num_bits=%d for %d bits total, which can't be encoded
into a uint64. Are you sure you need that many points on your Hilbert
curve?
      ''' % (num_dims, num_bits, num_dims * num_bits)
)
# Handle the case where we got handed a naked integer.
hilberts = np.atleast_1d(hilberts)
# Keep around the shape for later.
orig_shape = hilberts.shape
# Treat each of the hilberts as a sequence of eight uint8.
# This treats all of the inputs as uint64 and makes things uniform.
hh_uint8 = np.reshape(hilberts.ravel().astype('>u8').view(np.uint8), (-1, 8))
# Turn these lists of uints into lists of bits and then truncate to the size
# we actually need for using Skilling's procedure.
hh_bits = np.unpackbits(hh_uint8, axis=1)[:,-num_dims*num_bits:]
# Take the sequence of bits and Gray-code it.
gray = binary2gray(hh_bits)
# There has got to be a better way to do this.
# I could index them differently, but the eventual packbits likes it this way.
gray = np.swapaxes(
np.reshape(gray, (-1, num_bits, num_dims)),
axis1=1, axis2=2,
)
# Iterate backwards through the bits.
for bit in range(num_bits-1, -1, -1):
# Iterate backwards through the dimensions.
for dim in range(num_dims-1, -1, -1):
# Identify which ones have this bit active.
mask = gray[:,dim,bit]
# Where this bit is on, invert the 0 dimension for lower bits.
gray[:,0,bit+1:] = np.logical_xor(gray[:,0,bit+1:], mask[:,np.newaxis])
# Where the bit is off, exchange the lower bits with the 0 dimension.
to_flip = np.logical_and(
np.logical_not(mask[:,np.newaxis]),
np.logical_xor(gray[:,0,bit+1:], gray[:,dim,bit+1:])
)
gray[:,dim,bit+1:] = np.logical_xor(gray[:,dim,bit+1:], to_flip)
gray[:,0,bit+1:] = np.logical_xor(gray[:,0,bit+1:], to_flip)
# Pad back out to 64 bits.
extra_dims = 64 - num_bits
padded = np.pad(gray, ((0,0), (0,0), (extra_dims,0)),
mode='constant', constant_values=0)
# Now chop these up into blocks of 8.
locs_chopped = np.reshape(padded[:,:,::-1], (-1, num_dims, 8, 8))
# Take those blocks and turn them unto uint8s.
locs_uint8 = np.squeeze(np.packbits(locs_chopped, bitorder='little', axis=3))
# Finally, treat these as uint64s.
flat_locs = locs_uint8.view(np.uint64)
# Return them in the expected shape.
return np.reshape(flat_locs, (*orig_shape, num_dims))
def right_shift(binary, k=1, axis=-1):
''' Right shift an array of binary values.
Parameters:
-----------
binary: An ndarray of binary values.
k: The number of bits to shift. Default 1.
axis: The axis along which to shift. Default -1.
Returns:
--------
Returns an ndarray with zero prepended and the ends truncated, along
whatever axis was specified.
'''
# If we're shifting the whole thing, just return zeros.
if binary.shape[axis] <= k:
return np.zeros_like(binary)
# Determine the padding pattern.
padding = [(0,0)] * len(binary.shape)
padding[axis] = (k,0)
# Determine the slicing pattern to eliminate just the last one.
slicing = [slice(None)] * len(binary.shape)
slicing[axis] = slice(None, -k)
shifted = np.pad(binary[tuple(slicing)], padding,
mode='constant', constant_values=0)
return shifted
def binary2gray(binary, axis=-1):
''' Convert an array of binary values into Gray codes.
This uses the classic X ^ (X >> 1) trick to compute the Gray code.
Parameters:
-----------
binary: An ndarray of binary values.
axis: The axis along which to compute the gray code. Default=-1.
Returns:
--------
Returns an ndarray of Gray codes.
'''
shifted = right_shift(binary, axis=axis)
# Do the X ^ (X >> 1) trick.
gray = np.logical_xor(binary, shifted)
return gray
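if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition): each helper returns an index permutation that
    # can be applied to a flattened sequence, e.g. x_permuted = x[perm].
    print(bitreversal_permutation(8))  # [0 4 2 6 1 5 3 7]
    print(snake_permutation(2, 3))     # [0 1 2 5 4 3] -- every second row reversed
    print(hilbert_permutation(4))      # 16-element Hilbert-curve ordering of a 4x4 grid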
| hyena-dna-main | src/utils/permutations.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from contextlib import contextmanager
import torch
def init_distributed(cuda):
"""
Initializes distributed backend.
:param cuda: (bool) if True initializes nccl backend, if False initializes
gloo backend
"""
world_size = int(os.environ.get('WORLD_SIZE', 1))
distributed = (world_size > 1)
if distributed:
backend = 'nccl' if cuda else 'gloo'
torch.distributed.init_process_group(backend=backend,
init_method='env://')
assert torch.distributed.is_initialized()
return distributed
def barrier():
"""
Call torch.distributed.barrier() if distritubed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
"""
Gets total number of distributed workers or returns one if distributed is
not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
def all_reduce_item(value, op='sum'):
"""
All-reduces single scalar value if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if op == 'sum' or op == 'mean':
dop = torch.distributed.ReduceOp.SUM
elif op == 'min':
dop = torch.distributed.ReduceOp.MIN
elif op == 'max':
dop = torch.distributed.ReduceOp.MAX
elif op == 'product':
dop = torch.distributed.ReduceOp.PRODUCT
else:
raise RuntimeError('Unsupported reduce op')
backend = torch.distributed.get_backend()
if backend == torch.distributed.Backend.NCCL:
device = torch.device('cuda')
elif backend == torch.distributed.Backend.GLOO:
device = torch.device('cpu')
else:
raise RuntimeError('Unsupported distributed backend')
tensor = torch.tensor(value, device=device)
torch.distributed.all_reduce(tensor, dop)
if op == 'mean':
tensor /= get_world_size()
ret = tensor.item()
else:
ret = value
return ret
def all_reduce_tensor(value, op='sum'):
"""
All-reduces single scalar value if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if op == 'sum' or op == 'mean':
dop = torch.distributed.ReduceOp.SUM
elif op == 'min':
dop = torch.distributed.ReduceOp.MIN
elif op == 'max':
dop = torch.distributed.ReduceOp.MAX
elif op == 'product':
dop = torch.distributed.ReduceOp.PRODUCT
else:
raise RuntimeError('Unsupported reduce op')
backend = torch.distributed.get_backend()
if backend == torch.distributed.Backend.NCCL:
device = torch.device('cuda')
elif backend == torch.distributed.Backend.GLOO:
device = torch.device('cpu')
else:
raise RuntimeError('Unsupported distributed backend')
tensor = value
torch.distributed.all_reduce(tensor, dop)
if op == 'mean':
tensor /= get_world_size()
ret = tensor
else:
ret = value
return ret
@contextmanager
def sync_workers():
"""
Yields distributed rank and synchronizes all workers on exit.
"""
rank = get_rank()
yield rank
barrier()
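if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition). With a single process (no WORLD_SIZE set),
    # init_distributed is a no-op, get_rank() is 0 and all_reduce_item returns its input unchanged.
    distributed = init_distributed(cuda=torch.cuda.is_available())
    mean_loss = all_reduce_item(0.5, op='mean')  # averaged across workers when distributed
    with sync_workers() as rank:
        if rank == 0:
            print(f'world_size={get_world_size()}, mean_loss={mean_loss}')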
| hyena-dna-main | src/utils/distributed.py |
""" Utils for the training loop. Copied from https://github.com/HazyResearch/transformers/blob/master/src/utils/utils.py """
import logging
import os
import warnings
from typing import List, Sequence
import torch.nn as nn
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_only
from src.utils.config import omegaconf_filter_keys
# Copied from https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging
class LoggingContext:
def __init__(self, logger, level=None, handler=None, close=True):
self.logger = logger
self.level = level
self.handler = handler
self.close = close
def __enter__(self):
if self.level is not None:
self.old_level = self.logger.level
self.logger.setLevel(self.level)
if self.handler:
self.logger.addHandler(self.handler)
def __exit__(self, et, ev, tb):
if self.level is not None:
self.logger.setLevel(self.old_level)
if self.handler:
self.logger.removeHandler(self.handler)
if self.handler and self.close:
self.handler.close()
# implicit return of None => don't swallow exceptions
def get_logger(name=__name__, level=logging.INFO) -> logging.Logger:
"""Initializes multi-GPU-friendly python logger."""
logger = logging.getLogger(name)
logger.setLevel(level)
# this ensures all logging levels get marked with the rank zero decorator
# otherwise logs would get multiplied for each GPU process in multi-GPU setup
for level in ("debug", "info", "warning", "error", "exception", "fatal", "critical"):
setattr(logger, level, rank_zero_only(getattr(logger, level)))
return logger
def process_config(config: DictConfig) -> DictConfig: # TODO because of filter_keys, this is no longer in place
"""A couple of optional utilities, controlled by main config file:
- disabling warnings
- easier access to debug mode
- forcing debug friendly configuration
Modifies DictConfig in place.
Args:
config (DictConfig): Configuration composed by Hydra.
"""
log = get_logger()
# Filter out keys that were used just for interpolation
# config = dictconfig_filter_keys(config, lambda k: not k.startswith('__'))
config = omegaconf_filter_keys(config, lambda k: not k.startswith('__'))
# enable adding new keys to config
OmegaConf.set_struct(config, False)
# disable python warnings if <config.ignore_warnings=True>
if config.get("ignore_warnings"):
log.info("Disabling python warnings! <config.ignore_warnings=True>")
warnings.filterwarnings("ignore")
if config.get("debug"):
log.info("Running in debug mode! <config.debug=True>")
config.trainer.fast_dev_run = True
# force debugger friendly configuration
log.info("Forcing debugger friendly configuration! <config.trainer.fast_dev_run=True>")
# Debuggers don't like GPUs or multiprocessing
if config.trainer.get("gpus"):
config.trainer.gpus = 0
if config.loader.get("pin_memory"):
config.loader.pin_memory = False
if config.loader.get("num_workers"):
config.loader.num_workers = 0
# disable adding new keys to config
# OmegaConf.set_struct(config, True) # [21-09-17 AG] I need this for .pop(_name_) pattern among other things
return config
@rank_zero_only
def print_config(
config: DictConfig,
resolve: bool = True,
save_cfg=True,
) -> None:
"""Prints content of DictConfig using Rich library and its tree structure.
Args:
config (DictConfig): Configuration composed by Hydra.
fields (Sequence[str], optional): Determines which main fields from config will
be printed and in what order.
resolve (bool, optional): Whether to resolve reference fields of DictConfig.
"""
style = "dim"
tree = rich.tree.Tree("CONFIG", style=style, guide_style=style)
fields = config.keys()
for field in fields:
branch = tree.add(field, style=style, guide_style=style)
config_section = config.get(field)
branch_content = str(config_section)
if isinstance(config_section, DictConfig):
branch_content = OmegaConf.to_yaml(config_section, resolve=resolve)
branch.add(rich.syntax.Syntax(branch_content, "yaml"))
rich.print(tree)
if save_cfg:
with open("config_tree.txt", "w") as fp:
rich.print(tree, file=fp)
def log_optimizer(logger, optimizer, keys):
""" Log values of particular keys from the optimizer's param groups """
keys = sorted(keys)
for i, g in enumerate(optimizer.param_groups):
group_hps = {k: g.get(k, None) for k in keys}
logger.info(' | '.join([
f"Optimizer group {i}",
f"{len(g['params'])} tensors",
] + [f"{k} {v}" for k, v in group_hps.items()]))
class OptimModule(nn.Module):
""" Interface for Module that allows registering buffers/parameters with configurable optimizer hyperparameters """
def register(self, name, tensor, lr=None, wd=0.0):
"""Register a tensor with a configurable learning rate and 0 weight decay"""
if lr == 0.0:
self.register_buffer(name, tensor)
else:
self.register_parameter(name, nn.Parameter(tensor))
optim = {}
if lr is not None: optim["lr"] = lr
if wd is not None: optim["weight_decay"] = wd
setattr(getattr(self, name), "_optim", optim) | hyena-dna-main | src/utils/train.py |
import torch
import torch.utils.benchmark as benchmark
def _get_gpu_mem(synchronize=True, empty_cache=True):
return torch.cuda.memory_allocated() / (
(2**20) * 1000
), torch.cuda.memory_cached() / ((2**20) * 1000)
def _generate_mem_hook(handle_ref, mem, idx, hook_type, exp):
def hook(self, *args):
if len(mem) == 0 or mem[-1]["exp"] != exp:
call_idx = 0
else:
call_idx = mem[-1]["call_idx"] + 1
mem_all, mem_cached = _get_gpu_mem()
torch.cuda.synchronize()
mem.append(
{
"layer_idx": idx,
"call_idx": call_idx,
"layer_type": type(self).__name__,
"exp": exp,
"hook_type": hook_type,
"mem_all": mem_all,
"mem_cached": mem_cached,
}
)
return hook
def _add_memory_hooks(idx, model, mem_log, exp, hr):
h = model.register_forward_pre_hook(
_generate_mem_hook(hr, mem_log, idx, "pre", exp)
)
hr.append(h)
h = model.register_forward_hook(_generate_mem_hook(hr, mem_log, idx, "fwd", exp))
hr.append(h)
h = model.register_backward_hook(_generate_mem_hook(hr, mem_log, idx, "bwd", exp))
hr.append(h)
def log_memory(model, inp, mem_log=None, exp=None):
mem_log = mem_log or []
exp = exp or f"exp_{len(mem_log)}"
hr = []
for idx, module in enumerate(model.modules()):
_add_memory_hooks(idx, module, mem_log, exp, hr)
out = model(inp)
if type(out) == tuple:
out = out[0].logits
loss = out.sum()
loss.backward()
[h.remove() for h in hr]
return mem_log
def benchmark_forward(
fn, *inputs, min_run_time=0.2, repeats=10, desc="", verbose=True, **kwinputs
):
"""Use Pytorch Benchmark on the forward pass of an arbitrary function."""
if verbose:
print(desc, "- Forward pass")
t = benchmark.Timer(
stmt="fn(*inputs, **kwinputs)",
globals={"fn": fn, "inputs": inputs, "kwinputs": kwinputs},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
def benchmark_memory(fn, *inputs, desc="", verbose=True, **kwinputs):
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
torch.cuda.synchronize()
fn(*inputs, **kwinputs)
torch.cuda.synchronize()
mem = torch.cuda.max_memory_allocated() / ((2**20) * 1000)
if verbose:
print(f"{desc} max memory: {mem}GB")
torch.cuda.empty_cache()
return mem
def benchmark_memory_bwd(fn, *inputs, desc="", verbose=True, **kwinputs):
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
for input in inputs:
input = input.requires_grad_(True)
torch.cuda.synchronize()
y = fn(*inputs, **kwinputs)
y.sum().backward()
torch.cuda.synchronize()
mem = torch.cuda.max_memory_allocated() / ((2**20) * 1000)
if verbose:
print(f"{desc} max memory: {mem}GB")
torch.cuda.empty_cache()
return mem
def benchmark_backward(
fn, *inputs, grad=None, repeats=10, desc="", verbose=True, **kwinputs
):
"""Use Pytorch Benchmark on the backward pass of an arbitrary function."""
if verbose:
print(desc, "- Backward pass")
y = fn(*inputs, **kwinputs)
if not hasattr(y, "shape"):
y = y[0]
if grad is None:
grad = torch.randn_like(y)
else:
if grad.shape != y.shape:
raise RuntimeError("Grad shape does not match output shape")
t = benchmark.Timer(
stmt="y.backward(grad, retain_graph=True)",
globals={"y": y, "grad": grad},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
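if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition). The timing helpers only need a callable and its
    # inputs; the memory helpers additionally require a CUDA device, so they are guarded below.
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(512, 512), nn.GELU(), nn.Linear(512, 512))
    x = torch.randn(8, 512)
    benchmark_forward(model, x, repeats=5, desc='mlp')
    benchmark_backward(model, x, repeats=5, desc='mlp')
    if torch.cuda.is_available():
        model, x = model.cuda(), x.cuda()
        benchmark_memory(model, x, desc='mlp')
        mem_log = log_memory(model, x, exp='mlp')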
| hyena-dna-main | src/utils/profiling.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2019 cybertronai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Lamb optimizer."""
import torch
from torch.optim import Optimizer
class Lamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super(Lamb, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Paper v3 does not use debiasing.
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# Apply bias to lr to avoid broadcast.
step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1
weight_norm = p.data.norm(p=2).clamp_(0, 10)
adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
if group['weight_decay'] != 0:
adam_step.add_(group['weight_decay'], p.data)
adam_norm = adam_step.norm(p=2)
if weight_norm == 0.0 or adam_norm == 0.0:
trust_ratio = 1
else:
trust_ratio = weight_norm / (adam_norm + group['eps'])
state['weight_norm'] = weight_norm
state['adam_norm'] = adam_norm
state['trust_ratio'] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(-step_size * trust_ratio, adam_step)
return loss
@torch.jit.script
def lamb_kernel(param, grad, exp_avg, exp_avg_sq, beta1: float,
beta2: float, step_size: float, eps: float, weight_decay: float):
exp_avg = exp_avg * beta1 + (1 - beta1) * grad
exp_avg_sq = exp_avg_sq * beta2 + (1 - beta2) * (grad * grad)
adam_step = exp_avg / (exp_avg_sq.sqrt() + eps)
adam_step = adam_step + weight_decay * param
weight_norm = param.norm(p=2).clamp(0, 10)
adam_norm = adam_step.norm(p=2)
trust_ratio = weight_norm / (adam_norm + eps)
trust_ratio = (weight_norm == 0.0) * 1.0 + (weight_norm != 0.0) * trust_ratio
trust_ratio = (adam_norm == 0.0) * 1.0 + (adam_norm != 0.0) * trust_ratio
trust_ratio = trust_ratio.float()
param = param - step_size * trust_ratio * adam_step
return param, exp_avg, exp_avg_sq
class JITLamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super().__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
step_size = group['lr']
param, exp_avg, exp_avg_sq = lamb_kernel(p.data, grad, exp_avg,
exp_avg_sq, beta1,
beta2, step_size,
group['eps'],
group['weight_decay'],
)
state['exp_avg'] = exp_avg
state['exp_avg_sq'] = exp_avg_sq
p.data = param
return loss
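if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition). JITLamb is a drop-in replacement for an
    # Adam-style optimizer; pass adam=True to force trust_ratio=1 and recover plain Adam.
    import torch.nn as nn

    model = nn.Linear(16, 4)
    optimizer = JITLamb(model.parameters(), lr=1e-3, weight_decay=0.01)
    loss = model(torch.randn(8, 16)).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()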
| hyena-dna-main | src/utils/optim/lamb.py |
"""Custom learning rate schedulers"""
import math
import warnings
import torch
from timm.scheduler import CosineLRScheduler
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html
class CosineWarmup(torch.optim.lr_scheduler.CosineAnnealingLR):
def __init__(self, optimizer, T_max, eta_min=0, warmup_step=0, **kwargs):
self.warmup_step = warmup_step
        super().__init__(optimizer, T_max - warmup_step, eta_min, **kwargs)
# Copied from CosineAnnealingLR, but adding warmup and changing self.last_epoch to
# self.last_epoch - self.warmup_step.
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == self.warmup_step: # also covers the case where both are 0
return self.base_lrs
elif self.last_epoch < self.warmup_step:
return [base_lr * (self.last_epoch + 1) / self.warmup_step for base_lr in self.base_lrs]
elif (self.last_epoch - self.warmup_step - 1 - self.T_max) % (2 * self.T_max) == 0:
return [group['lr'] + (base_lr - self.eta_min) *
(1 - math.cos(math.pi / self.T_max)) / 2
for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)]
return [(1 + math.cos(math.pi * (self.last_epoch - self.warmup_step) / self.T_max)) /
(1 + math.cos(math.pi * (self.last_epoch - self.warmup_step - 1) / self.T_max)) *
(group['lr'] - self.eta_min) + self.eta_min
for group in self.optimizer.param_groups]
_get_closed_form_lr = None
def InvSqrt(optimizer, warmup_step):
""" Originally used for Transformer (in Attention is all you need)
"""
def lr_lambda(step):
# return a multiplier instead of a learning rate
if step == warmup_step: # also covers the case where both are 0
return 1.
else:
return 1. / (step ** 0.5) if step > warmup_step else (step + 1) / (warmup_step ** 1.5)
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
def Constant(optimizer, warmup_step):
def lr_lambda(step):
if step == warmup_step: # also covers the case where both are 0
return 1.
else:
return 1. if step > warmup_step else (step + 1) / warmup_step
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
class TimmCosineLRScheduler(CosineLRScheduler, torch.optim.lr_scheduler._LRScheduler):
""" Wrap timm.scheduler.CosineLRScheduler so we can call scheduler.step() without passing in epoch.
It supports resuming as well.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._last_epoch = -1
self.step(epoch=0)
def step(self, epoch=None):
if epoch is None:
self._last_epoch += 1
else:
self._last_epoch = epoch
# We call either step or step_update, depending on whether we're using the scheduler every
# epoch or every step.
# Otherwise, lightning will always call step (i.e., meant for each epoch), and if we set
# scheduler interval to "step", then the learning rate update will be wrong.
if self.t_in_epochs:
super().step(epoch=self._last_epoch)
else:
super().step_update(num_updates=self._last_epoch)
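# A minimal sanity-check sketch of the schedulers above, assuming only torch;
# the optimizer and step counts are illustrative.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    # Linear warmup for 3 steps, then cosine decay over the remaining T_max - warmup_step steps.
    scheduler = CosineWarmup(optimizer, T_max=10, warmup_step=3)
    for step in range(10):
        optimizer.step()
        scheduler.step()
        print("cosine", step, scheduler.get_last_lr())
    # InvSqrt returns a LambdaLR whose multiplier warms up and then decays as 1/sqrt(step).
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = InvSqrt(optimizer, warmup_step=3)
    for step in range(5):
        optimizer.step()
        scheduler.step()
        print("invsqrt", step, scheduler.get_last_lr())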
| hyena-dna-main | src/utils/optim/schedulers.py |
""" Implementations of different types of residual functions. """
import torch
from torch import nn
class Residual(nn.Module):
""" Residual connection with constant affine weights. Can simulate standard residual, no residual, and "constant gates". """
def __init__(self, i_layer, d_input, d_model, alpha=1.0, beta=1.0):
# print("ConstantResidual extra kwargs", kwargs)
super().__init__()
assert (d_input == d_model) or alpha == 0.0
self.i_layer = i_layer
self.d_input = d_input
self.d_model = d_model
self.alpha = alpha
self.beta = beta
@property
def d_output(self):
return self.d_model
def forward(self, x, y, transposed): # TODO documentation of transposed
y = self.beta*y if self.beta != 1.0 else y
return self.alpha * x + y if self.alpha else y
class Affine(Residual):
""" Residual connection with learnable scalar multipliers on the main branch
scalar: Single scalar multiplier, or one per dimension
scale, power: Initialize to scale * layer_num**(-power)
"""
def __init__(self, *args, scalar=True, gamma=0.0, **kwargs):
# print("ConstantResidual extra kwargs", kwargs)
super().__init__(*args, **kwargs)
self.scalar = scalar
self.gamma = gamma
c = self.beta * self.i_layer ** (-self.gamma)
d = 1 if self.scalar else self.d_input
self.affine = nn.Parameter(c * torch.ones(d))
def forward(self, x, y, transposed): # TODO documentation of transposed
c = self.affine
if transposed: c = c.unsqueeze(-1)
return self.alpha * x + c * y
class Feedforward(Residual):
def __init__(self, *args):
# print("Feedforward extra kwargs", kwargs)
super().__init__(*args, alpha=0.0, beta=1.0)
class Highway(Residual):
def __init__(self, *args, scaling_correction=False, elemwise=False):
super().__init__(*args)
self.scaling_correction = 1.732 if scaling_correction else 1.0 # TODO
self.elemwise = elemwise
self.Wx = nn.Linear(self.d_input, self.d_input)
if self.elemwise:
self.Wy = nn.Parameter(torch.randn(self.d_input))
else:
self.Wy = nn.Linear(self.d_input, self.d_input)
def forward(self, x, y, transposed=False): # TODO handle this case
if self.elemwise:
y = self.Wy * y
else:
y = self.Wy(y)
r = torch.sigmoid(self.Wx(x) + y)
z = self.scaling_correction * (1.-r) * x + r * y
return z
class DecayResidual(Residual):
""" Residual connection that can decay the linear combination depending on depth. """
def __init__(self, *args, power=0.5, l2=True):
# print("DecayResidual extra kwargs", kwargs)
super().__init__(*args)
self.power = power
self.l2 = l2
def forward(self, x, y, transposed):
beta = self.i_layer ** (-self.power)
if self.l2:
alpha = (1. - beta**2)**0.5
else:
alpha = 1. - beta
return alpha * x + beta * y
registry = {
'F': Feedforward,
'N': Feedforward,
'R': Residual,
'H': Highway,
'D': DecayResidual,
'A': Affine,
'none': Feedforward,
'ff': Feedforward,
'feedforward': Feedforward,
'residual': Residual,
'highway': Highway,
'decay': DecayResidual,
'affine': Affine,
}
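# A minimal usage sketch of the residual registry above; the layer index, width,
# and tensor shapes are illustrative.
if __name__ == "__main__":
    x = torch.randn(2, 8, 16)   # block input, (batch, length, d)
    y = torch.randn(2, 8, 16)   # output of the block's main branch
    for name in ['residual', 'highway', 'decay', 'affine', 'ff']:
        res = registry[name](1, 16, 16)   # (i_layer, d_input, d_model)
        out = res(x, y, transposed=False)
        print(name, tuple(out.shape))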
| hyena-dna-main | src/models/nn/residual.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
class OptionalParameterList(nn.ParameterList):
def extra_repr(self):
child_lines = []
for k, p in self._parameters.items():
if p is not None:
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
tie_projs=None, out_layers_weights=None, out_projs=None,
keep_order=False,
bias_scale=0.0,
dropout=0.0,
):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = list(cutoffs) + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
# bake the first False into the definition, just as [0] is built into the cutoffs
if tie_projs is None: tie_projs = []
elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs)
else: tie_projs = list(tie_projs)
tie_projs = [False] + tie_projs
self.tie_projs = tie_projs
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
if not out_layers_weights:
self.out_layers_weights = nn.ParameterList()
else:
self.out_layers_weights = out_layers_weights
self.out_layers_biases = nn.ParameterList()
self.shared_out_projs = out_projs
self.out_projs = OptionalParameterList()
self.dropout = dropout
self.drop = nn.Dropout(dropout)
if div_val == 1:
if d_proj != d_embed:
for i in range(len(self.cutoffs)):
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_embed))
)
else:
self.out_projs.append(None)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(n_token))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(n_token, d_embed))
)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_emb_i))
)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(r_idx - l_idx))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i))
)
for bias in self.out_layers_biases:
bound = bias_scale * d_proj ** -.5
nn.init.uniform_(bias, -bound, bound)
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
if self.dropout > 0.0:
logit = hidden @ proj
logit = self.drop(logit)
logit = logit @ weight.t()
else:
logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
if bias is not None:
logit = logit + bias
return logit
def get_out_proj(self, i):
if self.tie_projs[i]:
if len(self.shared_out_projs) == 0:
return None
elif len(self.shared_out_projs) == 1:
return self.shared_out_projs[0]
else:
return self.shared_out_projs[i]
else:
return self.out_projs[i]
def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs):
# [21-09-15 AG]: TODO may need to handle key_padding_mask
'''
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
'''
hidden = hidden.reshape(-1, hidden.size(-1))
target = target.reshape(-1)
if hidden.size(0) != target.size(0):
print(hidden.shape, target.shape)
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
nll = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero(as_tuple=False).squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
# First term accounts for cluster probabilities
logprob_i = head_logprob_i[:, -i] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
if self.keep_order or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0) # TODO This should be a bug in the original implementation; it should go into the continue case above as well
return nll.mean() # TODO maybe cases for length or padding_mask
def compute_logits(self, hidden):
"""Compute full vector of logits
Adapted from https://github.com/kimiyoung/transformer-xl/issues/88
"""
hidden = hidden.reshape(-1, hidden.size(-1))
if self.n_clusters == 0:
logits = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
return logits
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
out_full_logps = [head_logprob[:, :self.cutoffs[0]]]
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(1, len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
head_logprob_i = head_logprob # .index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden # .index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob_i[:, -i].view(-1, 1) + tail_logprob_i
offset += logprob_i.size(0)
out_full_logps.append(logprob_i)
out_full_logps = torch.cat(out_full_logps, dim = 1)
# print(torch.sum(out_full_ps), out_full_ps.shape)
return out_full_logps
class AdaptiveEmbedding(nn.Module):
""" Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_ operation
Initialization has been fixed for the case when d_proj = d_embed
"""
def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = list(cutoffs) + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.drop = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
_init_embed(self.emb_layers[-1].weight, d_embed, init_scale)
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5)
if d_proj != d_embed: # TODO
# self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5)
_init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale)
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
embed = self.drop(embed)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.reshape(-1)
# Changes from original impl
# emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
embeddings = []
indices = torch.zeros_like(inp_flat) # empty should work as long as cutoffs[-1] > max token
_total_tokens = 0
# emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,)
_tokens = indices_i.numel()
if _tokens == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = self.drop(emb_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
# Changes
embeddings.append(emb_i)
indices.index_put_(
(indices_i,),
torch.arange(_tokens, device=inp.device) + _total_tokens
)
_total_tokens += _tokens
# emb_flat.index_copy_(0, indices_i, emb_i)
embeddings = torch.cat(embeddings, dim=0)
emb_flat = embeddings[indices]
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
# embed.div_(self.emb_scale)
return embed
def _init_weight(weight, d : int, init_scale : Optional[float], default=None):
assert init_scale or default
if init_scale is None:
std = default
else:
std = init_scale * (d ** -0.5)
nn.init.normal_(weight, mean=0, std=std)
_init_embed = functools.partial(_init_weight, default=0.02)
_init_proj = functools.partial(_init_weight, default=0.01)
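# A minimal usage sketch of the adaptive embedding/softmax pair above; the vocabulary
# size, cutoffs, dimensions, and token values are illustrative. With div_val > 1 the
# tail cluster uses a smaller embedding width that is projected back up to d_proj.
if __name__ == "__main__":
    n_token, d_embed, d_proj, cutoffs = 100, 16, 16, [50]
    emb = AdaptiveEmbedding(n_token, d_embed, d_proj, cutoffs, div_val=2)
    crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_proj, cutoffs,
                                       div_val=2, tie_projs=[False])
    tokens = torch.tensor([[1, 7, 60, 90], [3, 55, 70, 2]])   # hits both clusters
    hidden = emb(tokens)          # (2, 4, d_proj)
    loss = crit(hidden, tokens)   # mean negative log-likelihood over all positions
    print(hidden.shape, loss.item())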
| hyena-dna-main | src/models/nn/adaptive_softmax.py |
from .components import LinearActivation, Activation, Normalization, DropoutNd
| hyena-dna-main | src/models/nn/__init__.py |
""" Utility wrappers around modules to let them handle Args and extra arguments """
import inspect
from functools import wraps
import torch
from torch import nn
def wrap_kwargs(f):
"""
Given a callable f that can consume some named arguments,
wrap it with a kwargs that passes back any unused args
EXAMPLES
--------
Basic usage:
def foo(x, y=None):
return x
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'z': 2})
--------
The wrapped function can return its own argument dictionary,
which gets merged with the new kwargs.
def foo(x, y=None):
return x, {}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'z': 2})
def foo(x, y=None):
return x, {"y": y, "z": None}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {'y': 1, 'z': 2})
--------
The wrapped function can have its own kwargs parameter:
def foo(x, y=None, **kw_args):
return x, {}
wrap_kwargs(foo)(0, y=1, z=2) == (0, {})
--------
Partial functions and modules work automatically:
class Module:
def forward(self, x, y=0):
return x, {"y": y+1}
m = Module()
wrap_kwargs(m.forward)(0, y=1, z=2) == (0, {'y': 2, 'z': 2})
"""
sig = inspect.signature(f)
# Check if f already has kwargs
has_kwargs = any([
param.kind == inspect.Parameter.VAR_KEYWORD
for param in sig.parameters.values()
])
if has_kwargs:
@wraps(f)
def f_kwargs(*args, **kwargs):
y = f(*args, **kwargs)
if isinstance(y, tuple) and isinstance(y[-1], dict):
return y
else:
return y, {}
else:
param_kwargs = inspect.Parameter("kwargs", kind=inspect.Parameter.VAR_KEYWORD)
sig_kwargs = inspect.Signature(parameters=list(sig.parameters.values())+[param_kwargs])
@wraps(f)
def f_kwargs(*args, **kwargs):
bound = sig_kwargs.bind(*args, **kwargs)
if "kwargs" in bound.arguments:
kwargs = bound.arguments.pop("kwargs")
else:
kwargs = {}
y = f(**bound.arguments)
if isinstance(y, tuple) and isinstance(y[-1], dict):
return *y[:-1], {**y[-1], **kwargs}
else:
return y, kwargs
return f_kwargs
def discard_kwargs(f):
if f is None: return None
f_kwargs = wrap_kwargs(f)
@wraps(f)
def f_(*args, **kwargs):
return f_kwargs(*args, **kwargs)[0]
return f_
def PassthroughSequential(*modules):
"""Special Sequential module that chains kwargs.
Semantics are the same as nn.Sequential, with extra convenience features:
- Discard None modules
- Flatten inner Sequential modules
- In case with 0 or 1 Module, rename the class for ease of inspection
"""
def flatten(module):
if isinstance(module, nn.Sequential):
return sum([flatten(m) for m in module], [])
else:
return [module]
modules = flatten(nn.Sequential(*modules))
    modules = [module for module in modules if module is not None]
class Sequential(nn.Sequential):
def forward(self, x, **kwargs):
for layer in self:
x, kwargs = wrap_kwargs(layer.forward)(x, **kwargs)
return x, kwargs
def step(self, x, **kwargs):
for layer in self:
fn = getattr(layer, "step", layer.forward)
x, kwargs = wrap_kwargs(fn)(x, **kwargs)
return x, kwargs
if len(modules) == 0:
Sequential.__name__ = "Identity"
elif len(modules) == 1:
Sequential.__name__ = type(modules[0]).__name__
return Sequential(*modules)
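# A minimal usage sketch of PassthroughSequential; AddBias is a hypothetical layer,
# written only to show how keyword arguments are threaded through the chain.
if __name__ == "__main__":
    class AddBias(nn.Module):
        def forward(self, x, bias=0.0):
            return x + bias, {"bias": bias}
    seq = PassthroughSequential(nn.Identity(), None, AddBias())   # the None is discarded
    y, kwargs = seq(torch.zeros(2, 3), bias=1.0)
    print(y, kwargs)   # a tensor of ones and {'bias': 1.0}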
| hyena-dna-main | src/models/nn/utils.py |
""" Defines flexible gating mechanisms based on ideas from LSSL paper and UR-LSTM paper https://arxiv.org/abs/1910.09890 """
import torch
import torch.nn as nn
class Gate(nn.Module):
""" Implements gating mechanisms. TODO update this with more detailed description with reference to LSSL paper when it's on arxiv
Mechanisms:
N - No gate
G - Standard sigmoid gate
UR - Uniform refine gates
R - Refine gate
FS - Forward discretization, Sigmoid activation [equivalent to G]
BE - Backward discretization, Exp activation [equivalent to G]
BR - Backward discretization, Relu activation
TE - Trapezoid discretization, Exp activation
TR - Trapezoid discretization, Relu activation
TS - Trapezoid discretization, Sigmoid activation (0 to 2)
"""
def __init__(self, size, preact_ctor, preact_args, mechanism='N'):
super().__init__()
self.size = size
self.mechanism = mechanism
if self.mechanism == 'N':
pass
elif self.mechanism in ['G', 'FS', 'BE', 'BR', 'TE', 'TR', 'TS', 'ZE', 'ZR', 'ZS']:
self.W_g = preact_ctor(*preact_args)
elif self.mechanism in ['U', 'UT']:
self.W_g = preact_ctor(*preact_args)
b_g_unif = torch.empty(size)
torch.nn.init.uniform_(b_g_unif, 1./self.size, 1.-1./self.size)
self.b_g = nn.Parameter(torch.log(1./b_g_unif-1.).detach(), requires_grad=False)
elif self.mechanism == 'UR':
self.W_g = preact_ctor(*preact_args)
self.W_r = preact_ctor(*preact_args)
b_g_unif = torch.empty(size)
torch.nn.init.uniform_(b_g_unif, 1./self.size, 1.-1./self.size)
self.b_g = nn.Parameter(torch.log(1./b_g_unif-1.).detach(), requires_grad=False)
elif self.mechanism == 'R':
self.W_g = preact_ctor(*preact_args)
self.W_r = preact_ctor(*preact_args)
elif self.mechanism in ['GT']:
self.W_g = preact_ctor(*preact_args)
else:
assert False, f'Gating type {self.mechanism} is not supported.'
def forward(self, *inputs):
if self.mechanism == 'N':
return 1.0
if self.mechanism == 'G':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
        elif self.mechanism == 'U':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
elif self.mechanism == 'UR':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
r = torch.sigmoid(self.W_r(*inputs))
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'R':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
r = torch.sigmoid(self.W_r(*inputs))
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'UT':
g_preact = self.W_g(*inputs) + self.b_g
g = torch.sigmoid(g_preact)
r = g
g = (1-2*r)*g**2 + 2*r*g
elif self.mechanism == 'GT':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
r = g
g = (1-2*r)*g**2 + 2*r*g
else:
g_preact = self.W_g(*inputs)
# if self.mechanism[1] == 'S':
# g = torch.sigmoid(g_preact)
# elif self.mechanism[1] == 'E':
# g = torch.exp(g_preact)
# elif self.mechanism[1] == 'R':
# g = torch.relu(g_preact)
if self.mechanism == 'FS':
g = torch.sigmoid(g_preact)
g = self.forward_diff(g)
elif self.mechanism == 'BE':
g = torch.exp(g_preact)
g = self.backward_diff(g)
elif self.mechanism == 'BR':
g = torch.relu(g_preact)
g = self.backward_diff(g)
elif self.mechanism == 'TS':
g = 2 * torch.sigmoid(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'TE':
g = torch.exp(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'TR':
g = torch.relu(g_preact)
g = self.trapezoid(g)
elif self.mechanism == 'ZE':
g = torch.exp(g_preact)
g = self.zoh(g)
elif self.mechanism == 'ZR':
g = torch.relu(g_preact)
g = self.zoh(g)
elif self.mechanism == 'ZS':
g = torch.sigmoid(g_preact)
g = self.zoh(g)
return g
def forward_diff(self, x):
return x
def backward_diff(self, x):
return x / (1+x)
def trapezoid(self, x):
return x / (1 + x/2)
def zoh(self, x):
return 1 - torch.exp(-x)
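# A minimal usage sketch of the gating mechanisms above; the linear preactivation
# and feature size are illustrative.
if __name__ == "__main__":
    d = 8
    x = torch.randn(4, d)
    for mechanism in ['N', 'G', 'UR', 'R', 'TS']:
        gate = Gate(d, nn.Linear, (d, d), mechanism=mechanism)
        g = gate(x)
        print(mechanism, g if isinstance(g, float) else tuple(g.shape))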
| hyena-dna-main | src/models/nn/gate.py |
"""Implementations of several types of Discrete Sin/Cosine Transforms with various reductions to FFT.
Currently not used by S4
"""
import torch
import torch.nn as nn
import numpy as np
import scipy.fft
from einops import rearrange, repeat
class DCT(nn.Module):
""" Reductions adapted from https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft """
def __init__(self, N, norm='backward'):
super().__init__()
self.N = N
# Materialize DCT matrix
P = scipy.fft.dct(np.eye(N), norm=norm, type=2).T
P = torch.tensor(P, dtype=torch.float)
self.register_buffer('P', P)
# TODO take care of normalization
Q = np.exp(-1j * np.pi / (2 * self.N) * np.arange(self.N))
Q = torch.tensor(Q, dtype=torch.cfloat)
self.register_buffer('Q', Q) # half shift
def forward(self, x, mode=2):
if mode == 0:
return self.forward_dense(x)
elif mode == 1:
return self.forward_n(x)
elif mode == 2:
return self.forward_2n(x)
elif mode == 4:
return self.forward_4n(x)
def forward_dense(self, x):
""" Baseline DCT type II - matmul by DCT matrix """
y = (self.P.to(x) @ x.unsqueeze(-1)).squeeze(-1)
return y
def forward_4n(self, x):
""" DCT type II - reduction to FFT size 4N """
assert self.N == x.shape[-1]
x = torch.cat([x, x.flip(-1)], dim=-1)
z = torch.zeros_like(x)
x = torch.stack([z, x], dim=-1)
x = x.view(x.shape[:-2] + (-1,))
y = torch.fft.fft(x)
y = y[..., :self.N]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_2n(self, x):
""" DCT type II - reduction to FFT size 2N mirrored
The reduction from the DSP forum is not quite correct in the complex input case.
halfshift(FFT[a, b, c, d, d, c, b, a]) -> [A, B, C, D, 0, -D, -C, -B]
In the case of real input, the intermediate step after FFT has form [A, B, C, D, 0, D*, C*, B*]
"""
assert self.N == x.shape[-1]
x = torch.cat([x, x.flip(-1)], dim=-1)
y = torch.fft.fft(x)[..., :self.N]
y = y * self.Q
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_n(self, x):
""" DCT type II - reduction to size N """
assert self.N == x.shape[-1]
x = torch.cat([x[..., 0::2], x[..., 1::2].flip(-1)], dim=-1)
y = torch.fft.fft(x)
y = y * 2 * self.Q
if torch.is_complex(x):
y = torch.cat([y[..., :1], (y[..., 1:] + 1j * y[..., 1:].flip(-1)) / 2], dim=-1) # TODO in-place sum
else:
y = torch.real(y)
return y
class IDCT(nn.Module):
def __init__(self, N, norm='backward'):
super().__init__()
self.N = N
# Materialize DCT matrix
P = np.linalg.inv(scipy.fft.dct(np.eye(N), norm=norm, type=2).T)
P = torch.tensor(P, dtype=torch.float)
self.register_buffer('P', P)
# TODO take care of normalization
Q = np.exp(-1j * np.pi / (2 * self.N) * np.arange(2*self.N))
Q = torch.tensor(Q, dtype=torch.cfloat)
self.register_buffer('Q', Q) # half shift
def forward(self, x, mode=2):
if mode == 0:
return self.forward_dense(x)
elif mode == 1:
return self.forward_n(x)
elif mode == 2:
return self.forward_2n(x)
elif mode == 4:
return self.forward_4n(x)
def forward_dense(self, x):
""" Baseline DCT type II - matmul by DCT matrix """
y = (self.P.to(x) @ x.unsqueeze(-1)).squeeze(-1)
return y
def forward_4n(self, x):
""" DCT type II - reduction to FFT size 4N """
assert self.N == x.shape[-1]
z = x.new_zeros(x.shape[:-1] + (1,))
x = torch.cat([x, z, -x.flip(-1), -x[..., 1:], z, x[..., 1:].flip(-1)], dim=-1)
y = torch.fft.ifft(x)
y = y[..., 1:2*self.N:2]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_2n(self, x):
""" DCT type II - reduction to FFT size 2N mirrored """
assert self.N == x.shape[-1]
z = x.new_zeros(x.shape[:-1] + (1,))
x = torch.cat([x, z, -x[..., 1:].flip(-1)], dim=-1)
x = x / self.Q
y = torch.fft.ifft(x)[..., :self.N]
if torch.is_complex(x):
return y
else:
return torch.real(y)
def forward_n(self, x):
""" DCT type II - reduction to size N """
assert self.N == x.shape[-1]
raise NotImplementedError # Straightforward by inverting operations of DCT-II reduction
def test_dct_ii():
N = 8
dct = DCT(N)
baseline = dct.forward_dense
methods = [dct.forward_4n, dct.forward_2n, dct.forward_n]
# Real case
print("DCT-II Real input")
x = torch.randn(1, N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
# Complex case
print("DCT-II Complex input")
x = torch.randn(N) + 1j * torch.randn(N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
def test_dct_iii():
N = 8
dct = IDCT(N)
baseline = dct.forward_dense
methods = [dct.forward_4n, dct.forward_2n]
# Real case
print("DCT-III Real input")
x = torch.randn(1, N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
# Complex case
print("DCT-III Complex input")
# x = torch.randn(N) + 1j * torch.randn(N)
x = 1j * torch.ones(N)
y = baseline(x)
print(y)
for fn in methods:
y_ = fn(x)
print("err", torch.norm(y-y_))
| hyena-dna-main | src/models/nn/dxt.py |
""" Utility nn components, in particular handling activations, initializations, and normalization layers """
from functools import partial
import math
from typing import ForwardRef
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from opt_einsum import contract
def stochastic_depth(input: torch.Tensor, p: float, mode: str, training: bool = True):
"""
Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth"
<https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual
branches of residual architectures.
Args:
        input (Tensor[N, ...]): The input tensor of arbitrary dimensions with the first one
being its batch i.e. a batch with ``N`` rows.
p (float): probability of the input to be zeroed.
mode (str): ``"batch"`` or ``"row"``.
``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes
randomly selected rows from the batch.
training: apply stochastic depth if is ``True``. Default: ``True``
Returns:
Tensor[N, ...]: The randomly zeroed tensor.
"""
if p < 0.0 or p > 1.0:
raise ValueError("drop probability has to be between 0 and 1, but got {}".format(p))
if mode not in ["batch", "row"]:
raise ValueError("mode has to be either 'batch' or 'row', but got {}".format(mode))
if not training or p == 0.0:
return input
survival_rate = 1.0 - p
if mode == "row":
size = [input.shape[0]] + [1] * (input.ndim - 1)
else:
size = [1] * input.ndim
noise = torch.empty(size, dtype=input.dtype, device=input.device)
noise = noise.bernoulli_(survival_rate).div_(survival_rate)
return input * noise
class StochasticDepth(nn.Module):
"""
See :func:`stochastic_depth`.
"""
def __init__(self, p: float, mode: str) -> None:
# TODO(karan): need to upgrade to torchvision==0.11.0 to use StochasticDepth directly
# from torchvision.ops import StochasticDepth
super().__init__()
self.p = p
self.mode = mode
def forward(self, input):
return stochastic_depth(input, self.p, self.mode, self.training)
def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + '('
tmpstr += 'p=' + str(self.p)
tmpstr += ', mode=' + str(self.mode)
tmpstr += ')'
return tmpstr
class DropoutNd(nn.Module):
def __init__(self, p: float = 0.5, tie=True, transposed=True):
"""
tie: tie dropout mask across sequence lengths (Dropout1d/2d/3d)
"""
super().__init__()
if p < 0 or p >= 1:
raise ValueError("dropout probability has to be in [0, 1), " "but got {}".format(p))
self.p = p
self.tie = tie
self.transposed = transposed
self.binomial = torch.distributions.binomial.Binomial(probs=1-self.p)
def forward(self, X):
""" X: (batch, dim, lengths...) """
if self.training:
if not self.transposed: X = rearrange(X, 'b d ... -> b ... d')
# binomial = torch.distributions.binomial.Binomial(probs=1-self.p) # This is incredibly slow
mask_shape = X.shape[:2] + (1,)*(X.ndim-2) if self.tie else X.shape
# mask = self.binomial.sample(mask_shape)
mask = torch.rand(*mask_shape, device=X.device) < 1.-self.p
X = X * mask * (1.0/(1-self.p))
if not self.transposed: X = rearrange(X, 'b ... d -> b d ...')
return X
return X
def Activation(activation=None, size=None, dim=-1):
if activation in [ None, 'id', 'identity', 'linear' ]:
return nn.Identity()
elif activation == 'tanh':
return nn.Tanh()
elif activation == 'relu':
return nn.ReLU()
elif activation == 'gelu':
return nn.GELU()
elif activation in ['swish', 'silu']:
return nn.SiLU()
elif activation == 'glu':
return nn.GLU(dim=dim)
elif activation == 'sigmoid':
return nn.Sigmoid()
elif activation == 'softplus':
return nn.Softplus()
elif activation in ['sqrelu', 'relu2']:
return SquaredReLU()
elif activation == 'laplace':
return Laplace()
elif activation == 'ln':
return TransposedLN(dim)
else:
raise NotImplementedError("hidden activation '{}' is not implemented".format(activation))
def get_initializer(name, activation=None):
if activation in [ None, 'id', 'identity', 'linear' ]:
nonlinearity = 'linear'
elif activation in ['relu', 'tanh', 'sigmoid']:
nonlinearity = activation
elif activation in ['gelu', 'swish', 'silu']:
nonlinearity = 'relu' # Close to ReLU so approximate with ReLU's gain
else:
raise NotImplementedError(f"get_initializer: activation {activation} not supported")
if name == 'uniform':
initializer = partial(torch.nn.init.kaiming_uniform_, nonlinearity=nonlinearity)
elif name == 'normal':
initializer = partial(torch.nn.init.kaiming_normal_, nonlinearity=nonlinearity)
elif name == 'xavier':
initializer = torch.nn.init.xavier_normal_
elif name == 'zero':
initializer = partial(torch.nn.init.constant_, val=0)
elif name == 'one':
initializer = partial(torch.nn.init.constant_, val=1)
else:
raise NotImplementedError(f"get_initializer: initializer type {name} not supported")
return initializer
def LinearActivation(
d_input, d_output, bias=True,
zero_bias_init=False,
transposed=False,
initializer=None,
activation=None,
activate=False, # Apply activation as part of this module
weight_norm=False,
**kwargs,
):
""" Returns a linear nn.Module with control over axes order, initialization, and activation """
# Construct core module
# linear_cls = partial(nn.Conv1d, kernel_size=1) if transposed else nn.Linear
linear_cls = TransposedLinear if transposed else nn.Linear
if activation == 'glu': d_output *= 2
linear = linear_cls(d_input, d_output, bias=bias, **kwargs)
# Initialize weight
if initializer is not None:
get_initializer(initializer, activation)(linear.weight)
# Initialize bias
if bias and zero_bias_init:
nn.init.zeros_(linear.bias)
# Weight norm
if weight_norm:
linear = nn.utils.weight_norm(linear)
if activate and activation is not None:
activation = Activation(activation, d_output, dim=1 if transposed else -1)
linear = nn.Sequential(linear, activation)
return linear
class SquaredReLU(nn.Module):
def forward(self, x):
# return F.relu(x)**2
return torch.square(F.relu(x)) # Could this be faster?
def laplace(x, mu=0.707107, sigma=0.282095):
x = (x - mu).div(sigma * math.sqrt(2.0))
return 0.5 * (1.0 + torch.erf(x))
class Laplace(nn.Module):
def __init__(self, mu=0.707107, sigma=0.282095):
super().__init__()
self.mu = mu
self.sigma = sigma
def forward(self, x):
return laplace(x, mu=self.mu, sigma=self.sigma)
class TransposedLinear(nn.Module):
""" Linear module on the second-to-last dimension
Assumes shape (B, D, L), where L can be 1 or more axis
"""
def __init__(self, d_input, d_output, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.empty(d_output, d_input))
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) # nn.Linear default init
# nn.init.kaiming_uniform_(self.weight, nonlinearity='linear') # should be equivalent
if bias:
self.bias = nn.Parameter(torch.empty(d_output))
bound = 1 / math.sqrt(d_input)
nn.init.uniform_(self.bias, -bound, bound)
setattr(self.bias, "_optim", {"weight_decay": 0.0})
else:
self.bias = 0.0
def forward(self, x):
num_axis = len(x.shape[2:]) # num_axis in L, for broadcasting bias
y = contract('b u ..., v u -> b v ...', x, self.weight) + self.bias.view(-1, *[1]*num_axis)
return y
class TransposedLN(nn.Module):
""" LayerNorm module over second dimension
Assumes shape (B, D, L), where L can be 1 or more axis
    This is slow and a dedicated CUDA/Triton implementation should provide substantial end-to-end speedup
"""
def __init__(self, d, scalar=True):
super().__init__()
self.scalar = scalar
if self.scalar:
self.m = nn.Parameter(torch.zeros(1))
self.s = nn.Parameter(torch.ones(1))
setattr(self.m, "_optim", {"weight_decay": 0.0})
setattr(self.s, "_optim", {"weight_decay": 0.0})
else:
self.ln = nn.LayerNorm(d)
def forward(self, x):
if self.scalar:
# calc. stats over D dim / channels
s, m = torch.std_mean(x, dim=1, unbiased=False, keepdim=True)
y = (self.s/s) * (x-m+self.m)
else:
# move channel to last axis, apply layer_norm, then move channel back to second axis
_x = self.ln(rearrange(x, 'b d ... -> b ... d'))
y = rearrange(_x, 'b ... d -> b d ...')
return y
class Normalization(nn.Module):
def __init__(
self,
d,
transposed=False, # Length dimension is -1 or -2
_name_='layer',
**kwargs
):
super().__init__()
self.transposed = transposed
self._name_ = _name_
if _name_ == 'layer':
self.channel = True # Normalize over channel dimension
if self.transposed:
self.norm = TransposedLN(d, **kwargs)
else:
self.norm = nn.LayerNorm(d, **kwargs)
elif _name_ == 'instance':
self.channel = False
norm_args = {'affine': False, 'track_running_stats': False}
norm_args.update(kwargs)
self.norm = nn.InstanceNorm1d(d, **norm_args) # (True, True) performs very poorly
elif _name_ == 'batch':
self.channel = False
norm_args = {'affine': True, 'track_running_stats': True}
norm_args.update(kwargs)
self.norm = nn.BatchNorm1d(d, **norm_args)
elif _name_ == 'group':
self.channel = False
            self.norm = nn.GroupNorm(1, d, **kwargs)
elif _name_ == 'none':
self.channel = True
self.norm = nn.Identity()
else: raise NotImplementedError
def forward(self, x):
# Handle higher dimension logic
shape = x.shape
if self.transposed:
x = rearrange(x, 'b d ... -> b d (...)')
else:
            x = rearrange(x, 'b ... d -> b (...) d')
# The cases of LayerNorm / no normalization are automatically handled in all cases
# Instance/Batch Norm work automatically with transposed axes
if self.channel or self.transposed:
x = self.norm(x)
else:
x = x.transpose(-1, -2)
x = self.norm(x)
x = x.transpose(-1, -2)
x = x.view(shape)
return x
def step(self, x, **kwargs):
assert self._name_ in ["layer", "none"]
if self.transposed: x = x.unsqueeze(-1)
x = self.forward(x)
if self.transposed: x = x.squeeze(-1)
return x
class TSNormalization(nn.Module):
def __init__(self, method, horizon):
super().__init__()
self.method = method
self.horizon = horizon
def forward(self, x):
# x must be BLD
if self.method == 'mean':
self.scale = x.abs()[:, :-self.horizon].mean(dim=1)[:, None, :]
return x / self.scale
elif self.method == 'last':
self.scale = x.abs()[:, -self.horizon-1][:, None, :]
return x / self.scale
return x
class TSInverseNormalization(nn.Module):
def __init__(self, method, normalizer):
super().__init__()
self.method = method
self.normalizer = normalizer
def forward(self, x):
if self.method == 'mean' or self.method == 'last':
return x * self.normalizer.scale
return x
class ReversibleInstanceNorm1dInput(nn.Module):
def __init__(self, d, transposed=False):
super().__init__()
        # BLD if transposed is False, otherwise BDL
self.transposed = transposed
self.norm = nn.InstanceNorm1d(d, affine=True, track_running_stats=False)
def forward(self, x):
# Means, stds
if not self.transposed:
x = x.transpose(-1, -2)
self.s, self.m = torch.std_mean(x, dim=-1, unbiased=False, keepdim=True)
self.s += 1e-4
x = (x - self.m) / self.s
# x = self.norm.weight.unsqueeze(-1) * x + self.norm.bias.unsqueeze(-1)
if not self.transposed:
return x.transpose(-1, -2)
return x
class ReversibleInstanceNorm1dOutput(nn.Module):
def __init__(self, norm_input):
super().__init__()
self.transposed = norm_input.transposed
self.weight = norm_input.norm.weight
self.bias = norm_input.norm.bias
self.norm_input = norm_input
def forward(self, x):
if not self.transposed:
x = x.transpose(-1, -2)
# x = (x - self.bias.unsqueeze(-1))/self.weight.unsqueeze(-1)
x = x * self.norm_input.s + self.norm_input.m
if not self.transposed:
return x.transpose(-1, -2)
return x
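# A minimal usage sketch of a few of the components above; the (batch, channel, length)
# shapes and hyperparameters are illustrative.
if __name__ == "__main__":
    x = torch.randn(2, 16, 10)   # (B, D, L), i.e. the transposed layout
    lin = LinearActivation(16, 32, transposed=True, activation='gelu', activate=True)
    norm = Normalization(32, transposed=True, _name_='layer')
    drop = DropoutNd(p=0.1, tie=True, transposed=True)
    y = drop(norm(lin(x)))
    print(y.shape)   # (2, 32, 10)
    print(stochastic_depth(y, p=0.5, mode='row').shape)   # (2, 32, 10), some rows zeroed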
| hyena-dna-main | src/models/nn/components.py |
# Copyright (c) 2023, Tri Dao, Dan Fu.
# Simplified, mostly standalone version of LongConvLM for synthetics.
import math
from functools import partial
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops import StochasticDepth
from einops import rearrange
from src.utils import instantiate
import src.utils.registry as registry
class LinearResidual(nn.Linear):
"""Wrap nn.Linear to return the residual as well. For compatibility with FusedDense.
"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return super().forward(input), input
class SelfAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
super().__init__()
self.causal = causal
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
def forward(self, qkv, causal=None, key_padding_mask=None):
"""Implements the multihead softmax attention.
Arguments
---------
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
causal: if passed, will override self.causal
key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
False means to mask out. (B, S)
"""
batch_size, seqlen = qkv.shape[0], qkv.shape[1]
causal = self.causal if causal is None else causal
q, k, v = qkv.unbind(dim=2)
softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
if key_padding_mask is not None:
padding_mask = torch.full((batch_size, seqlen), -10000.0, dtype=scores.dtype,
device=scores.device)
padding_mask.masked_fill_(key_padding_mask, 0.0)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + rearrange(padding_mask, 'b s -> b 1 1 s')
if causal:
# "triu_tril_cuda_template" not implemented for 'BFloat16'
# So we have to construct the mask in float
causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + causal_mask.to(dtype=scores.dtype)
attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
attention_drop = F.dropout(attention, self.dropout_p if self.training else 0.0)
output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
return output
class MHA(nn.Module):
"""Multi-head self-attention and cross-attention
"""
def __init__(self, embed_dim, num_heads, bias=True, dropout=0.0,
softmax_scale=None, causal=False, layer_idx=None, dwconv=False,return_residual=False,device=None, dtype=None) -> None:
"""
return_residual: whether to return the input x along with the output. This is for
performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.embed_dim = embed_dim
self.causal = causal
self.layer_idx = layer_idx
self.dwconv = dwconv
self.return_residual = return_residual
self.num_heads = num_heads
        assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
self.head_dim = self.embed_dim // num_heads
linear_cls = nn.Linear
linear_resid_cls = LinearResidual
inner_attn_cls = SelfAttention
if not self.return_residual:
self.Wqkv = linear_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
else:
self.Wqkv = linear_resid_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
if self.dwconv:
self.dwconv_qkv = nn.Conv1d(3 * embed_dim, 3 * embed_dim, kernel_size=3, padding=2,
groups=3 * embed_dim)
self.inner_attn = inner_attn_cls(causal=causal, softmax_scale=softmax_scale,
attention_dropout=dropout)
# output projection always have the bias (for now)
self.out_proj = linear_cls(embed_dim, embed_dim, **factory_kwargs)
def forward(self, x, key_padding_mask=None, **kwargs):
"""
Arguments:
x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if
cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total
                is the sum of the sequence lengths in the batch.
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into x. Only applicable when using
FlashAttention.
max_seqlen: int. Maximum sequence length in the batch.
key_padding_mask: boolean mask, True means to keep, False means to mask out.
(batch, seqlen). Only applicable when not using FlashAttention.
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
inference_params: for generation. Adapted from Megatron-LM (and Apex)
https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470
"""
kwargs = ({'key_padding_mask': key_padding_mask, **kwargs})
if not self.return_residual:
qkv = self.Wqkv(x)
else:
qkv, x = self.Wqkv(x)
if self.dwconv:
qkv = rearrange(self.dwconv_qkv(rearrange(qkv, 'b s d -> b d s'))[..., :-2],
'b d s -> b s d').contiguous()
qkv = rearrange(qkv, '... (three h d) -> ... three h d', three=3, d=self.head_dim)
context = self.inner_attn(qkv, **kwargs)
out = self.out_proj(rearrange(context, '... h d -> ... (h d)'))
return out if not self.return_residual else (out, x)
class GPT2Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, max_position_embeddings, padding_idx=None,
word_embed_proj_dim=None, device=None, dtype=None):
"""
If max_position_embeddings <= 0, there's no position embeddings
        If word_embed_proj_dim is not None (e.g., OPT-350m), we embed to that dimension
        then project up to embed_dim
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if word_embed_proj_dim is None:
self.word_embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx,
**factory_kwargs)
self.project_in = None
else:
self.word_embeddings = nn.Embedding(vocab_size, word_embed_proj_dim,
padding_idx=padding_idx, **factory_kwargs)
self.project_in = nn.Linear(word_embed_proj_dim, embed_dim, bias=False,
**factory_kwargs)
self.max_position_embeddings = max_position_embeddings
if self.max_position_embeddings > 0:
self.position_embeddings = nn.Embedding(max_position_embeddings, embed_dim,
**factory_kwargs)
def forward(self, input_ids, position_ids=None):
"""
input_ids: (batch, seqlen)
position_ids: (batch, seqlen)
"""
batch_size, seqlen = input_ids.shape
embeddings = self.word_embeddings(input_ids)
if self.project_in is not None:
embeddings = self.project_in(embeddings)
if self.max_position_embeddings > 0:
if position_ids is None:
position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
return embeddings
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, activation=F.gelu,
return_residual=False, device=None, dtype=None):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/mlp.py
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.return_residual = return_residual
self.fc1 = nn.Linear(in_features, hidden_features, **factory_kwargs)
self.activation = activation
self.fc2 = nn.Linear(hidden_features, out_features, **factory_kwargs)
def forward(self, x):
y = self.fc1(x)
y = self.activation(y)
y = self.fc2(y)
return y if not self.return_residual else (y, x)
class Block(nn.Module):
def __init__(self, dim, mixer_cls=None, mlp_cls=None, norm_cls=nn.LayerNorm,
dropout_cls=nn.Dropout, prenorm=True, resid_dropout1=0., resid_dropout2=0.,
drop_path1=0., drop_path2=0.,
return_residual=False,
residual_in_fp32=False):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/block.py
For prenorm=True, this Block has a slightly different structure compared to a regular
prenorm Transformer block.
The standard block is: LN -> MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add.
[Ref: https://arxiv.org/abs/2002.04745]
Here we have: Dropout -> Add -> LN -> MHA -> Dropout -> Add -> LN -> MLP, returning both
the hidden_states (output of the MLP) and the residual.
This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
The residual needs to be provided (except for the very first block).
For prenorm=False, this Block has the same structure as a regular postnorm Transformer
block: MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add -> LN.
return_residual: whether each of the sub-layers (mixer and mlp) will return the residual.
This is for performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
super().__init__()
self.prenorm = prenorm
self.return_residual = return_residual
self.residual_in_fp32 = residual_in_fp32
if self.residual_in_fp32:
assert self.prenorm, 'residual_in_fp32 is only compatible with prenorm=True'
if mixer_cls is None:
mixer_cls = partial(MHA, num_heads=dim // 64)
if mlp_cls is None:
mlp_cls = partial(Mlp, hidden_features=4 * dim)
self.mixer = mixer_cls(dim)
self.dropout1 = dropout_cls(resid_dropout1)
self.drop_path1 = StochasticDepth(drop_path1, mode='row')
self.norm1 = norm_cls(dim)
self.mlp = mlp_cls(dim)
if not isinstance(self.mlp, nn.Identity):
self.dropout2 = dropout_cls(resid_dropout2)
self.drop_path2 = StochasticDepth(drop_path2, mode='row')
self.norm2 = norm_cls(dim)
def forward(self, hidden_states, residual = None,
mixer_subset=None, mixer_kwargs=None):
r"""Pass the input through the encoder layer.
Args:
hidden_states: the sequence to the encoder layer (required).
            residual: if postnorm, residual=None. If prenorm, hidden_states = Attn/MLP(LN(residual))
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
"""
if self.prenorm:
dropped = self.drop_path1(self.dropout1(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
if mixer_kwargs is None:
mixer_kwargs = {}
if mixer_subset is not None:
mixer_kwargs['mixer_subset'] = mixer_subset
hidden_states = self.mixer(hidden_states, **mixer_kwargs)
if mixer_subset is not None:
residual = residual[:, mixer_subset]
if not isinstance(self.mlp, nn.Identity):
dropped = self.drop_path2(self.dropout2(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
hidden_states = self.mlp(hidden_states)
return hidden_states, residual
else:
assert residual is None
mixer_out = self.mixer(
hidden_states, **(mixer_kwargs if mixer_kwargs is not None else {})
)
if self.return_residual: # mixer out is actually a pair here
mixer_out, hidden_states = mixer_out
hidden_states = self.norm1((self.drop_path1(self.dropout1(mixer_out))
+ hidden_states).to(dtype=self.norm1.weight.dtype))
if not isinstance(self.mlp, nn.Identity):
mlp_out = self.mlp(hidden_states)
if self.return_residual: # mlp out is actually a pair here
mlp_out, hidden_states = mlp_out
hidden_states = self.norm2((self.drop_path2(self.dropout2(mlp_out))
+ hidden_states).to(dtype=self.norm2.weight.dtype))
return hidden_states
def create_mixer_cls(layer=None,
attn_layer_idx=None, attn_cfg=None, layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
if attn_layer_idx is not None and layer_idx in attn_layer_idx:
causal = True if attn_cfg is None else attn_cfg.pop('causal', True)
mha_cls = MHA
mixer_cls = partial(mha_cls, causal=causal, layer_idx=layer_idx,
**(attn_cfg if attn_cfg is not None else {}),**factory_kwargs)
else:
mixer_cls = instantiate(registry.layer, layer, partial=True, layer_idx=layer_idx, **factory_kwargs)
return mixer_cls
def create_mlp_cls(d_model, d_inner=None, device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
inner_dim = d_inner if d_inner is not None else 4 * d_model
mlp_cls = partial(Mlp, hidden_features=inner_dim,
activation=partial(F.gelu, approximate='tanh'), **factory_kwargs)
return mlp_cls
def create_block(d_model, d_inner=None,
layer=None, attn_layer_idx=None,
attn_cfg=None, layer_norm_epsilon=1e-5,
resid_dropout1=0.0, resid_dropout2=0.0, residual_in_fp32=False,
layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
mixer_cls = create_mixer_cls(layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_idx=layer_idx,
**factory_kwargs)
mlp_cls = create_mlp_cls(d_model, d_inner=d_inner,
**factory_kwargs)
norm_cls = partial(nn.LayerNorm, eps=layer_norm_epsilon, **factory_kwargs)
block = Block(d_model, mixer_cls, mlp_cls, norm_cls=norm_cls,
prenorm=True, resid_dropout1=resid_dropout1, resid_dropout2=resid_dropout2,residual_in_fp32=residual_in_fp32)
block.layer_idx = layer_idx
return block
# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(module, n_layer, initializer_range=0.02, rescale_prenorm_residual=True,
glu_act=False):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=initializer_range)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=initializer_range)
if rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if name in ["out_proj.weight", "fc2.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
# If using GLU activation for now, we scale the std by 2
elif name in ["output_linear.0.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
if not glu_act:
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
else:
out_features = p.shape[0]
# Multiplying the first half of the matrix by 2 since sigmoid scales it down by 0.5
# on average.
nn.init.normal_(p[:out_features // 2], mean=0.0, std=initializer_range / math.sqrt(2 * n_layer) * 2)
class LMBackbone(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.process_group = process_group
self.residual_in_fp32 = residual_in_fp32
self.embeddings = GPT2Embeddings(d_model, vocab_size, max_position_embeddings,
**factory_kwargs)
self.layers = nn.ModuleList([create_block(
d_model, d_inner=d_inner,
layer=layer, attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_norm_epsilon=layer_norm_epsilon,
resid_dropout1=embed_dropout if i == 0 else resid_dropout,
resid_dropout2=resid_dropout, residual_in_fp32=residual_in_fp32,layer_idx=i,
**factory_kwargs,
) for i in range(n_layer)])
self.drop_f = nn.Dropout(resid_dropout)
self.ln_f = nn.LayerNorm(d_model, eps=layer_norm_epsilon, **factory_kwargs)
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
def forward(self, input_ids, position_ids=None):
hidden_states = self.embeddings(input_ids, position_ids=position_ids,)
residual = None
for layer in self.layers:
hidden_states, residual = layer(hidden_states, residual)
dropped = self.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.ln_f(residual.to(dtype=self.ln_f.weight.dtype))
return hidden_states
class SimpleLMHeadModel(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
pad_vocab_size_multiple: int = 1,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, residual_in_fp32=residual_in_fp32,
**factory_kwargs, **kwargs
)
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
def forward(self, input_ids, position_ids=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids)
lm_logits = self.lm_head(hidden_states)
CausalLMOutput = namedtuple('CausalLMOutput', ['logits'])
return CausalLMOutput(logits=lm_logits), None
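if __name__ == "__main__":
    # Minimal smoke test (sketch): parameter values are illustrative, and this assumes
    # flash_attn's unfused MHA code path (selected via attn_layer_idx / attn_cfg) is
    # importable and runnable on the current device.
    import torch
    model = SimpleLMHeadModel(
        d_model=128, n_layer=2, d_inner=512, vocab_size=32,
        attn_layer_idx=[0, 1], attn_cfg={'num_heads': 4},
    )
    input_ids = torch.randint(0, 32, (2, 64))
    output, _ = model(input_ids)
    print(output.logits.shape)  # expected: torch.Size([2, 64, 32])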
| hyena-dna-main | src/models/sequence/simple_lm.py |
""" Implementation of FFN block in the style of Transformers """
from functools import partial
from torch import nn
from src.models.sequence.base import SequenceModule
from src.models.nn import LinearActivation, DropoutNd
class FF(SequenceModule):
def __init__(self, d_input, expand=2, d_output=None, transposed=False, activation='gelu', initializer=None, dropout=0.0, tie_dropout=False):
super().__init__()
self.d_output = d_input if d_output is None else d_output
self.transposed = transposed
d_inner = expand * d_input
linear1 = LinearActivation(
d_input, d_inner,
transposed=transposed,
activation=activation,
initializer=initializer,
activate=True,
)
dropout_cls = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
# dropout_cls = nn.Dropout2d if self.transposed else nn.Dropout
drop = dropout_cls(dropout) if dropout > 0.0 else nn.Identity()
linear2 = LinearActivation(
d_inner, self.d_output,
transposed=transposed,
activation=None,
initializer=initializer,
activate=False,
)
self.ff = nn.Sequential(
linear1,
drop,
linear2,
)
def forward(self, x, *args, **kwargs):
return self.ff(x), None
def step(self, x, state, **kwargs):
# x: [batch, d_input]
if self.transposed:
# expects: [batch, d_input, seq_len]
return self.ff(x.unsqueeze(-1)).squeeze(-1), state
else:
return self.ff(x), state
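if __name__ == "__main__":
    # Minimal usage sketch (illustrative values): FF maps (batch, seq_len, d_input)
    # to (batch, seq_len, d_output) and returns an (output, state) pair with state=None.
    import torch
    ff = FF(d_input=64, expand=2, dropout=0.1)
    x = torch.randn(2, 100, 64)
    y, _ = ff(x)
    print(y.shape)  # torch.Size([2, 100, 64])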
| hyena-dna-main | src/models/sequence/ff.py |
'''PyTorch version of the block FFT convolution as described in the H3 paper.'''
import torch
from einops import rearrange
import math
from torch import nn
from src.models.nn import Activation
from src.utils.train import OptimModule
def ref_dft_matrix(N, H=1):
"""Compute the DFT matrix of size N x N.
This is where we could add extra compute for free."""
# n = torch.arange(N)
n = torch.arange(N).cuda()
k = n.view(-1, 1)
M = torch.exp(-2j * torch.pi * n * k / N)
return torch.view_as_real(M.repeat(H, 1, 1))
def compute_twiddle_factors(n, m):
"""Compute the twiddle factors of size n x m"""
# n_a = torch.arange(n).view(-1, 1)
# m_a = torch.arange(m)
n_a = torch.arange(n).cuda().view(-1, 1)
m_a = torch.arange(m).cuda()
N = n * m
M = torch.exp(-2j * torch.pi * n_a * m_a / N)
return torch.view_as_real(M)
def _cooley_tukey(
k, n, m,
dft_matrix=ref_dft_matrix,
max_m=16,
activation=None,
):
'''
Compute the FFT using the general Cooley-Tukey algorithm:
* Reshape to (m, n)
* Do n m-length FFTs along the rows
* Transpose to (n, m), multiply by twiddle factors
* Do m n-length FFTs along the rows
This function assumes that m <= 16 and recurses on n.
The base case is n <= 16 (we are simulating tensor cores of 16x16 mm).
The dft_matrix function is overwriteable
so that we can replace it with learnable parameters in a model.
'''
assert m <= max_m
if activation is not None:
act_fn = Activation(activation)
k = rearrange(k, '... (m n) -> ... m n', m=m, n=n) # (m, n)
# do n m-length FFTs
if activation is None:
mat = torch.view_as_complex(dft_matrix(m))
k_f = torch.einsum('... m o, ... o n -> ... m n', mat, k) # (..., m, n)
else:
mat = torch.view_as_complex(dft_matrix(m))
k_f = torch.view_as_complex(act_fn(
torch.view_as_real(torch.einsum('... m o, ... o n -> ... m n', mat, k))
)) # (..., m, n)
# multiply by twiddle factors
twi = torch.view_as_complex(compute_twiddle_factors(n, m)) # (n, m)
k_f = torch.einsum('n m, ... m n -> ... n m', twi, k_f) # (..., n, m)
if n <= max_m:
# do m n-length FFTs
if activation is None:
mat = torch.view_as_complex(dft_matrix(n))
k_f = torch.einsum('... n o, ... o m -> ... n m', mat, k_f) # (.., n, m)
else:
mat = torch.view_as_complex(dft_matrix(n))
k_f = torch.view_as_complex(act_fn(
torch.view_as_real(torch.einsum('... n o, ... o m -> ... n m', mat, k_f))
)) # (.., n, m)
else:
# recurse
k_f = rearrange(k_f, '... h n m -> ... m h n')
k_f = _cooley_tukey(k_f, n // max_m, max_m, dft_matrix, max_m, activation)
k_f = rearrange(k_f, '... m h n -> ... h n m')
# reshape for the output
k_f = rearrange(k_f, '... n m -> ... (n m)') # (..., n*m)
return k_f
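# Worked example of the recursion above (illustrative): with max_m=16, an N=8192
# transform is split as 8192 = 16 * 512; the length-512 stage recurses as 512 = 16 * 32
# and then 32 = 16 * 2, so the full FFT reduces to three levels of 16-point FFTs plus a
# final 2-point stage, each expressible as the simulated 16x16 matrix multiplies.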
def block_fft(
k, N,
dft_matrix=ref_dft_matrix,
max_m=16,
**kwargs,
):
'''
Compute the FFT of size N of the vector k, using _block_fft_recurse.
The dft_matrix function is overwriteable
so that we can replace it with learnable parameters in a model.
'''
if not math.log(N, 2).is_integer():
N = int(2 ** math.ceil(math.log(N, 2)))
# pad k with zeros if necessary (e.g. for causality)
if k.shape[-1] != N:
k = nn.ConstantPad1d((0, N - k.shape[-1]), 0)(k)
    if N <= max_m:
        mat = torch.view_as_complex(dft_matrix(N))
        return torch.einsum('... n o, ... o -> ... n', mat, k)  # single N-point DFT base case
n = N // max_m
m = max_m
return _cooley_tukey(k, n, m, dft_matrix, max_m, **kwargs)
class BlockFFT(OptimModule):
'''
Learnable Block FFT module.
Args:
        learn_dft_matrices (bool): If True, learn a different DFT matrix for lengths 2, 4, 8, and 16. If False, this module computes a normal FFT.
'''
def __init__(self, learn_dft_matrices=True, H=1, max_m=16, dft_lr=0.001, dropout=0, learn_additive=False, **block_fft_args):
super().__init__()
self.learn_dft_matrices = learn_dft_matrices
self.block_fft_args = block_fft_args
self.max_m=max_m
self.drop = torch.nn.Dropout(p=dropout)
self.learn_additive=learn_additive
# get the powers of 2 up to max_m
assert math.log(max_m, 2).is_integer(), 'max_m must be a power of 2'
self.powers = [ 2 ** (i + 1) for i in range(int(math.log(max_m, 2))) ]
if learn_dft_matrices:
assert dft_lr>0,"If learn_dft_matrices=True dft_lr must be positive"
self.dft_matrices = nn.ParameterList()
for n in self.powers:
setattr(self,f"mat_{n}",nn.Parameter(
0.01 * torch.randn(H, n, n, 2) if self.learn_additive
else ref_dft_matrix(n, H=H),
requires_grad=True))
self.register(f"mat_{n}",getattr(self,f"mat_{n}"),dft_lr)
self.dft_matrices.append(getattr(self,"mat_{}".format(n)))
def compute_dft_matrix(self, n):
if not self.learn_dft_matrices:
return ref_dft_matrix(n)
else:
assert n in self.powers
if self.learn_additive:
mat = ref_dft_matrix(n)
return mat + self.drop(self.dft_matrices[int(math.log(n, 2) - 1)])
else:
return self.drop(self.dft_matrices[int(math.log(n, 2) - 1)])
def forward(self, x, N,forward=True):
'''Compute an FFT (forward=True) or iFFT (forward=False) of length N over x.'''
if forward:
return block_fft(x, N, dft_matrix=self.compute_dft_matrix, **self.block_fft_args)
else:
return (1/(N))*torch.conj(block_fft(torch.conj(x), N, dft_matrix=self.compute_dft_matrix, **self.block_fft_args))
if __name__ == "__main__":
B = 128
H = 29
N = 8192
n = 2
m = 8
    k = torch.randn(B, H, N, device='cuda').to(torch.complex64)  # ref_dft_matrix builds the DFT matrices on CUDA, so k must be on CUDA too
print(f'(B, H, N) = ({B}, {H}, {N})')
# test FFT
k_f = block_fft(k, N)
k_f_ref = torch.fft.fft(k, N)
print('L-inf error in FFT: ', torch.max(torch.abs(k_f - k_f_ref)).item()) | hyena-dna-main | src/models/sequence/block_fft.py |
from .base import SequenceModule, TransposedModule
from .model import SequenceModel
from .ff import FF
| hyena-dna-main | src/models/sequence/__init__.py |
from functools import partial
import torch
import torch.nn as nn
from flash_attn.utils.generation import GenerationMixin
from flash_attn.utils.distributed import sync_shared_params
try:
from flash_attn.ops.fused_dense import ColumnParallelLinear
except ImportError:
ColumnParallelLinear = None
# grab all functions / modules from long_conv_lm.py
from src.models.sequence.long_conv_lm import LMBackbone
from src.models.sequence.long_conv_lm import _init_weights
class DNAEmbeddingModel(nn.Module, GenerationMixin):
"""DNA Embedding Model, which is the same as ConvLMHeadModel (in long_conv_lm.py), except no decoder head, we just pass back the hidden states for downstream tasks."""
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1, dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,
fused_mlp=False, fused_dropout_add_ln=False, residual_in_fp32=False,
pad_vocab_size_multiple: int = 1, sequence_parallel=True,
device=None, dtype=None, return_hidden_state=False, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.d_model = d_model # for decoder
self.process_group = process_group
self.return_hidden_state = return_hidden_state
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
process_group=process_group,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
dropout_cls=dropout_cls, layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, fused_mlp=fused_mlp,
fused_dropout_add_ln=fused_dropout_add_ln, residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel,
**factory_kwargs, **kwargs
)
if process_group is None:
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
else:
if ColumnParallelLinear is None:
raise ImportError('fused_dense_lib is not installed')
self.lm_head = ColumnParallelLinear(
d_model, vocab_size, process_group, bias=False,
sequence_parallel=sequence_parallel, **factory_kwargs
)
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(self, input_ids, position_ids=None, inference_params=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids,
inference_params=inference_params)
# we only need the last hidden state for embeddings (decoder head will predict classification task)
return hidden_states, None
@property
def d_output(self):
"""Model /embedding dimension, used for decoder mapping.
"""
if getattr(self, "d_model", None) is None:
raise NotImplementedError("SequenceModule instantiation must set d_output")
return self.d_model
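# Usage sketch (illustrative values, not a definitive recipe): unlike the LM head models,
# this class returns per-token hidden states rather than vocabulary logits, e.g.
#   model = DNAEmbeddingModel(d_model=128, n_layer=2, d_inner=512, vocab_size=12,
#                             attn_layer_idx=[0, 1], attn_cfg={'num_heads': 4})
#   hidden, _ = model(torch.randint(0, 12, (1, 64)))  # hidden: (1, 64, 128)
# A downstream decoder then reads model.d_output (= d_model) features per position.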
def load_backbone(model, state_dict, freeze_backbone=False, ignore_head=True):
"""
    Modifies state dict loading with a custom function. This is necessary because the head of
    an LM outputs logits over the vocab, but we only need the embeddings for downstream tasks.
inputs:
model: nn.Module, the from 'scratch' model
state_dict: dict, from the pretrained weights
ignore_head: bool, whether to inflate weights in the head (or keep scratch weights).
If number of classes changes (eg, imagenet to hmdb51), then you need to use this.
return:
state_dict: dict, update with inflated weights
"""
# consumes prefix from pretrained model, if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict, "model."
)
model_new_params_dict = model.state_dict()
updated_model_state_dict = {}
# loop through scratch model keys (pretrained may have extra stuff)
for key in sorted(model_new_params_dict.keys()):
loaded_params = state_dict.get(key, None)
# make sure key is in the loaded params first, if not, then print it out
if loaded_params is None:
# This should never happen, it should be there!
print("Missing key in pretrained model!", key)
raise Exception
elif ignore_head and 'head' in key:
# ignore head weights
print("found head key / parameter, load from scratch", key)
# using scratch by default, nothing needed
used_params = model_new_params_dict[key]
elif "decoder" in key:
print("found decoder key / parameter, load from scratch", key)
used_params = model_new_params_dict[key]
else:
print('key: shape MATCH, loading', key) # load matched weights
used_params = loaded_params
        # we need to pass back a state dict whose keys carry the 'model.' prefix
key_with_prefix = 'model.' + key
updated_model_state_dict[key_with_prefix] = used_params
if freeze_backbone:
print("freezing model backbone params!")
# note, decoder not included in backbone
for name, param in model.named_parameters():
param.requires_grad = False
# we have updated the new model state dict with pretrained now
return updated_model_state_dict | hyena-dna-main | src/models/sequence/dna_embedding.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
import opt_einsum as oe
optimized = True
if optimized:
contract = oe.contract
else:
contract = torch.einsum
from src.models.nn import LinearActivation, Activation, DropoutNd
from src.models.sequence.block_fft import BlockFFT
from src.models.sequence.long_conv_kernel import LongConvKernel
class LongConv(nn.Module):
def __init__(
self,
d_model,
l_max=1024,
channels=1,
bidirectional=False,
# Arguments for position-wise feedforward components
activation='gelu', # activation between conv and FF
postact='glu', # activation after FF
initializer=None, # initializer on FF
weight_norm=False, # weight normalization on FF
dropout=0.0, tie_dropout=False,
transposed=True, # axis ordering (B, L, D) or (B, D, L)
verbose=False,
block_fft_conv=False, # replace the FFT conv with Monarch blocks
block_fft_conv_args={},
# SSM Kernel arguments
**kernel_args,
):
"""
d_state: the dimension of the state, also denoted by N
l_max: the maximum kernel length, also denoted by L
channels: can be interpreted as a number of "heads"; the SSM is a map from a 1-dim to C-dim sequence. It's not recommended to change this unless desperate for things to tune; instead, increase d_model for larger models
bidirectional: if True, convolution kernel will be two-sided
Position-wise feedforward components:
--------------------
activation: activation in between SS and FF
postact: activation after FF ('id' for no activation, None to remove FF layer)
initializer: initializer on FF
weight_norm: weight normalization on FF
dropout: standard dropout argument. tie_dropout=True ties the dropout mask across the sequence length, emulating nn.Dropout1d
Other arguments:
--------------------
transposed: choose backbone axis ordering of (B, L, H) (if False) or (B, H, L) (if True) [B=batch size, L=sequence length, H=hidden dimension]
"""
super().__init__()
if verbose:
import src.utils.train
log = src.utils.train.get_logger(__name__)
log.info(f"Constructing Long Conv (H, L) = ({d_model}, {l_max})")
self.d_model = d_model
self.H = d_model
self.L = l_max
self.bidirectional = bidirectional
self.channels = channels
self.transposed = transposed
self.block_fft_conv = block_fft_conv
        self.block_fft_conv_args = block_fft_conv_args
        # read the learn_ifft flag used in forward(); pop it so BlockFFT only receives FFT kwargs
        self.learn_ifft = self.block_fft_conv_args.pop('learn_ifft', False)
self.D = nn.Parameter(torch.randn(channels, self.H))
if self.bidirectional:
channels *= 2
# SSM Kernel
self.kernel = LongConvKernel(self.H, L=self.L, channels=channels, verbose=verbose, **kernel_args)
if self.block_fft_conv:
self.block_fft_u = BlockFFT(**self.block_fft_conv_args)
self.block_fft_k = BlockFFT(**self.block_fft_conv_args)
# Pointwise
self.activation = Activation(activation)
# dropout_fn = nn.Dropout2d if self.transposed else nn.Dropout # Broken in torch==1.11
dropout_fn = DropoutNd if tie_dropout else nn.Dropout
self.dropout = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
# position-wise output transform to mix features
if postact is None:
self.output_linear = nn.Identity()
else:
self.output_linear = LinearActivation(
self.d_model * self.channels,
self.d_model,
# self.H*self.channels,
# self.d_model*(1 if self.gate is None else self.gate),
transposed=self.transposed,
initializer=initializer,
activation=postact,
activate=True,
weight_norm=weight_norm,
)
def forward(self, u, state=None, rate=1.0, lengths=None, **kwargs): # absorbs return_output and transformer src mask
"""
u: (B H L) if self.transposed else (B L H)
state: (H N) never needed, remnant from state spaces repo
Returns: same shape as u
"""
if not self.transposed: u = u.transpose(-1, -2)
L = u.size(-1)
# Mask out padding tokens
# TODO handle option for mask - instead of lengths, which assumes suffix padding
if isinstance(lengths, int):
if lengths != L:
lengths = torch.tensor(lengths, dtype=torch.long, device=u.device)
else:
lengths = None
if lengths is not None:
assert isinstance(lengths, torch.Tensor) and lengths.ndim == 1 and lengths.size(0) in [1, u.size(0)]
mask = torch.where(torch.arange(L, device=lengths.device) < lengths[:, None, None], 1., 0.)
u = u * mask
# Compute SS Kernel
L_kernel = L if self.L is None else min(L, round(self.L / rate))
k, _ = self.kernel(L=L_kernel, rate=rate, state=state) # (C H L) (B C H L)
# Convolution
if self.bidirectional:
k0, k1 = rearrange(k, '(s c) h l -> s c h l', s=2)
k = F.pad(k0, (0, L)) \
+ F.pad(k1.flip(-1), (L, 0))
if self.block_fft_conv:
k_f = self.block_fft_k(k.to(torch.complex64), N=L_kernel+L) # (C H L)
u_f = self.block_fft_u(u.to(torch.complex64), N=L_kernel+L) # (B H L)
y_f = contract('bhl,chl->bchl', u_f, k_f)
            if self.learn_ifft:  # use the learnable BlockFFT for the inverse transform as well
y = self.block_fft_u(y_f, N=L_kernel+L,forward=False).real[..., :L]
else:
y = torch.fft.ifft(y_f, n=L_kernel+L, dim=-1).real[..., :L] # (B C H L)
else:
k_f = torch.fft.rfft(k, n=L_kernel+L) # (C H L)
u_f = torch.fft.rfft(u, n=L_kernel+L) # (B H L)
y_f = contract('bhl,chl->bchl', u_f, k_f)
y = torch.fft.irfft(y_f, n=L_kernel+L)[..., :L] # (B C H L)
# Compute skip connection
y = y + contract('bhl,ch->bchl', u, self.D)
# Reshape to flatten channels
y = rearrange(y, '... c h l -> ... (c h) l')
if not self.transposed: y = y.transpose(-1, -2)
y = self.activation(y)
y = self.dropout(y)
y = self.output_linear(y)
return y, None
@property
def d_state(self):
return self.H
@property
def d_output(self):
return self.d_model
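if __name__ == "__main__":
    # Minimal usage sketch (illustrative values): with transposed=True (the default)
    # LongConv expects (batch, d_model, seq_len) and returns the same shape.
    import torch
    layer = LongConv(d_model=64, l_max=256)
    u = torch.randn(2, 64, 256)
    y, _ = layer(u)
    print(y.shape)  # torch.Size([2, 64, 256])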
| hyena-dna-main | src/models/sequence/long_conv.py |
import copy
import math
import re
from functools import partial
from collections import namedtuple, OrderedDict
from collections.abc import Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from einops import rearrange
from flash_attn.modules.mha import MHA, ParallelMHA
from flash_attn.modules.mlp import Mlp, FusedMLP, ParallelFusedMLP
from flash_attn.modules.block import Block
from flash_attn.modules.embedding import GPT2Embeddings, ParallelGPT2Embeddings
from flash_attn.utils.generation import GenerationMixin
from flash_attn.utils.distributed import sync_shared_params, all_gather_raw
try:
from flash_attn.ops.fused_dense import ColumnParallelLinear
except ImportError:
ColumnParallelLinear = None
try:
from flash_attn.ops.layer_norm import dropout_add_layer_norm
except ImportError:
dropout_add_layer_norm = None
from src.utils import instantiate
import src.utils.registry as registry
class CheckpointedModule(torch.nn.Module):
def __init__(self, layer):
super().__init__()
self.layer = layer
def forward(self, x):
return checkpoint(self.layer, x)
def create_mixer_cls(
layer=None,
process_group=None,
attn_layer_idx=None,
attn_cfg=None,
layer_idx=None,
sequence_parallel=True,
device=None,
dtype=None,
):
factory_kwargs = {"device": device, "dtype": dtype}
parallel_kwargs = (
{"process_group": process_group, "sequence_parallel": sequence_parallel}
if process_group is not None
else {}
)
if attn_layer_idx is not None and layer_idx in attn_layer_idx:
causal = True if attn_cfg is None else attn_cfg.pop("causal", True)
fused_bias_fc = (
False if attn_cfg is None else attn_cfg.get("fused_bias_fc", False)
)
if not fused_bias_fc:
assert process_group is None, "TensorParallel MHA requires fused_bias_fc"
mha_cls = MHA if process_group is None else ParallelMHA
# ParallelMHA doesn't take 'fused_bias_fc', it is assumed that we fuse matmul + bias
if process_group is not None:
attn_cfg = copy.deepcopy(attn_cfg) # Don't modify the original cfg
attn_cfg.pop("fused_bias_fc", None)
mixer_cls = partial(
mha_cls,
causal=causal,
layer_idx=layer_idx,
**(attn_cfg if attn_cfg is not None else {}),
**parallel_kwargs,
**factory_kwargs,
)
else:
fused_bias_fc = False if layer is None else layer.get("fused_bias_fc", False)
if process_group is not None:
assert fused_bias_fc, "TensorParallel SSM requires fused_bias_fc"
mixer_cls = instantiate(
registry.layer,
layer,
partial=True,
layer_idx=layer_idx,
**factory_kwargs,
**parallel_kwargs,
)
# mixer_cls = partial(ssm_cls, layer_idx=layer_idx,
# **(ssm_cfg if ssm_cfg is not None else {}),
# **parallel_kwargs, **factory_kwargs)
return mixer_cls
def create_mlp_cls(
d_model,
d_inner=None,
process_group=None,
fused_mlp=False,
sequence_parallel=True,
identity_mlp=False,
device=None,
dtype=None,
):
factory_kwargs = {"device": device, "dtype": dtype}
inner_dim = d_inner if d_inner is not None else 4 * d_model
if process_group is not None:
assert fused_mlp, "Tensor Parallel is only implemented for FusedMLP"
if not fused_mlp and not identity_mlp:
mlp_cls = partial(
Mlp,
hidden_features=inner_dim,
activation=partial(F.gelu, approximate="tanh"),
**factory_kwargs,
)
elif fused_mlp:
mlp_cls = FusedMLP if process_group is None else ParallelFusedMLP
parallel_kwargs = (
{"process_group": process_group, "sequence_parallel": sequence_parallel}
if process_group is not None
else {}
)
mlp_cls = partial(
mlp_cls, hidden_features=inner_dim, **parallel_kwargs, **factory_kwargs
)
else:
mlp_cls = nn.Identity
return mlp_cls
def create_block(
d_model,
d_inner=None,
process_group=None,
layer=None,
attn_layer_idx=None,
attn_cfg=None,
layer_norm_epsilon=1e-5,
resid_dropout1=0.0,
resid_dropout2=0.0,
residual_in_fp32=False,
fused_mlp=False,
identity_mlp=False,
fused_dropout_add_ln=False,
layer_idx=None,
sequence_parallel=True,
checkpoint_mlp=False,
checkpoint_mixer=False,
device=None,
dtype=None,
):
factory_kwargs = {"device": device, "dtype": dtype}
mixer_cls = create_mixer_cls(
layer=layer,
process_group=process_group,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg,
layer_idx=layer_idx,
sequence_parallel=sequence_parallel,
**factory_kwargs,
)
mlp_cls = create_mlp_cls(
d_model,
d_inner=d_inner,
process_group=process_group,
fused_mlp=fused_mlp,
identity_mlp=identity_mlp,
sequence_parallel=sequence_parallel,
**factory_kwargs,
)
norm_cls = partial(nn.LayerNorm, eps=layer_norm_epsilon, **factory_kwargs)
block = Block(
d_model,
mixer_cls,
mlp_cls,
norm_cls=norm_cls,
prenorm=True,
resid_dropout1=resid_dropout1,
resid_dropout2=resid_dropout2,
fused_dropout_add_ln=fused_dropout_add_ln,
residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel and process_group is not None,
mark_shared_params=process_group is not None,
)
block.layer_idx = layer_idx
if checkpoint_mlp:
block.mlp = CheckpointedModule(block.mlp)
if checkpoint_mixer:
block.mixer = CheckpointedModule(block.mixer)
return block
# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(
module,
n_layer,
initializer_range=0.02,
rescale_prenorm_residual=True,
glu_act=False,
):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=initializer_range)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=initializer_range)
if rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if name in ["out_proj.weight", "fc2.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
nn.init.normal_(
p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer)
)
            # If using a GLU activation, we scale the std by 2 for now
elif name in ["output_linear.0.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
if not glu_act:
nn.init.normal_(
p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer)
)
else:
out_features = p.shape[0]
# Multiplying the first half of the matrix by 2 since sigmoid scales it down by 0.5
# on average.
nn.init.normal_(
p[: out_features // 2],
mean=0.0,
std=initializer_range / math.sqrt(2 * n_layer) * 2,
)
class LMBackbone(nn.Module):
def __init__(
self,
d_model: int,
n_layer: int,
d_inner: int,
vocab_size: int,
process_group=None,
layer=None,
attn_layer_idx=None,
attn_cfg=None,
max_position_embeddings=0,
resid_dropout: float = 0.0,
embed_dropout: float = 0.1,
dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5,
initializer_cfg=None,
fused_mlp=False,
identity_mlp=False,
fused_dropout_add_ln=False,
residual_in_fp32=False,
sequence_parallel=True,
checkpoint_mlp=False,
checkpoint_mixer=False,
device=None,
dtype=None,
**kwargs,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.process_group = process_group
self.sequence_parallel = sequence_parallel
self.residual_in_fp32 = residual_in_fp32
if process_group is None:
self.embeddings = GPT2Embeddings(
d_model, vocab_size, max_position_embeddings, **factory_kwargs
)
else:
self.embeddings = ParallelGPT2Embeddings(
d_model,
vocab_size,
max_position_embeddings,
process_group=process_group,
sequence_parallel=self.sequence_parallel,
**factory_kwargs,
)
# We change the order of dropout, residual and layer norm:
# Instead of LN -> Attn / MLP -> Dropout -> Add, we do:
# Dropout -> Add -> LN -> Attn / MLP, returning both the residual branch (output of Add) and
# the main branch (output of MLP). The model definition is unchanged, but the mapping of the
# nn.Dropout probabilities are changed.
# This is for performance reason: we can fuse dropout + add + layer_norm.
self.fused_dropout_add_ln = fused_dropout_add_ln
if self.fused_dropout_add_ln and dropout_add_layer_norm is None:
raise ImportError("dropout_add_layer_norm is not installed")
self.layers = nn.ModuleList(
[
create_block(
d_model,
d_inner=d_inner,
process_group=process_group,
layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg,
layer_norm_epsilon=layer_norm_epsilon,
resid_dropout1=embed_dropout if i == 0 else resid_dropout,
resid_dropout2=resid_dropout,
residual_in_fp32=residual_in_fp32,
fused_mlp=fused_mlp,
identity_mlp=identity_mlp,
fused_dropout_add_ln=fused_dropout_add_ln,
layer_idx=i,
sequence_parallel=self.sequence_parallel,
checkpoint_mlp=checkpoint_mlp,
checkpoint_mixer=checkpoint_mixer,
**factory_kwargs,
)
for i in range(n_layer)
]
)
self.drop_f = nn.Dropout(resid_dropout)
self.ln_f = nn.LayerNorm(d_model, eps=layer_norm_epsilon, **factory_kwargs)
if process_group is not None:
for p in self.ln_f.parameters():
# Mark the norm parameters as "shared_params" so that we sync their values at init.
p._shared_params = True
# Mark the norm params as "sequence_parallel" so we run all-reduce on their grads.
if self.sequence_parallel:
p._sequence_parallel = True
self.apply(
partial(
_init_weights,
n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {}),
)
)
self.tie_weights()
def tie_weights(self):
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(self, input_ids, position_ids=None, inference_params=None):
# If using Tensor Parallel with sequence parallel, we combine the batch and the seqlen
# dimensions so that we can split on it easily, in case of small batch size.
# Only the attention/SSM layers need to know the seqlen.
embedding_kwargs = (
{"combine_batch_seqlen_dim": True}
if self.process_group is not None and self.sequence_parallel
else {}
)
hidden_states = self.embeddings(
input_ids, position_ids=position_ids, **embedding_kwargs
)
residual = None
mixer_kwargs = (
{"seqlen": input_ids.shape[1]}
if self.process_group is not None and self.sequence_parallel
else {}
)
if inference_params is not None:
mixer_kwargs["inference_params"] = inference_params
for layer in self.layers:
hidden_states, residual = layer(
hidden_states, residual, mixer_kwargs=mixer_kwargs
)
if not self.fused_dropout_add_ln:
dropped = self.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.ln_f(residual.to(dtype=self.ln_f.weight.dtype))
else:
# Set prenorm=False here since we don't need the residual
hidden_states = dropout_add_layer_norm(
hidden_states,
residual,
self.ln_f.weight,
self.ln_f.bias,
self.drop_f.p if self.training else 0.0,
self.ln_f.eps,
prenorm=False,
residual_in_fp32=self.residual_in_fp32,
)
return hidden_states
class ConvLMHeadModel(nn.Module, GenerationMixin):
def __init__(
self,
d_model: int,
n_layer: int,
d_inner: int,
vocab_size: int,
process_group=None,
layer=None,
attn_layer_idx=None,
attn_cfg=None,
max_position_embeddings=0,
resid_dropout: float = 0.0,
embed_dropout: float = 0.1,
dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5,
initializer_cfg=None,
fused_mlp=False,
fused_dropout_add_ln=False,
residual_in_fp32=False,
pad_vocab_size_multiple: int = 1,
sequence_parallel=True,
checkpoint_mlp=False,
checkpoint_mixer=False,
device=None,
dtype=None,
**kwargs,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.process_group = process_group
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (
vocab_size % pad_vocab_size_multiple
)
self.backbone = LMBackbone(
d_model=d_model,
n_layer=n_layer,
d_inner=d_inner,
vocab_size=vocab_size,
process_group=process_group,
layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout,
embed_dropout=embed_dropout,
dropout_cls=dropout_cls,
layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg,
fused_mlp=fused_mlp,
fused_dropout_add_ln=fused_dropout_add_ln,
residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel,
checkpoint_mlp=checkpoint_mlp,
checkpoint_mixer=checkpoint_mixer,
**factory_kwargs,
**kwargs,
)
if process_group is None:
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
else:
if ColumnParallelLinear is None:
raise ImportError("fused_dense_lib is not installed")
self.lm_head = ColumnParallelLinear(
d_model,
vocab_size,
process_group,
bias=False,
sequence_parallel=sequence_parallel,
**factory_kwargs,
)
# Initialize weights and apply final processing
self.apply(
partial(
_init_weights,
n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {}),
)
)
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(
self, input_ids, position_ids=None, inference_params=None, state=None
): # state for the repo interface
hidden_states = self.backbone(
input_ids, position_ids=position_ids, inference_params=inference_params
)
lm_logits = self.lm_head(hidden_states)
# During inference, we want the full logit for sampling
if ColumnParallelLinear is not None and inference_params is not None:
if isinstance(self.lm_head, ColumnParallelLinear):
lm_logits, _ = all_gather_raw(lm_logits, self.lm_head.process_group)
lm_logits = rearrange(
lm_logits, "(n b) s d -> b s (n d)", b=hidden_states.shape[0]
)
CausalLMOutput = namedtuple("CausalLMOutput", ["logits"])
return CausalLMOutput(logits=lm_logits), None
class DNAEmbeddingModel(nn.Module, GenerationMixin):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1, dropout_cls=nn.Dropout,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,
fused_mlp=False, fused_dropout_add_ln=False, residual_in_fp32=False,
pad_vocab_size_multiple: int = 1, sequence_parallel=True,
device=None, dtype=None, return_hidden_state=False, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.d_model = d_model # for decoder
self.process_group = process_group
self.return_hidden_state = return_hidden_state
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
process_group=process_group,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
dropout_cls=dropout_cls, layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, fused_mlp=fused_mlp,
fused_dropout_add_ln=fused_dropout_add_ln, residual_in_fp32=residual_in_fp32,
sequence_parallel=sequence_parallel,
**factory_kwargs, **kwargs
)
if process_group is None:
self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs)
else:
if ColumnParallelLinear is None:
raise ImportError('fused_dense_lib is not installed')
self.lm_head = ColumnParallelLinear(
d_model, vocab_size, process_group, bias=False,
sequence_parallel=sequence_parallel, **factory_kwargs
)
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.backbone.embeddings.word_embeddings.weight
if self.process_group is not None:
sync_shared_params(self, self.process_group)
def forward(self, input_ids, position_ids=None, inference_params=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids,
inference_params=inference_params)
# we only need the last hidden state for embeddings (decoder head will predict classification task)
return hidden_states, None
@property
def d_output(self):
"""Model /embedding dimension, used for decoder mapping.
"""
if getattr(self, "d_model", None) is None:
raise NotImplementedError("SequenceModule instantiation must set d_output")
return self.d_model
def load_backbone(model, state_dict, freeze_backbone=False, ignore_head=True):
"""
    Modifies state dict loading with a custom function. This is necessary because the head of
    an LM outputs logits over the vocab, but we only need the embeddings for downstream tasks.
inputs:
model: nn.Module, the from 'scratch' model
state_dict: dict, from the pretrained weights
ignore_head: bool, whether to inflate weights in the head (or keep scratch weights).
If number of classes changes (eg, imagenet to hmdb51), then you need to use this.
return:
state_dict: dict, update with inflated weights
"""
# consumes prefix from pretrained model, if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict, "model."
)
model_new_params_dict = model.state_dict()
updated_model_state_dict = {}
# loop through scratch model keys (pretrained may have extra stuff)
for key in sorted(model_new_params_dict.keys()):
loaded_params = state_dict.get(key, None)
# make sure key is in the loaded params first, if not, then print it out
if loaded_params is None:
# This should never happen, it should be there!
print("Missing key in pretrained model!", key)
raise Exception
elif ignore_head and 'head' in key:
# ignore head weights
print("found head key / parameter, load from scratch", key)
# using scratch by default, nothing needed
used_params = model_new_params_dict[key]
elif "decoder" in key:
print("found decoder key / parameter, load from scratch", key)
used_params = model_new_params_dict[key]
else:
print('key: shape MATCH, loading', key) # load matched weights
used_params = loaded_params
        # we need to pass back a state dict whose keys carry the 'model.' prefix
key_with_prefix = 'model.' + key
updated_model_state_dict[key_with_prefix] = used_params
if freeze_backbone:
print("freezing model backbone params!")
# note, decoder not included in backbone
for name, param in model.named_parameters():
param.requires_grad = False
# we have updated the new model state dict with pretrained now
return updated_model_state_dict
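# Usage sketch (illustrative): load_backbone is meant to be called from a task wrapper
# that stores this network under a `model` attribute, since the returned keys carry a
# 'model.' prefix, e.g.
#   updated = load_backbone(wrapper.model, pretrained_state_dict)
#   wrapper.load_state_dict(updated, strict=False)
# Here `wrapper` stands in for the training-harness module and is not defined in this file.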
def shard_state_dict_tp(state_dict, world_size, rank, pad_vocab_size_multiple=1):
"""Convert the state_dict of a standard SSM model to the state_dict of a SSM model
with tensor parallel.
"""
layer_idx_match = [
re.search(r"backbone\.layers\.(\d+)\.", k) for k in state_dict.keys()
]
num_hidden_layers = len(set(m.group(1) for m in layer_idx_match if m is not None))
vocab_size = state_dict["backbone.embeddings.word_embeddings.weight"].shape[0]
inner_dim, hidden_size = state_dict["backbone.layers.0.mlp.fc1.weight"].shape
vocab_size = (
math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple
)
assert vocab_size % world_size == 0
assert hidden_size % world_size == 0
assert inner_dim % world_size == 0
def shard_dim(state_dict, key, dim=0):
x = state_dict[key]
dimension = x.shape[dim] // world_size
state_dict[key] = x.narrow(dim, rank * dimension, dimension)
def shard_qkv_headdim(state_dict, key):
x = rearrange(state_dict[key], "(three d) ... -> three d ...", three=3)
dim = x.shape[1] // world_size
state_dict[key] = rearrange(
x[:, rank * dim : (rank + 1) * dim], "three d ... -> (three d) ..."
)
shard_dim(state_dict, "backbone.embeddings.word_embeddings.weight", 0)
if "lm_head.weight" in state_dict:
shard_dim(state_dict, "lm_head.weight", 0)
if "backbone.embeddings.position_embeddings.weight" in state_dict:
shard_dim(state_dict, "backbone.embeddings.position_embeddings.weight", -1)
for i in range(num_hidden_layers):
shard_qkv_headdim(state_dict, f"backbone.layers.{i}.mixer.Wqkv.weight")
shard_qkv_headdim(state_dict, f"backbone.layers.{i}.mixer.Wqkv.bias")
shard_dim(state_dict, f"backbone.layers.{i}.mixer.out_proj.weight", -1)
if rank != 0:
state_dict.pop(f"backbone.layers.{i}.mixer.out_proj.bias")
shard_dim(state_dict, f"backbone.layers.{i}.mlp.fc1.weight", 0)
shard_dim(state_dict, f"backbone.layers.{i}.mlp.fc1.bias", 0)
shard_dim(state_dict, f"backbone.layers.{i}.mlp.fc2.weight", -1)
if rank != 0:
state_dict.pop(f"backbone.layers.{i}.mlp.fc2.bias")
if f"backbone.layers.{i}.mixer.kernel.kernel.B" in state_dict:
for name in [
"D",
"ssm_k_D",
"kernel.kernel.B",
"kernel.kernel.inv_A_real",
"kernel.kernel.A_imag",
"ssm_k_kernel.kernel.B",
"kernel.kernel.log_dt",
]:
if f"backbone.layers.{i}.mixer.{name}" in state_dict:
shard_dim(state_dict, f"backbone.layers.{i}.mixer.{name}", 0)
for name in ["kernel.kernel.C", "ssm_k_kernel.kernel.C"]:
if f"backbone.layers.{i}.mixer.{name}" in state_dict:
shard_dim(state_dict, f"backbone.layers.{i}.mixer.{name}", 1)
return state_dict | hyena-dna-main | src/models/sequence/long_conv_lm.py |
""" Isotropic deep sequence model backbone, in the style of ResNets / Transformers.
The SequenceModel class implements a generic (batch, length, d_input) -> (batch, length, d_output) transformation
"""
from functools import partial
import torch
import torch.nn as nn
from einops import rearrange
from src.utils.config import to_list, to_dict
from src.models.sequence.block import SequenceResidualBlock
from src.models.sequence.base import SequenceModule
from src.models.nn.components import Normalization, DropoutNd
class SequenceModel(SequenceModule):
def __init__(
self,
d_model, # Resize input (useful for deep models with residuals)
n_layers=1, # Number of layers
transposed=False, # Transpose inputs so each layer receives (batch, dim, length)
dropout=0.0, # Dropout parameter applied on every residual and every layer
tie_dropout=False, # Tie dropout mask across sequence like nn.Dropout1d/nn.Dropout2d
prenorm=True, # Pre-norm vs. post-norm
n_repeat=1, # Each layer is repeated n times per stage before applying pooling
layer=None, # Layer config, must be specified
residual=None, # Residual config
norm=None, # Normalization config (e.g. layer vs batch)
pool=None, # Config for pooling layer per stage
track_norms=True, # Log norms of each layer output
dropinp=0.0, # Input dropout
):
super().__init__()
# Save arguments needed for forward pass
self.d_model = d_model
self.transposed = transposed
self.track_norms = track_norms
# Input dropout (not really used)
dropout_fn = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
self.drop = dropout_fn(dropinp) if dropinp > 0.0 else nn.Identity()
layer = to_list(layer, recursive=False)
# Some special arguments are passed into each layer
for _layer in layer:
# If layers don't specify dropout, add it
if _layer.get('dropout', None) is None:
_layer['dropout'] = dropout
# Ensure all layers are shaped the same way
_layer['transposed'] = transposed
# Duplicate layers
layers = layer * n_layers * n_repeat
# Instantiate layers
_layers = []
d = d_model
for l, layer in enumerate(layers):
# Pool at the end of every n_repeat blocks
pool_cfg = pool if (l+1) % n_repeat == 0 else None
block = SequenceResidualBlock(d, l+1, prenorm=prenorm, dropout=dropout, tie_dropout=tie_dropout, transposed=transposed, layer=layer, residual=residual, norm=norm, pool=pool_cfg)
_layers.append(block)
d = block.d_output
self.d_output = d
self.layers = nn.ModuleList(_layers)
if prenorm:
if norm is None:
self.norm = None
elif isinstance(norm, str):
self.norm = Normalization(self.d_output, transposed=self.transposed, _name_=norm)
else:
self.norm = Normalization(self.d_output, transposed=self.transposed, **norm)
else:
self.norm = nn.Identity()
def forward(self, inputs, *args, state=None, **kwargs):
""" Inputs assumed to be (batch, sequence, dim) """
if self.transposed: inputs = rearrange(inputs, 'b ... d -> b d ...')
inputs = self.drop(inputs)
# Track norms
if self.track_norms: output_norms = [torch.mean(inputs.detach() ** 2)]
# Apply layers
outputs = inputs
prev_states = [None] * len(self.layers) if state is None else state
next_states = []
for layer, prev_state in zip(self.layers, prev_states):
outputs, state = layer(outputs, *args, state=prev_state, **kwargs)
next_states.append(state)
if self.track_norms: output_norms.append(torch.mean(outputs.detach() ** 2))
if self.norm is not None: outputs = self.norm(outputs)
if self.transposed: outputs = rearrange(outputs, 'b d ... -> b ... d')
if self.track_norms:
metrics = to_dict(output_norms, recursive=False)
self.metrics = {f'norm/{i}': v for i, v in metrics.items()}
return outputs, next_states
@property
def d_state(self):
d_states = [layer.d_state for layer in self.layers]
return sum([d for d in d_states if d is not None])
@property
def state_to_tensor(self):
# Slightly hacky way to implement this in a curried manner (so that the function can be extracted from an instance)
# Somewhat more sound may be to turn this into a @staticmethod and grab subclasses using hydra.utils.get_class
def fn(state):
x = [_layer.state_to_tensor(_state) for (_layer, _state) in zip(self.layers, state)]
x = [_x for _x in x if _x is not None]
return torch.cat( x, dim=-1)
return fn
def default_state(self, *batch_shape, device=None):
return [layer.default_state(*batch_shape, device=device) for layer in self.layers]
def step(self, x, state, **kwargs):
# Apply layers
prev_states = [None] * len(self.layers) if state is None else state
next_states = []
for layer, prev_state in zip(self.layers, prev_states):
x, state = layer.step(x, state=prev_state, **kwargs)
next_states.append(state)
x = self.norm(x)
return x, next_states
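if __name__ == "__main__":
    # Minimal usage sketch (illustrative): builds a 2-layer backbone whose inner layer is
    # the FF block, assuming it is registered under {'_name_': 'ff'} in the layer registry;
    # any other registered layer config is plugged in the same way.
    import torch
    model = SequenceModel(d_model=64, n_layers=2, layer={'_name_': 'ff'})
    x = torch.randn(2, 128, 64)
    y, _ = model(x)
    print(y.shape)  # torch.Size([2, 128, 64])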
| hyena-dna-main | src/models/sequence/model.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat
from src.utils.train import OptimModule
class LongConvKernel(OptimModule):
def __init__(
self,
H,
L,
channels=1,
learning_rate=None,
lam=0.1,
causal=True,
kernel_dropout=0,
weight_init="random",
use_ma_smoothing = False,
ma_window_len = 7,
smooth_freq = False,
**kwargs
):
super().__init__()
self.drop = torch.nn.Dropout(p=kernel_dropout)
self.H = H
self.weight_init = weight_init
self.causal = causal
self.L = L*2 if not causal else L
self.channels = channels
self.lam = lam
self.kernel = torch.nn.Parameter(self._parameter_initialization()) #(c,H,L)
self.register("kernel", self.kernel, learning_rate)
self.use_ma_smoothing=use_ma_smoothing
self.smooth_freq = smooth_freq
self.ma_window_len = ma_window_len
if self.use_ma_smoothing:
if smooth_freq:
weight = torch.arange(ma_window_len, dtype = self.kernel.dtype)
weight = torch.exp(-0.5 * torch.abs(weight - ma_window_len // 2) ** 2)
weight = repeat(weight, 'l -> h1 h2 l', h1 = self.H, h2 = 1)
weight = weight.type(torch.fft.rfft(self.kernel).dtype)
self.smooth_weight = weight
else:
self.ma_window_len = ma_window_len
assert self.ma_window_len%2!=0, "window size must be odd"
padding = (self.ma_window_len//2)
self.smooth = torch.nn.AvgPool1d(kernel_size=self.ma_window_len,stride=1,padding=padding)
def _parameter_initialization(self):
if self.weight_init=="random":
return torch.randn(self.channels, self.H, self.L) * 0.002
elif self.weight_init=="double_exp":
K = torch.randn(self.channels, self.H, self.L,dtype=torch.float32) * 0.02
double_exp = torch.zeros((self.H,self.L),dtype=torch.float32)
for i in range(self.H):
for j in range(self.L):
double_exp[i,j] = torch.exp(-(j/self.L)*torch.pow(torch.tensor(int(self.H/2)),torch.tensor(i/self.H)))
K = torch.einsum("c h l, h l -> c h l",K,double_exp)
return K
else: raise NotImplementedError(f"{self.weight_init} is not valid")
def forward(self, **kwargs):
k = self.kernel
if self.use_ma_smoothing:
if self.smooth_freq:
k_f = torch.fft.rfft(k, dim=-1)
k_f = F.conv1d(k_f, self.smooth_weight.to(k_f.device), padding='same', groups=self.H)
k = torch.fft.irfft(k_f, dim=-1)
else:
k = self.smooth(k)
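        # Squash / soft-threshold the kernel: shrink magnitudes by lam and zero out small
        # entries, a sparsity-style regularization on the long convolution kernel.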
k = F.relu(torch.abs(k)-self.lam)*torch.sign(k)
k = self.drop(k)
return k, None
@property
def d_output(self):
return self.H | hyena-dna-main | src/models/sequence/long_conv_kernel.py |
import math
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from einops import rearrange, repeat
try:
from src.ops.fftconv import fftconv_ref, fftconv_func, fftconv_heads_ref
except ImportError:
fftconv_func = None
try:
from flash_attn.ops.fused_dense import FusedDense
except ImportError:
FusedDense = None
import src.utils.registry as registry
from src.utils.train import OptimModule
from src.utils.config import instantiate, auto_assign_attrs
from src.models.nn import Activation
class FFTConvFuncv2(torch.autograd.Function):
@staticmethod
def forward(ctx, u, k):
seqlen = u.shape[-1]
if len(u.shape) > 3:
k = k.unsqueeze(1)
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm="forward")[..., :seqlen]
ctx.save_for_backward(u_f, k_f)
return y
@staticmethod
def backward(ctx, dout):
u_f, k_f = ctx.saved_tensors
seqlen = dout.shape[-1]
fft_size = 2 * seqlen
dout_f = torch.fft.rfft(dout, n=fft_size)
du = torch.fft.irfft(dout_f * k_f.conj(), n=fft_size, norm="forward")[
..., :seqlen
]
dk = torch.fft.irfft(dout_f * u_f.conj(), n=fft_size, norm="forward")[
..., :seqlen
]
return du, dk.squeeze()
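# Note: this local fftconv_ref shadows any version imported from src.ops.fftconv above,
# so the pure-PyTorch reference convolution below is the one used in this file.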
def fftconv_ref(u, k, D, dropout_mask, gelu=True, k_rev=None):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
if k_rev is not None:
k_rev_f = torch.fft.rfft(k_rev, n=fft_size) / fft_size
k_f = k_f + k_rev_f.conj()
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
if len(u.shape) > 3:
k_f = k_f.unsqueeze(1)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm="forward")[..., :seqlen]
out = y + u * D.unsqueeze(-1)
if gelu:
out = F.gelu(out)
if dropout_mask is not None:
return (out * rearrange(dropout_mask, "b H -> b H 1")).to(dtype=u.dtype)
else:
return out.to(dtype=u.dtype)
@torch.jit.script
def mul_sum(q, y):
return (q * y).sum(dim=1)
class Sin(nn.Module):
def __init__(self, dim, w=10, train_freq=True):
super().__init__()
self.freq = (
nn.Parameter(w * torch.ones(1, dim))
if train_freq
else w * torch.ones(1, dim)
)
def forward(self, x):
return torch.sin(self.freq * x)
class PositionalEmbedding(OptimModule):
def __init__(self, emb_dim: int, seq_len: int, lr_pos_emb: float = 1e-5, **kwargs):
"""Complex exponential positional embeddings for Hyena filters."""
super().__init__()
self.seq_len = seq_len
        # The time embedding fed to the filters is normalized so that t_f = 1
t = torch.linspace(0, 1, self.seq_len)[None, :, None] # 1, L, 1
if emb_dim > 1:
bands = (emb_dim - 1) // 2
# To compute the right embeddings we use the "proper" linspace
t_rescaled = torch.linspace(0, seq_len - 1, seq_len)[None, :, None]
w = 2 * math.pi * t_rescaled / seq_len # 1, L, 1
f = torch.linspace(1e-4, bands - 1, bands)[None, None]
z = torch.exp(-1j * f * w)
z = torch.cat([t, z.real, z.imag], dim=-1)
self.register("z", z, lr=lr_pos_emb)
self.register("t", t, lr=0.0)
def forward(self, L):
return self.z[:, :L], self.t[:, :L]
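# Shape sketch (illustrative): with the default emb_dim=3 and seq_len=L, forward(L) returns
# z of shape (1, L, 3) = [t, Re(exp(-i*f*w)), Im(exp(-i*f*w))] for the single frequency band,
# plus the raw time grid t of shape (1, L, 1) that feeds the exponential modulation.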
class ExponentialModulation(OptimModule):
def __init__(
self,
d_model,
fast_decay_pct=0.3,
slow_decay_pct=1.5,
target=1e-2,
modulation_lr=0.0,
shift: float = 0.0,
**kwargs,
):
super().__init__()
self.shift = shift
max_decay = math.log(target) / fast_decay_pct
min_decay = math.log(target) / slow_decay_pct
deltas = torch.linspace(min_decay, max_decay, d_model)[None, None]
self.register("deltas", deltas, lr=modulation_lr)
def forward(self, t, x):
decay = torch.exp(-t * self.deltas.abs())
x = x * (decay + self.shift)
return x
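# Worked example of the defaults above (illustrative): target=1e-2 gives |deltas| between
# |log(1e-2)|/1.5 ~= 3.07 and |log(1e-2)|/0.3 ~= 15.35, so with t normalized to [0, 1] the
# fastest-decaying channel reaches the target by t = 0.3 while the slowest would only reach
# it at t = 1.5, i.e. past the end of the sequence.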
class HyenaFilter(OptimModule):
def __init__(
self,
d_model,
emb_dim=3, # dim of input to MLP, augments with positional encoding
order=16, # width of the implicit MLP
fused_fft_conv=False,
seq_len=1024,
lr=1e-3,
lr_pos_emb=1e-5,
dropout=0.0,
w=1, # frequency of periodic activations
wd=0, # weight decay of kernel parameters
bias=True,
num_inner_mlps=2,
linear_mixer=False,
modulate: bool = True,
normalized=False,
**kwargs,
):
"""
Implicit long filter with modulation.
Args:
d_model: number of channels in the input
emb_dim: dimension of the positional encoding (`emb_dim` - 1) // 2 is the number of bands
order: width of the FFN
num_inner_mlps: number of inner linear layers inside filter MLP
Note:
filter_dropout is not implemented
"""
super().__init__()
auto_assign_attrs(
self, d_model=d_model, emb_dim=emb_dim, seq_len=seq_len, modulate=modulate
)
self.use_bias = bias
self.fused_fft_conv = fused_fft_conv
self.bias = nn.Parameter(torch.randn(self.d_model))
self.dropout = nn.Dropout(dropout)
act = Sin(dim=order, w=w)
assert (
emb_dim % 2 != 0 and emb_dim >= 3
), "emb_dim must be odd and greater or equal to 3 (time, sine and cosine)"
self.pos_emb = PositionalEmbedding(emb_dim, seq_len, lr_pos_emb)
# uses a variable number of inner linear layers
if linear_mixer is False:
self.implicit_filter = nn.Sequential(
nn.Linear(emb_dim, order),
act,
)
for i in range(num_inner_mlps):
self.implicit_filter.append(nn.Linear(order, order))
self.implicit_filter.append(act)
# final linear layer
self.implicit_filter.append(nn.Linear(order, d_model, bias=False))
else:
self.implicit_filter = nn.Sequential(
nn.Linear(emb_dim, d_model, bias=False),
)
self.modulation = ExponentialModulation(d_model, **kwargs)
self.normalized = normalized
for c in self.implicit_filter.children():
for name, v in c.state_dict().items():
optim = {"weight_decay": wd, "lr": lr}
setattr(getattr(c, name), "_optim", optim)
def filter(self, L, *args, **kwargs):
z, t = self.pos_emb(L)
h = self.implicit_filter(z)
if self.modulate:
h = self.modulation(t, h)
if self.normalized:
h = h / torch.norm(h, dim=-1, p=1, keepdim=True)
return h
def forward(self, x, L, k=None, bias=None, *args, **kwargs):
if k is None:
k = self.filter(L)
# Ensure compatibility with filters that return a tuple
k = k[0] if type(k) is tuple else k
if bias is None:
bias = self.bias
bias = bias if self.use_bias else 0 * bias
if self.fused_fft_conv:
bias = bias.to(dtype=torch.float32)
y = fftconv_func(
x,
k,
bias,
dropout_mask=None,
gelu=False,
force_fp16_output=torch.is_autocast_enabled(),
)
else:
y = fftconv_ref(x, k, bias, dropout_mask=None, gelu=False)
# y = (
# FFTConvFuncv2.apply(x, k.to(dtype=torch.float32))
# + bias.unsqueeze(-1) * x
# )
return y.to(dtype=x.dtype)
class HyenaOperator(nn.Module):
def __init__(
self,
d_model,
l_max,
order=2,
filter_order=64,
num_heads=1,
inner_factor=1,
num_blocks=1,
fused_bias_fc=False,
outer_mixing=False,
dropout=0.0,
filter_dropout=0.0,
filter_cls="hyena-filter",
post_order_ffn=False,
jit_filter=False,
short_filter_order=3,
activation="id",
return_state=False,
**filter_args,
):
r"""
Hyena operator described in the paper https://arxiv.org/pdf/2302.10866.pdf
Args:
d_model (int): Dimension of the input and output embeddings (width of the layer)
l_max: (int): Maximum input sequence length. Defaults to None
order: (int): Depth of the Hyena recurrence. Defaults to 2
filter_order: (int): Width of the FFN parametrizing the implicit filter. Defaults to 64
num_heads: (int): Number of heads. Defaults to 1
inner_factor: (int): Width multiplier. Defaults to 1
num_blocks: (int): Number of blocks in sequence length. Defaults to 1
fused_bias_fc: (bool): Whether to use fused bias FC. Defaults to False
dropout: (float): Dropout probability. Defaults to 0.0
filter_dropout: (float): Dropout probability for the filter. Defaults to 0.0
post_order_ffn: (bool): Apply a dense layer between steps of the recurrence. Defaults to False
jit_filter: (bool): Whether JIT the implicit filter function. Defaults to False
short_filter_order: (int): Length of the explicit input convolutional filter. Defaults to 3
activation: (str): type of act between kernel output and FF (default identity)
return_state: (bool): whether to return a state
"""
super().__init__()
assert (
d_model % num_heads == 0
), f"Model dimension {d_model} must be divisible by num heads {num_heads}"
assert (
l_max % num_blocks == 0
), f"Maximum signal length {l_max} must be divisible by block dimension {num_blocks}"
block_dim = l_max // num_blocks
head_dim = d_model // num_heads
auto_assign_attrs(
self,
d_model=d_model,
order=order,
l_max=l_max,
num_heads=num_heads,
inner_factor=inner_factor,
block_dim=block_dim,
head_dim=head_dim,
filter_order=filter_order,
post_order_ffn=post_order_ffn,
short_filter_order=short_filter_order,
num_blocks=num_blocks,
filter_dropout=filter_dropout,
jit_filter=jit_filter,
outer_mixing=outer_mixing,
activation=activation,
return_state=return_state,
)
self.activation = Activation(activation)
self.dropout = nn.Dropout(dropout)
self.setup_projections(fused_bias_fc, inner_factor)
self.setup_filters(filter_cls, filter_args)
def setup_projections(self, fused_bias_fc, inner_factor):
"Initializes input and output projections (over the width dimension)"
if fused_bias_fc and FusedDense is None:
raise ImportError("fused_dense is not installed")
linear_cls = nn.Linear if not fused_bias_fc else FusedDense
self.out_proj = linear_cls(self.d_model * inner_factor, self.d_model)
self.in_proj = linear_cls(self.d_model, (self.order + 1) * self.d_model)
if self.post_order_ffn:
self.ord_proj_w = nn.Parameter(
torch.randn(self.order, self.num_heads, self.num_heads)
/ math.sqrt(self.head_dim)
)
def setup_filters(self, filter_cls, filter_args):
"Initializes the explicit and implicit filters"
assert self.order >= 2, f"Order must be at least 2, (got {self.order})"
total_width = self.d_model * self.inner_factor * (self.order + 1)
self.short_filter = nn.Conv1d(
in_channels=total_width,
out_channels=total_width,
kernel_size=self.short_filter_order,
groups=total_width,
padding=self.short_filter_order - 1,
)
filter_cls = instantiate(registry.layer, filter_cls, partial=True)
self.filter_fn = filter_cls(
self.head_dim * self.inner_factor * (self.order - 1),
order=self.filter_order,
seq_len=self.l_max,
channels=1,
dropout=self.filter_dropout,
**filter_args,
)
if self.jit_filter:
            self.filter_fn = torch.jit.script(self.filter_fn)
def recurrence(self, u, state):
"Fast inference mode via distilled recurrence"
raise NotImplementedError("Working on it!")
def forward(self, u, *args, **kwargs):
l = u.size(-2)
l_filter = min(l, self.l_max)
u = self.in_proj(u)
u = rearrange(u, "b l d -> b d l")
uc = self.short_filter(u)[..., :l_filter]
uc = rearrange(
uc,
"b (ho v) (z l) -> b ho v z l",
z=self.num_blocks,
ho=self.num_heads,
v=self.head_dim * (self.order + 1),
)
*x, v = uc.split(self.d_model, dim=2)
k = self.filter_fn.filter(l_filter)
# `c` is always 1 by default
k = rearrange(k, "c l (v o) -> c o v l", v=self.head_dim, o=self.order - 1)[0]
bias = rearrange(
self.filter_fn.bias, "(v o) -> o v", v=self.head_dim, o=self.order - 1
)
for o, x_i in enumerate(reversed(x[1:])):
if self.outer_mixing:
v = rearrange(v, "b h v z l -> b h 1 v z l")
v = self.dropout(v * rearrange(x_i, "b h v z l -> b h v 1 z l"))
v = v.sum(dim=2)
else:
v = self.dropout(v * x_i)
# the bias term is broadcasted. Last dimension (l) is handled by fftconv
v = self.filter_fn(v, l_filter, k=k[o], bias=bias[o, None, :, None])
if self.post_order_ffn:
w = self.ord_proj_w[o]
v = mul_sum(
rearrange(w, "h1 h2 -> 1 h1 h2 1 1 1"),
rearrange(v, "b h v z l -> b h 1 v z l"),
)
y = self.activation(
rearrange(
v * x[0],
"b h v z l -> b (z l) (h v)",
z=self.num_blocks,
h=self.num_heads,
)
)
y = self.out_proj(y)
if self.return_state:
return y, None
return y
@property
def d_output(self):
return self.d_model
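# Illustrative sketch (appended; not part of the original class): for order=2 with a
# single head and block, the recurrence in forward() reduces to gated long convolutions,
#   y = x0 * (k (*) (x1 * v) + bias * (x1 * v)),
# shown here with a plain FFT convolution. All shapes and values below are assumptions
# chosen purely for demonstration.
if __name__ == "__main__":
    B, D, L = 2, 8, 64
    x0, x1, v = torch.randn(3, B, D, L).unbind(0)  # the (order + 1) input projections
    k = torch.randn(D, L)                          # implicit long filter, one per channel
    bias = torch.randn(D, 1)                       # residual "D" term of the filter
    gated = x1 * v
    conv = torch.fft.irfft(
        torch.fft.rfft(gated, n=2 * L) * torch.fft.rfft(k, n=2 * L), n=2 * L
    )[..., :L]
    y = x0 * (conv + bias * gated)                 # final gate, as in forward()
    print(y.shape)                                 # torch.Size([2, 8, 64])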
| hyena-dna-main | src/models/sequence/hyena.py |
""" Implements a full residual block around a black box layer
Configurable options include:
normalization position: prenorm or postnorm
normalization type: batchnorm, layernorm etc.
subsampling/pooling
residual options: feedforward, residual, affine scalars, depth-dependent scaling, etc.
"""
from torch import nn
from functools import partial
import src.utils as utils
from src.models.nn.components import Normalization, StochasticDepth, DropoutNd
from src.models.sequence import SequenceModule
from src.models.sequence.pool import registry as pool_registry
from src.models.nn.residual import registry as residual_registry
import src.utils.registry as registry
class SequenceResidualBlock(SequenceModule):
def __init__(
self,
d_input,
i_layer=None, # Only needs to be passed into certain residuals like Decay
prenorm=True,
dropout=0.0,
tie_dropout=False,
transposed=False,
layer=None, # Config for black box module
residual=None, # Config for residual function
norm=None, # Config for normalization layer
pool=None,
drop_path=0.,
):
super().__init__()
self.i_layer = i_layer
self.d_input = d_input
self.layer = utils.instantiate(registry.layer, layer, d_input)
self.prenorm = prenorm
self.transposed = transposed
# Residual
# d_residual is the output dimension after residual
if residual is None:
self.residual = None
self.d_residual = self.layer.d_output
else:
self.residual = utils.instantiate(residual_registry, residual, i_layer, d_input, self.layer.d_output)
self.d_residual = self.residual.d_output
# Normalization
d_norm = d_input if self.prenorm else self.d_residual
# We don't use config to directly instantiate since Normalization has some special cases
if norm is None:
self.norm = None
elif isinstance(norm, str):
self.norm = Normalization(d_norm, transposed=self.transposed, _name_=norm)
else:
self.norm = Normalization(d_norm, transposed=self.transposed, **norm)
# Pool
self.pool = utils.instantiate(pool_registry, pool, self.d_residual, transposed=self.transposed)
# Dropout
dropout_cls = partial(DropoutNd, transposed=self.transposed) if tie_dropout else nn.Dropout
self.drop = dropout_cls(dropout) if dropout > 0.0 else nn.Identity()
# Stochastic depth
self.drop_path = StochasticDepth(drop_path, mode='row') if drop_path > 0.0 else nn.Identity()
@property
def d_output(self):
return self.pool.d_output if self.pool is not None else self.d_residual
@property
def d_state(self):
return self.layer.d_state
@property
def state_to_tensor(self):
return self.layer.state_to_tensor
def default_state(self, *args, **kwargs):
return self.layer.default_state(*args, **kwargs)
def forward(self, x, state=None, **kwargs):
y = x
# Pre-norm
if self.norm is not None and self.prenorm: y = self.norm(y)
# Black box layer
y, state = self.layer(y, state=state, **kwargs)
# Residual
if self.residual is not None: y = self.residual(x, self.drop_path(self.drop(y)), self.transposed)
# Post-norm
if self.norm is not None and not self.prenorm: y = self.norm(y)
# Pool
if self.pool is not None: y, _ = self.pool(y)
return y, state
def step(self, x, state, **kwargs):
y = x
# Pre-norm
if self.norm is not None and self.prenorm:
y = self.norm.step(y)
# Black box layer
y, state = self.layer.step(y, state, **kwargs)
# Residual
if self.residual is not None: y = self.residual(x, y, transposed=False) # NOTE this would not work with concat residual function (catformer)
# Post-norm
if self.norm is not None and not self.prenorm:
y = self.norm.step(y)
# Pool
if self.pool is not None: y, _ = self.pool(y)
return y, state
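# Illustrative sketch (appended; not part of the original file): the pre-norm vs.
# post-norm orderings implemented in forward() above, written out with plain torch
# modules. The dimensions are arbitrary and only for demonstration.
if __name__ == "__main__":
    import torch
    d = 16
    layer, norm = nn.Linear(d, d), nn.LayerNorm(d)
    x = torch.randn(2, 10, d)
    y_prenorm = x + layer(norm(x))    # prenorm=True: normalize, apply layer, add residual
    y_postnorm = norm(x + layer(x))   # prenorm=False: apply layer, add residual, normalize
    print(y_prenorm.shape, y_postnorm.shape)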
| hyena-dna-main | src/models/sequence/block.py |
"""Implements downsampling and upsampling on sequences."""
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
from src.models.sequence import SequenceModule
from src.models.nn import LinearActivation
""" Simple pooling functions that just downsample or repeat
stride: Subsample on the layer dimension
expand: Repeat on the feature dimension
"""
class DownSample(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
if x is None: return None
if self.stride > 1:
assert x.ndim == 3, "Downsampling with higher-dimensional inputs is currently not supported. It is recommended to use average or spectral pooling instead."
if self.transposed:
x = x[..., 0::self.stride]
else:
x = x[..., 0::self.stride, :]
if self.expand > 1:
if self.transposed:
x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
else:
x = repeat(x, 'b ... d -> b ... (d e)', e=self.expand)
return x, None
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
@property
def d_output(self):
return self.d_input * self.expand
class DownAvgPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=None, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
if self.expand is not None:
self.linear = LinearActivation(
d_input,
d_input * expand,
transposed=transposed,
)
def forward(self, x):
if not self.transposed:
x = rearrange(x, 'b ... d -> b d ...')
if self.stride > 1:
# einops appears slower than F
if x.ndim == 3:
x = F.avg_pool1d(x, self.stride, self.stride)
elif x.ndim == 4:
x = F.avg_pool2d(x, self.stride, self.stride)
else:
# Reduction string e.g. "b d (l1 2) (l2 2) -> b d l1 l2"
reduce_str = "b d " + " ".join([f"(l{i} {self.stride})" for i in range(x.ndim-2)]) \
+ " -> b d " + " ".join([f"l{i}" for i in range(x.ndim-2)])
x = reduce(x, reduce_str, 'mean')
# if self.expand > 1:
# x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
if not self.transposed:
x = rearrange(x, 'b d ... -> b ... d')
if self.expand is not None:
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
        if self.stride > 1 or (self.expand is not None and self.expand > 1):
raise NotImplementedError
return x, state
@property
def d_output(self):
if self.expand is None:
return self.d_input
else:
return self.d_input * self.expand
class DownSpectralPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
"""
x: (B, L..., D)
"""
if not self.transposed:
x = rearrange(x, 'b ... d -> b d ...')
shape = x.shape[2:]
x_f = torch.fft.ifftn(x, s=shape)
for axis, l in enumerate(shape):
assert l % self.stride == 0, 'input length must be divisible by stride'
new_l = l // self.stride
idx = torch.cat([torch.arange(0, new_l-new_l//2), l+torch.arange(-new_l//2, 0)]).to(x_f.device)
x_f = torch.index_select(x_f, 2+axis, idx)
x = torch.fft.ifftn(x_f, s=[l//self.stride for l in shape])
x = x.real
if self.expand > 1:
x = repeat(x, 'b d ... -> b (d e) ...', e=self.expand)
if not self.transposed:
x = rearrange(x, 'b d ... -> b ... d')
return x, None
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
@property
def d_output(self):
return self.d_input * self.expand
class UpSample(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, transposed=True):
super().__init__()
self.d_input = d_input
self.stride = stride
self.expand = expand
self.transposed = transposed
def forward(self, x):
if x is None: return None
if self.expand > 1:
if self.transposed:
x = reduce(x, '... (d e) l -> ... d l', 'mean', e=self.expand)
else:
x = reduce(x, '... (d e) -> ... d', 'mean', e=self.expand)
if self.stride > 1:
if self.transposed:
x = repeat(x, '... l -> ... (l e)', e=self.stride)
else:
x = repeat(x, '... l d -> ... (l e) d', e=self.stride)
return x, None
@property
def d_output(self):
return self.d_input // self.expand
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
class UpAvgPool(SequenceModule):
def __init__(self, d_input, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
assert d_input % expand == 0
self.d_input = d_input
self.stride = stride
self.expand = expand
self.causal = causal
self.transposed = transposed
self.linear = LinearActivation(
d_input,
d_input // expand,
transposed=transposed,
)
def forward(self, x):
# TODO only works for 1D right now
if x is None: return None
x = self.linear(x)
if self.stride > 1:
if self.transposed:
if self.causal:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = repeat(x, '... l -> ... (l e)', e=self.stride)
else:
if self.causal:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = repeat(x, '... l d -> ... (l e) d', e=self.stride)
return x, None
@property
def d_output(self):
return self.d_input // self.expand
def step(self, x, state, **kwargs):
if self.stride > 1 or self.expand > 1:
raise NotImplementedError
return x, state
class DownLinearPool(SequenceModule):
def __init__(self, d_model, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
self.d_model = d_model
self.stride = stride
self.expand = expand
self.transposed = transposed
self.linear = LinearActivation(
d_model * stride,
d_model * expand,
transposed=transposed,
)
def forward(self, x):
if self.transposed:
x = rearrange(x, '... h (l s) -> ... (h s) l', s=self.stride)
else:
x = rearrange(x, '... (l s) h -> ... l (h s)', s=self.stride)
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
# if self.stride > 1 or self.expand > 1:
# raise NotImplementedError
# return x, state
if x is None: return None, state
state.append(x)
if len(state) == self.stride:
x = rearrange(torch.stack(state, dim=-1), '... h s -> ... (h s)')
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
return x, []
else:
return None, state
def default_state(self, *batch_shape, device=None):
return []
@property
def d_output(self):
        return self.d_model * self.expand  # the constructor stores d_model, not d_input
class UpLinearPool(SequenceModule):
def __init__(self, d, stride=1, expand=1, causal=False, transposed=True):
super().__init__()
# self.d_model = d * expand
# self.d_output = d
assert d % expand == 0
self.d_model = d
self.d_output = d // expand
# self._d_output = d_output
self.stride = stride
self.causal = causal
self.transposed = transposed
self.linear = LinearActivation(
self.d_model,
self.d_output * stride,
transposed=transposed,
)
def forward(self, x, skip=None):
x = self.linear(x)
if self.transposed:
if self.causal:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = rearrange(x, '... (h s) l -> ... h (l s)', s=self.stride)
else:
if self.causal:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = rearrange(x, '... l (h s) -> ... (l s) h', s=self.stride)
if skip is not None:
x = x + skip
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
assert len(state) > 0
y, state = state[0], state[1:]
if len(state) == 0:
assert x is not None
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
x = rearrange(x, '... (h s) -> ... h s', s=self.stride)
state = list(torch.unbind(x, dim=-1))
else: assert x is None
return y, state
def default_state(self, *batch_shape, device=None):
state = torch.zeros(batch_shape + (self.d_output, self.stride), device=device) # (batch, h, s)
state = list(torch.unbind(state, dim=-1)) # List of (..., H)
return state
# @property
# def d_output(self): return self._d_output
""" Pooling functions with trainable parameters """ # TODO make d_output expand instead
class DownPool2d(SequenceModule):
def __init__(self, d_input, d_output, stride=1, transposed=True, weight_norm=True):
super().__init__()
self.linear = LinearActivation(
d_input,
d_output,
transposed=transposed,
weight_norm=weight_norm,
)
        self.transposed = transposed
        self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride)  # no trailing comma, so this stays a module rather than a tuple
    def forward(self, x):
        # TODO: the non-transposed path (and a projection through self.linear) is not implemented yet
        if self.transposed:
            x = self.pool(x)
        return x, None
# TODO DownPool/UpPool are currently used by unet/sashimi backbones
# DownLinearPool is used by the registry (for isotropic backbone)
# DownPool is essentially the same as DownLinearPool. These should be consolidated
class DownPool(SequenceModule):
def __init__(self, d_input, d_output=None, expand=None, stride=1, transposed=True, weight_norm=True, initializer=None, activation=None):
super().__init__()
assert (d_output is None) + (expand is None) == 1
if d_output is None: d_output = d_input * expand
self.d_output = d_output
self.stride = stride
self.transposed = transposed
self.linear = LinearActivation(
d_input * stride,
d_output,
transposed=transposed,
initializer=initializer,
weight_norm = weight_norm,
activation=activation,
activate=True if activation is not None else False,
)
def forward(self, x):
if self.transposed:
x = rearrange(x, '... h (l s) -> ... (h s) l', s=self.stride)
else:
x = rearrange(x, '... (l s) h -> ... l (h s)', s=self.stride)
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
if x is None: return None, state
state.append(x)
if len(state) == self.stride:
x = rearrange(torch.stack(state, dim=-1), '... h s -> ... (h s)')
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
return x, []
else:
return None, state
def default_state(self, *batch_shape, device=None):
return []
class UpPool(SequenceModule):
def __init__(self, d_input, d_output, stride, transposed=True, weight_norm=True, initializer=None, activation=None):
super().__init__()
self.d_input = d_input
self._d_output = d_output
self.stride = stride
self.transposed = transposed
self.linear = LinearActivation(
d_input,
d_output * stride,
transposed=transposed,
initializer=initializer,
weight_norm = weight_norm,
activation=activation,
activate=True if activation is not None else False,
)
def forward(self, x, skip=None):
x = self.linear(x)
if self.transposed:
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = rearrange(x, '... (h s) l -> ... h (l s)', s=self.stride)
else:
x = F.pad(x[..., :-1, :], (0, 0, 1, 0)) # Shift to ensure causality
x = rearrange(x, '... l (h s) -> ... (l s) h', s=self.stride)
if skip is not None:
x = x + skip
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
assert len(state) > 0
y, state = state[0], state[1:]
if len(state) == 0:
assert x is not None
if self.transposed: x = x.unsqueeze(-1)
x = self.linear(x)
if self.transposed: x = x.squeeze(-1)
x = rearrange(x, '... (h s) -> ... h s', s=self.stride)
state = list(torch.unbind(x, dim=-1))
else: assert x is None
return y, state
def default_state(self, *batch_shape, device=None):
state = torch.zeros(batch_shape + (self.d_output, self.stride), device=device) # (batch, h, s)
state = list(torch.unbind(state, dim=-1)) # List of (..., H)
return state
@property
def d_output(self): return self._d_output
registry = {
'sample': DownSample,
'pool': DownAvgPool,
'avg': DownAvgPool,
'linear': DownLinearPool,
'spectral': DownSpectralPool,
}
up_registry = {
# 'sample': UpSample,
'pool': UpAvgPool,
'avg': UpAvgPool,
'linear': UpLinearPool,
# 'spectral': UpSpectralPool, # Not implemented and no way to make this causal
}
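# Illustrative sketch (appended; not part of the original file): the basic stride
# behaviour of DownSample and UpSample on a (batch, length, dim) input with
# transposed=False. Shapes are arbitrary, for demonstration only.
if __name__ == "__main__":
    x = torch.randn(2, 8, 4)                        # (B, L, D)
    down = x[..., 0::2, :]                          # DownSample, stride=2: keep every other step
    up = repeat(down, 'b l d -> b (l s) d', s=2)    # UpSample, stride=2: repeat each step
    print(down.shape, up.shape)                     # (2, 4, 4) (2, 8, 4)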
| hyena-dna-main | src/models/sequence/pool.py |
from torch import nn
import functools
class SequenceModule(nn.Module):
"""Abstract sequence model class. All models must adhere to this interface
A SequenceModule is generally a model that transforms an input of shape
(n_batch, l_sequence, d_model) to (n_batch, l_sequence, d_output)
REQUIRED methods and attributes
forward, d_model, d_output: controls standard forward pass, a sequence-to-sequence transformation
__init__ should also satisfy the following interface; see SequenceIdentity for an example
def __init__(self, d_model, transposed=False, **kwargs)
OPTIONAL methods
default_state, step: allows stepping the model recurrently with a hidden state
state_to_tensor, d_state: allows decoding from hidden state
"""
@property
def d_model(self):
"""Model dimension (generally same as input dimension).
This attribute is required for all SequenceModule instantiations.
It is used by the rest of the pipeline (e.g. model backbone, encoder) to track the internal shapes of the full model.
"""
if getattr(self, "_d_model", None) is None:
raise NotImplementedError("SequenceModule instantiation must set d_model")
return self._d_model
@d_model.setter
def d_model(self, d):
self._d_model = d
@property
def d_output(self):
"""Output dimension of model.
This attribute is required for all SequenceModule instantiations.
It is used by the rest of the pipeline (e.g. model backbone, decoder) to track the internal shapes of the full model.
"""
if getattr(self, "_d_output", None) is None:
raise NotImplementedError("SequenceModule instantiation must specify d_output for decoder")
return self._d_output
@d_output.setter
def d_output(self, d):
self._d_output = d
def forward(self, x, state=None, **kwargs):
"""Forward pass of sequence model, a sequence-to-sequence transformation with an optional state.
Generally, this should map a tensor of shape (batch, length, self.d_model) to (batch, length, self.d_output)
Additionally, it returns a "state" which can be any additional information
For example, RNN and SSM layers may return their hidden state,
while some types of transformer layers (e.g. Transformer-XL) may want to pass a state as well
"""
return x, None
@property
def state_to_tensor(self):
"""Returns a function mapping a state to a single tensor.
This method should be implemented if one wants to use the hidden state instead of the output sequence for final prediction.
Currently only used with the StateDecoder.
"""
return lambda _: None
@property
def d_state(self):
""" Returns dimension of output of self.state_to_tensor """
return None
def default_state(self, *batch_shape, device=None):
"""Create initial state for a batch of inputs."""
return None
def step(self, x, state=None, **kwargs):
"""Step the model recurrently for one step of the input sequence.
For example, this should correspond to unrolling an RNN for one step.
If the forward pass has signature (B, L, H1) -> (B, L, H2),
this method should generally have signature (B, H1) -> (B, H2) with an optional recurrent state.
"""
raise NotImplementedError
def TransposedModule(module):
"""Wrap a SequenceModule class to accept transposed parameter, handle state, absorb kwargs"""
# https://stackoverflow.com/a/65470430/1980685
@functools.wraps(module, updated=())
class TransposedModule(module):
def __init__(self, *args, transposed=False, **kwargs):
super().__init__(*args, **kwargs)
self.transposed = transposed
def forward(self, x, state=None, **kwargs):
if self.transposed: x = x.transpose(-1, -2)
x, next_state = super().forward(x, state) # Don't use kwarg because nn.LSTM
next_state = None if state is None else next_state
if self.transposed: x = x.transpose(-1,-2)
return x, next_state
# https://stackoverflow.com/questions/5352781/how-to-set-class-names-dynamically
# TransposedModule.__name__ = module.__name__ # functools wraps is better solution
return TransposedModule
@TransposedModule
class SequenceIdentity(SequenceModule):
"""Simple SequenceModule for testing purposes"""
def __init__(self, d_model, dropout=0.0, **kwargs):
"""Default interface for SequenceModule
d_model: input dimension (sometimes denoted H for hidden dimension)
        transposed: if True, inputs have axis ordering (B, H, L) instead of (B, L, H)
"""
super().__init__()
self.d_model = d_model
self.d_output = d_model
def forward(self, x, state=None):
return x, state
def default_state(self, *batch_shape, device=None):
return None
def step(self, x, state=None, **kwargs):
return x, state
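if __name__ == "__main__":
    # Minimal smoke test (appended; not part of the original file): SequenceIdentity
    # follows the (batch, length, d_model) -> (batch, length, d_output) contract.
    import torch
    m = SequenceIdentity(d_model=8)
    y, state = m(torch.randn(2, 16, 8))
    print(y.shape, m.d_output, state)  # torch.Size([2, 16, 8]) 8 None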
| hyena-dna-main | src/models/sequence/base.py |
""" Wrapper around nn.MultiheadAttention to adhere to SequenceModule interface. """
import torch
import torch.nn.functional as F
from torch import nn
import hydra
from src.models.sequence.base import SequenceModule, TransposedModule
import src.models.nn.utils as U
from einops import rearrange
@TransposedModule
class MultiheadAttention(SequenceModule):
""" Simple wrapper for MultiheadAttention """
def __init__(self, d_model, n_heads, *args, causal=True, **kwargs):
super().__init__()
self.d_model = d_model
self.d_output = d_model
self.mha = nn.MultiheadAttention(d_model, n_heads, *args, batch_first=True, **kwargs)
self.causal = causal
def forward(self, src, attn_mask=None, key_padding_mask=None, state=None, **kwargs):
""" state should represent a mask and key padding mask """
if self.causal and attn_mask is None:
attn_mask = torch.triu(torch.ones(src.size(-2), src.size(-2),
dtype=torch.bool, device=src.device),
diagonal=1)
# attn_mask, key_padding_mask = state
# Note that this returns None for the second argument
y, _ = self.mha(src, src, src, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
return y, None
def step(self, x, state):
# TODO proper cached inference
# x: (B, D)
pass
class VitAttention(SequenceModule):
"""Copied from implementation for ViT: only used for ViT model
This attention class makes several simplifying assumptions (commonly satisfied in vision
applications):
1. q = k = v
2. No masks: no attention mask, no key padding mask
3. Embed dimension = Input dimension, i.e. projection matrices are square.
"""
@property
def d_output(self):
return self.dim
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.,
# proj_drop=0.,
packed_linear=True,
linear_cfg=None,
**kwargs,
):
"""packed_linear: whether to pack all 3 q_proj, k_proj, v_proj into 2 matrix.
This option is to be compatible with T2T-ViT pretrained weights, where there's only one
projection weight matrix.
"""
super().__init__()
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if linear_cfg is not None:
packed_linear = False
self.packed_linear = packed_linear
if packed_linear:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
else:
if linear_cfg is None:
linear_cfg = {'_target_': 'torch.nn.Linear'}
self.q_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.k_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.v_proj = hydra.utils.instantiate(linear_cfg, dim, dim, bias=qkv_bias,
_recursive_=False)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
# Removing this dropout because we do this in SequenceResidualBlock
# self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, state=None):
B, N, C = x.shape
if self.packed_linear:
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
else:
q, k, v = self.q_proj(x), self.k_proj(x), self.v_proj(x)
q, k, v = [rearrange(x, 'b n (h d) -> b h n d', h=self.num_heads) for x in (q, k, v)]
# attn = (q @ k.transpose(-2, -1) * self.scale)
# Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
bsz, num_heads, q_seq_len, dk = q.size()
_, _, k_seq_len, _ = k.size()
q = rearrange(q, 'b h t d -> (b h) t d')
k = rearrange(k, 'b h s d -> (b h) d s')
# Preallocate attn_weights for `baddbmm`
attn = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=q.dtype, device=q.device)
attn = rearrange(torch.baddbmm(attn, q, k, beta=0, alpha=self.scale),
'(b h) t s -> b h t s', h = self.num_heads)
attn = F.softmax(attn, dim=-1, dtype=v.dtype)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
# x = self.proj_drop(x)
return x, None
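# Illustrative sketch (appended; not part of the original file): the causal mask
# built in MultiheadAttention.forward(), applied to a plain nn.MultiheadAttention.
# Sizes are arbitrary and for demonstration only.
if __name__ == "__main__":
    L, d = 6, 8
    mha = nn.MultiheadAttention(d, num_heads=2, batch_first=True)
    src = torch.randn(1, L, d)
    causal_mask = torch.triu(torch.ones(L, L, dtype=torch.bool), diagonal=1)  # True = blocked
    y, _ = mha(src, src, src, attn_mask=causal_mask, need_weights=False)
    print(y.shape)  # torch.Size([1, 6, 8])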
| hyena-dna-main | src/models/sequence/mha.py |
import math
import torch
import torch.nn.functional as F
from einops import rearrange
from fftconv import fftconv_fwd, fftconv_bwd
@torch.jit.script
def _mul_sum(y, q):
return (y * q).sum(dim=1)
# reference convolution with residual connection
def fftconv_ref(u, k, D, dropout_mask, gelu=True, k_rev=None):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
if k_rev is not None:
k_rev_f = torch.fft.rfft(k_rev, n=fft_size) / fft_size
k_f = k_f + k_rev_f.conj()
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
if len(u.shape) > 3: k_f = k_f.unsqueeze(1)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm='forward')[..., :seqlen]
out = y + u * D.unsqueeze(-1)
if gelu:
out = F.gelu(out)
if dropout_mask is not None:
return (out * rearrange(dropout_mask, 'b H -> b H 1')).to(dtype=u.dtype)
else:
return out.to(dtype=u.dtype)
# reference H3 forward pass
def fftconv_h3_ref(k, ssm_kernel, D, q, v, head_dim=1, ssm_kernel_rev=None):
seqlen = k.shape[-1]
fft_size = 2 * seqlen
kv = (rearrange(k, 'b (h d1) l -> b d1 1 h l', d1=head_dim)
* rearrange(v, 'b (h d2) l -> b 1 d2 h l', d2=head_dim)) # b d1 d2 h l
kv_f = torch.fft.rfft(kv.to(dtype=ssm_kernel.dtype), n=fft_size) / fft_size
ssm_kernel_f = torch.fft.rfft(ssm_kernel, n=fft_size) # h L+1
if ssm_kernel_rev is not None:
ssm_kernel_rev_f = torch.fft.rfft(ssm_kernel_rev, n=fft_size) # h L+1
ssm_kernel_f = ssm_kernel_f + ssm_kernel_rev_f.conj()
y = torch.fft.irfft(kv_f * ssm_kernel_f, n=fft_size, norm='forward')[..., :seqlen] # b d1 d2 h l
out = y + kv * D.unsqueeze(-1) # b d1 d2 h l
q = rearrange(q, 'b (h d1) l -> b d1 1 h l', d1=head_dim)
if head_dim > 1:
out = _mul_sum(out, q)
return rearrange(out, 'b d2 h l -> b (h d2) l').to(dtype=k.dtype)
else:
return rearrange(out * q, 'b 1 1 h l -> b h l').to(dtype=k.dtype)
class FFTConvFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, u, k, D, dropout_mask=None, gelu=True, force_fp16_output=False,
output_hbl_layout=False, v=None, head_dim=1, q=None, fftfp16=False, k_rev=None):
seqlen = u.shape[-1]
fft_size = max(2 * 2 ** int(math.ceil(math.log2(seqlen))), 16)
k_f = torch.fft.rfft(k, n=fft_size)
if k_rev is not None:
k_f = k_f + torch.fft.rfft(k_rev, n=fft_size).conj()
if u.stride(-1) != 1:
u = u.contiguous()
k_f = k_f.contiguous()
D = D.contiguous()
if v is not None and v.stride(-1) != 1:
v = v.contiguous()
if q is not None and q.stride(-1) != 1:
q = q.contiguous()
if dropout_mask is not None:
dropout_mask = dropout_mask.contiguous()
ctx.save_for_backward(u, k_f, D, dropout_mask, v, q)
ctx.output_hbl_layout = output_hbl_layout
ctx.head_dim = head_dim
ctx.gelu = gelu
ctx.fftfp16 = fftfp16
ctx.has_k_rev = k_rev is not None
out = fftconv_fwd(u, k_f, D, v, head_dim, q, dropout_mask, gelu, False, False, fft_size, force_fp16_output, output_hbl_layout, fftfp16)
return out
@staticmethod
def backward(ctx, dout):
if ctx.output_hbl_layout:
dout = rearrange(rearrange(dout, 'b h l -> h b l').contiguous(), 'h b l -> b h l')
else:
dout = dout.contiguous()
u, k_f, D, dropout_mask, v, q = ctx.saved_tensors
seqlen = u.shape[-1]
fft_size = max(2 * 2 ** int(math.ceil(math.log2(seqlen))), 16)
du, dk_f, dD, dv, dq = fftconv_bwd(dout, u, k_f, D, v, ctx.head_dim, q, dropout_mask, ctx.gelu, False, False, fft_size,
ctx.output_hbl_layout, ctx.fftfp16)
dk = torch.fft.irfft(dk_f, n=fft_size, norm='forward')[..., :seqlen]
dk_rev = (None if not ctx.has_k_rev
else torch.fft.irfft(dk_f.conj(), n=fft_size, norm='forward')[..., :seqlen])
if v is not None:
dv = dv.to(dtype=v.dtype) # We do atomicAdd in fp32 so might need to convert to fp16
return du, dk, dD, None, None, None, None, dv if v is not None else None, None, dq if q is not None else None, None, dk_rev
def fftconv_func(u, k, D, dropout_mask=None, gelu=True, force_fp16_output=False,
output_hbl_layout=False, v=None, head_dim=1, q=None, fftfp16=False, k_rev=None):
return FFTConvFunc.apply(u, k, D, dropout_mask, gelu, force_fp16_output,
output_hbl_layout, v, head_dim, q, fftfp16, k_rev)
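# Illustrative sketch (appended; not part of the original file): the FFT-based causal
# convolution at the heart of fftconv_ref, checked against a direct O(L^2) convolution.
# It only needs torch, but note that the module-level import of the compiled `fftconv`
# extension above must succeed before this file can be run at all.
if __name__ == "__main__":
    torch.manual_seed(0)
    H, L = 3, 16
    u, k = torch.randn(H, L), torch.randn(H, L)
    fft_size = 2 * L
    y_fft = torch.fft.irfft(
        torch.fft.rfft(u, n=fft_size) * torch.fft.rfft(k, n=fft_size), n=fft_size
    )[..., :L]
    y_direct = torch.stack([
        torch.stack([(u[h, :t + 1].flip(0) * k[h, :t + 1]).sum() for t in range(L)])
        for h in range(H)
    ])
    print(torch.allclose(y_fft, y_direct, atol=1e-4))  # True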
| hyena-dna-main | src/ops/fftconv.py |
"""pykeops implementations of the Vandermonde matrix multiplication kernel used in the S4D kernel."""
import math
import torch
from einops import rearrange, repeat
from opt_einsum import contract
import os
try:
import pykeops
from pykeops.torch import LazyTensor, Genred
except:
pass
try:
from cauchy_mult import vand_log_mult_sym_fwd, vand_log_mult_sym_bwd
except:
vand_log_mult_sym_fwd, vand_log_mult_sym_bwd = None, None
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)
def _broadcast_dims(*tensors):
max_dim = max([len(tensor.shape) for tensor in tensors])
tensors = [tensor.view((1,)*(max_dim-len(tensor.shape))+tensor.shape) for tensor in tensors]
return tensors
def _c2r(x): return torch.view_as_real(x)
def _r2c(x): return torch.view_as_complex(x)
def vandermonde_naive(v, x, L, conj=True):
"""
v: (..., N)
x: (..., N)
returns: (..., L) \sum v x^l
"""
if conj:
x = _conj(x)
v = _conj(v)
vandermonde_matrix = x.unsqueeze(-1) ** torch.arange(L).to(x) # (... N L)
vandermonde_prod = torch.sum(v.unsqueeze(-1) * vandermonde_matrix, dim=-2) # (... L)
return vandermonde_prod
def log_vandermonde_naive(v, x, L, conj=True):
"""
v: (..., N)
x: (..., N)
returns: (..., L) \sum v x^l
"""
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
vandermonde_prod = contract('... n, ... n l -> ... l', v, vandermonde_matrix) # (... L)
if conj:
return 2*vandermonde_prod.real
else:
return vandermonde_prod
def log_vandermonde_lazy(v, x, L, conj=True):
if conj:
v = _conj(v)
x = _conj(x)
l = torch.arange(L).to(x)
v, x, l = _broadcast_dims(v, x, l)
v_l = LazyTensor(rearrange(v, '... N -> ... N 1 1'))
x_l = LazyTensor(rearrange(x, '... N -> ... N 1 1'))
l_l = LazyTensor(rearrange(l, '... L -> ... 1 L 1'))
# exp
vand = (x_l * l_l).exp()
s = (v_l*vand).sum(dim=len(v_l.shape)-2)
return s.squeeze(-1)
def log_vandermonde(v, x, L, conj=True):
expr = 'ComplexMult(v, ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'v = Vj(2)',
'x = Vj(2)',
'l = Vi(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
v, x, l = _broadcast_dims(v, x, l)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(v, x, l, backend='GPU')
if conj:
return 2*_r2c(r).real
else:
return _r2c(r)
def log_vandermonde_transpose_naive(u, v, x, L):
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
vandermonde_prod = contract('... l, ... n, ... n l -> ... n', u.to(x), v.to(x), vandermonde_matrix) # (... L)
return vandermonde_prod
def log_vandermonde_transpose(u, v, x, L):
"""
u: ... H L
v: ... H N
x: ... H N
Returns: ... H N
V = Vandermonde(a, L) : (H N L)
contract_L(V * u * v)
"""
expr = 'ComplexMult(ComplexMult(v, u), ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'u = Vj(2)',
'v = Vi(2)',
'x = Vi(2)',
'l = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
u, v, x, l = _broadcast_dims(u, v, x, l)
u = _c2r(u)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(u, v, x, l, backend='GPU')
return _r2c(r)
def _log_vandermonde_matmul(x, L):
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
return vandermonde_matrix
def log_vandermonde_matmul(v, K):
prod = contract('...n, ...nl -> ...l', v, K)
return 2*prod.real
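# Illustrative helper (appended; not part of the original file): checks the product
# computed by log_vandermonde_naive against an explicit per-step sum on a tiny case.
# It only needs torch/opt_einsum, not pykeops or the CUDA extension, and is never
# called by the library code.
def _log_vandermonde_demo(N=4, L=6):
    v = torch.randn(N, dtype=torch.cfloat)
    x = torch.randn(N, dtype=torch.cfloat)
    out = log_vandermonde_naive(v, x, L)  # 2 * Re( sum_n v_n exp(x_n * l) ) for l = 0..L-1
    manual = torch.stack([2 * (v * torch.exp(x * l)).sum().real for l in range(L)])
    return torch.allclose(out, manual, atol=1e-5)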
class LogVandMultiplySymmetric(torch.autograd.Function):
@staticmethod
def forward(ctx, v, x, L):
batch, N = v.shape
supported_N_values = [1 << log_n for log_n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
if not N in supported_N_values:
raise NotImplementedError(f'Only support N values in {supported_N_values}')
max_L_value = 32 * 1024 * 64 * 1024
if L > max_L_value:
raise NotImplementedError(f'Only support L values <= {max_L_value}')
        if not (v.is_cuda and x.is_cuda):
            raise NotImplementedError('Only support CUDA tensors')
ctx.save_for_backward(v, x)
return vand_log_mult_sym_fwd(v, x, L)
@staticmethod
def backward(ctx, dout):
v, x = ctx.saved_tensors
dv, dx = vand_log_mult_sym_bwd(v, x, dout)
return dv, dx, None
if vand_log_mult_sym_fwd is not None and vand_log_mult_sym_bwd is not None:
log_vandermonde_fast = LogVandMultiplySymmetric.apply
else:
log_vandermonde_fast = None | hyena-dna-main | src/ops/vandermonde.py |
""" Old utilities for parallel scan implementation of Linear RNNs. """
# TODO this file could use much cleanup
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from src.models.functional.toeplitz import triangular_toeplitz_multiply, triangular_toeplitz_multiply_padded
from src.utils.permutations import bitreversal_po2, bitreversal_permutation
### Utilities
def shift_up(a, s=None, drop=True, dim=0):
assert dim == 0
if s is None:
s = torch.zeros_like(a[0, ...])
s = s.unsqueeze(dim)
if drop:
a = a[:-1, ...]
return torch.cat((s, a), dim=dim)
def interleave(a, b, uneven=False, dim=0):
""" Interleave two tensors of same shape """
# assert(a.shape == b.shape)
assert dim == 0 # TODO temporary to make handling uneven case easier
    if dim < 0:
        dim = len(a.shape) + dim
if uneven:
a_ = a[-1:, ...]
a = a[:-1, ...]
c = torch.stack((a, b), dim+1)
out_shape = list(a.shape)
out_shape[dim] *= 2
c = c.view(out_shape)
if uneven:
c = torch.cat((c, a_), dim=dim)
return c
def batch_mult(A, u, has_batch=None):
""" Matrix mult A @ u with special case to save memory if u has additional batch dim
The batch dimension is assumed to be the second dimension
A : (L, ..., N, N)
u : (L, [B], ..., N)
has_batch: True, False, or None. If None, determined automatically
Output:
x : (L, [B], ..., N)
A @ u broadcasted appropriately
"""
if has_batch is None:
has_batch = len(u.shape) >= len(A.shape)
if has_batch:
u = u.permute([0] + list(range(2, len(u.shape))) + [1])
else:
u = u.unsqueeze(-1)
v = (A @ u)
if has_batch:
v = v.permute([0] + [len(u.shape)-1] + list(range(1, len(u.shape)-1)))
else:
v = v[..., 0]
return v
### Main unrolling functions
def unroll(A, u):
"""
A : (..., N, N) # TODO I think this can't take batch dimension?
u : (L, ..., N)
output : x (..., N) # TODO a lot of these shapes are wrong
x[i, ...] = A^{i} @ u[0, ...] + ... + A @ u[i-1, ...] + u[i, ...]
"""
m = u.new_zeros(u.shape[1:])
outputs = []
for u_ in torch.unbind(u, dim=0):
m = F.linear(m, A) + u_
outputs.append(m)
output = torch.stack(outputs, dim=0)
return output
def parallel_unroll_recursive(A, u):
""" Bottom-up divide-and-conquer version of unroll. """
# Main recursive function
def parallel_unroll_recursive_(A, u):
if u.shape[0] == 1:
return u
u_evens = u[0::2, ...]
u_odds = u[1::2, ...]
# u2 = F.linear(u_evens, A) + u_odds
u2 = (A @ u_evens.unsqueeze(-1)).squeeze(-1) + u_odds
A2 = A @ A
x_odds = parallel_unroll_recursive_(A2, u2)
# x_evens = F.linear(shift_up(x_odds), A) + u_evens
x_evens = (A @ shift_up(x_odds).unsqueeze(-1)).squeeze(-1) + u_evens
x = interleave(x_evens, x_odds, dim=0)
return x
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
return parallel_unroll_recursive_(A, u)[:n, ...]
def parallel_unroll_recursive_br(A, u):
""" Same as parallel_unroll_recursive but uses bit reversal for locality. """
# Main recursive function
def parallel_unroll_recursive_br_(A, u):
n = u.shape[0]
if n == 1:
return u
m = n//2
u_0 = u[:m, ...]
u_1 = u[m:, ...]
u2 = F.linear(u_0, A) + u_1
A2 = A @ A
x_1 = parallel_unroll_recursive_br_(A2, u2)
x_0 = F.linear(shift_up(x_1), A) + u_0
# x = torch.cat((x_0, x_1), dim=0) # is there a way to do this with cat?
x = interleave(x_0, x_1, dim=0)
return x
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
# Apply bit reversal
br = bitreversal_po2(N)
u = u[br, ...]
x = parallel_unroll_recursive_br_(A, u)
return x[:n, ...]
def parallel_unroll_iterative(A, u):
""" Bottom-up divide-and-conquer version of unroll, implemented iteratively """
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
# Apply bit reversal
br = bitreversal_po2(N)
u = u[br, ...]
# Main recursive loop, flattened
us = [] # stores the u_0 terms in the recursive version
N_ = N
As = [] # stores the A matrices
for l in range(m):
N_ = N_ // 2
As.append(A)
u_0 = u[:N_, ...]
us.append(u_0)
u = F.linear(u_0, A) + u[N_:, ...]
A = A @ A
x_0 = []
x = u # x_1
for l in range(m-1, -1, -1):
x_0 = F.linear(shift_up(x), As[l]) + us[l]
x = interleave(x_0, x, dim=0)
return x[:n, ...]
def variable_unroll_sequential(A, u, s=None, variable=True):
""" Unroll with variable (in time/length) transitions A.
A : ([L], ..., N, N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
output : x (..., N)
x[i, ...] = A[i]..A[0] @ s + A[i..1] @ u[0] + ... + A[i] @ u[i-1] + u[i]
"""
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
has_batch = len(u.shape) >= len(A.shape)
outputs = []
for (A_, u_) in zip(torch.unbind(A, dim=0), torch.unbind(u, dim=0)):
# s = F.linear(s, A_) + u_
s = batch_mult(A_.unsqueeze(0), s.unsqueeze(0), has_batch)[0]
s = s + u_
outputs.append(s)
output = torch.stack(outputs, dim=0)
return output
def variable_unroll(A, u, s=None, variable=True, recurse_limit=16):
""" Bottom-up divide-and-conquer version of variable_unroll. """
if u.shape[0] <= recurse_limit:
return variable_unroll_sequential(A, u, s, variable)
if s is None:
s = torch.zeros_like(u[0])
uneven = u.shape[0] % 2 == 1
has_batch = len(u.shape) >= len(A.shape)
u_0 = u[0::2, ...]
u_1 = u[1::2, ...]
if variable:
A_0 = A[0::2, ...]
A_1 = A[1::2, ...]
else:
A_0 = A
A_1 = A
u_0_ = u_0
A_0_ = A_0
if uneven:
u_0_ = u_0[:-1, ...]
if variable:
A_0_ = A_0[:-1, ...]
u_10 = batch_mult(A_1, u_0_, has_batch)
u_10 = u_10 + u_1
A_10 = A_1 @ A_0_
# Recursive call
x_1 = variable_unroll(A_10, u_10, s, variable, recurse_limit)
x_0 = shift_up(x_1, s, drop=not uneven)
x_0 = batch_mult(A_0, x_0, has_batch)
x_0 = x_0 + u_0
x = interleave(x_0, x_1, uneven, dim=0) # For some reason this interleave is slower than in the (non-multi) unroll_recursive
return x
def variable_unroll_general_sequential(A, u, s, op, variable=True):
""" Unroll with variable (in time/length) transitions A with general associative operation
A : ([L], ..., N, N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
output : x (..., N)
x[i, ...] = A[i]..A[0] s + A[i..1] u[0] + ... + A[i] u[i-1] + u[i]
"""
if not variable:
A = A.expand((u.shape[0],) + A.shape)
outputs = []
for (A_, u_) in zip(torch.unbind(A, dim=0), torch.unbind(u, dim=0)):
s = op(A_, s)
s = s + u_
outputs.append(s)
output = torch.stack(outputs, dim=0)
return output
def variable_unroll_matrix_sequential(A, u, s=None, variable=True):
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
# has_batch = len(u.shape) >= len(A.shape)
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0))[0]
return variable_unroll_general_sequential(A, u, s, op, variable=True)
def variable_unroll_toeplitz_sequential(A, u, s=None, variable=True, pad=False):
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
# has_batch = len(u.shape) >= len(A.shape)
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0))[0]
if pad:
n = A.shape[-1]
A = F.pad(A, (0, n))
u = F.pad(u, (0, n))
s = F.pad(s, (0, n))
ret = variable_unroll_general_sequential(A, u, s, triangular_toeplitz_multiply_padded, variable=True)
ret = ret[..., :n]
return ret
return variable_unroll_general_sequential(A, u, s, triangular_toeplitz_multiply, variable=True)
### General parallel scan functions with generic binary composition operators
def variable_unroll_general(A, u, s, op, compose_op=None, sequential_op=None, variable=True, recurse_limit=16):
""" Bottom-up divide-and-conquer version of variable_unroll.
compose is an optional function that defines how to compose A without multiplying by a leaf u
"""
if u.shape[0] <= recurse_limit:
if sequential_op is None:
sequential_op = op
return variable_unroll_general_sequential(A, u, s, sequential_op, variable)
if compose_op is None:
compose_op = op
uneven = u.shape[0] % 2 == 1
# has_batch = len(u.shape) >= len(A.shape)
u_0 = u[0::2, ...]
u_1 = u[1::2, ...]
if variable:
A_0 = A[0::2, ...]
A_1 = A[1::2, ...]
else:
A_0 = A
A_1 = A
u_0_ = u_0
A_0_ = A_0
if uneven:
u_0_ = u_0[:-1, ...]
if variable:
A_0_ = A_0[:-1, ...]
u_10 = op(A_1, u_0_) # batch_mult(A_1, u_0_, has_batch)
u_10 = u_10 + u_1
A_10 = compose_op(A_1, A_0_)
# Recursive call
x_1 = variable_unroll_general(A_10, u_10, s, op, compose_op, sequential_op, variable=variable, recurse_limit=recurse_limit)
x_0 = shift_up(x_1, s, drop=not uneven)
x_0 = op(A_0, x_0) # batch_mult(A_0, x_0, has_batch)
x_0 = x_0 + u_0
x = interleave(x_0, x_1, uneven, dim=0) # For some reason this interleave is slower than in the (non-multi) unroll_recursive
return x
def variable_unroll_matrix(A, u, s=None, variable=True, recurse_limit=16):
if s is None:
s = torch.zeros_like(u[0])
has_batch = len(u.shape) >= len(A.shape)
op = lambda x, y: batch_mult(x, y, has_batch)
sequential_op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
matmul = lambda x, y: x @ y
return variable_unroll_general(A, u, s, op, compose_op=matmul, sequential_op=sequential_op, variable=variable, recurse_limit=recurse_limit)
def variable_unroll_toeplitz(A, u, s=None, variable=True, recurse_limit=8, pad=False):
""" Unroll with variable (in time/length) transitions A with general associative operation
A : ([L], ..., N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
output : x (L, [B], ..., N) same shape as u
x[i, ...] = A[i]..A[0] s + A[i..1] u[0] + ... + A[i] u[i-1] + u[i]
"""
# Add the batch dimension to A if necessary
A_batch_dims = len(A.shape) - int(variable)
u_batch_dims = len(u.shape)-1
if u_batch_dims > A_batch_dims:
# assert u_batch_dims == A_batch_dims + 1
if variable:
while len(A.shape) < len(u.shape):
A = A.unsqueeze(1)
# else:
# A = A.unsqueeze(0)
if s is None:
s = torch.zeros_like(u[0])
if pad:
n = A.shape[-1]
A = F.pad(A, (0, n))
u = F.pad(u, (0, n))
s = F.pad(s, (0, n))
op = triangular_toeplitz_multiply_padded
ret = variable_unroll_general(A, u, s, op, compose_op=op, variable=variable, recurse_limit=recurse_limit)
ret = ret[..., :n]
return ret
op = triangular_toeplitz_multiply
ret = variable_unroll_general(A, u, s, op, compose_op=op, variable=variable, recurse_limit=recurse_limit)
return ret
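# Illustrative smoke test (appended; not part of the original file): the recursive
# parallel scan agrees with the sequential unroll on a small random case. Sizes and
# the 0.3 scaling (to keep powers of A well-behaved) are arbitrary choices.
if __name__ == "__main__":
    torch.manual_seed(0)
    L, N = 12, 4
    A = 0.3 * torch.randn(N, N)
    u = torch.randn(L, N)
    x_seq = unroll(A, u)                      # x[i] = A @ x[i-1] + u[i]
    x_par = parallel_unroll_recursive(A, u)   # same recurrence via divide-and-conquer
    print(torch.allclose(x_seq, x_par, atol=1e-5))  # True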
| hyena-dna-main | src/ops/unroll.py |
""" Compute a Krylov function efficiently. (S4 renames the Krylov function to a "state space kernel")
A : (N, N)
b : (N,)
c : (N,)
Return: [c^T A^i b for i in [L]]
"""
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from src.ops.toeplitz import causal_convolution
def krylov_sequential(L, A, b, c=None):
""" Constant matrix A
A : (..., N, N)
b : (..., N)
c : (..., N)
Returns
if c:
x : (..., L)
x[i, l] = c[i] @ A^l @ b[i]
else:
x : (..., N, L)
x[i, l] = A^l @ b[i]
"""
# Check which of dim b and c is smaller to save memory
if c is not None and c.numel() < b.numel():
return krylov_sequential(L, A.transpose(-1, -2), c, b)
b_ = b
x = []
for _ in range(L):
if c is not None:
x_ = torch.sum(c*b_, dim=-1) # (...) # could be faster with matmul or einsum?
else:
x_ = b_
x.append(x_)
b_ = (A @ b_.unsqueeze(-1)).squeeze(-1)
x = torch.stack(x, dim=-1)
return x
def krylov(L, A, b, c=None, return_power=False):
"""
Compute the Krylov matrix (b, Ab, A^2b, ...) using the squaring trick.
If return_power=True, return A^{L-1} as well
"""
# TODO There is an edge case if L=1 where output doesn't get broadcasted, which might be an issue if caller is expecting broadcasting semantics... can deal with it if it arises
x = b.unsqueeze(-1) # (..., N, 1)
A_ = A
AL = None
if return_power:
AL = torch.eye(A.shape[-1], dtype=A.dtype, device=A.device)
_L = L-1
done = L == 1
# loop invariant: _L represents how many indices left to compute
while not done:
if return_power:
if _L % 2 == 1: AL = A_ @ AL
_L //= 2
# Save memory on last iteration
l = x.shape[-1]
if L - l <= l:
done = True
_x = x[..., :L-l]
else: _x = x
_x = A_ @ _x
x = torch.cat([x, _x], dim=-1) # there might be a more efficient way of ordering axes
if not done: A_ = A_ @ A_
assert x.shape[-1] == L
if c is not None:
x = torch.einsum('...nl, ...n -> ...l', x, c)
x = x.contiguous() # WOW!!
if return_power:
return x, AL
else:
return x
@torch.no_grad()
def power(L, A, v=None):
""" Compute A^L and the scan sum_i A^i v_i
A: (..., N, N)
v: (..., N, L)
"""
I = torch.eye(A.shape[-1]).to(A) # , dtype=A.dtype, device=A.device)
powers = [A]
l = 1
while True:
if L % 2 == 1: I = powers[-1] @ I
L //= 2
if L == 0: break
l *= 2
if v is None:
powers = [powers[-1] @ powers[-1]]
else:
powers.append(powers[-1] @ powers[-1])
if v is None: return I
# Invariants:
# powers[-1] := A^l
# l := largest po2 at most L
# Note that an alternative divide and conquer to compute the reduction is possible and can be embedded into the above loop without caching intermediate powers of A
# We do this reverse divide-and-conquer for efficiency reasons:
# 1) it involves fewer padding steps for non-po2 L
# 2) it involves more contiguous arrays
# Take care of edge case for non-po2 arrays
# Note that this initial step is a no-op for the case of power of 2 (l == L)
k = v.size(-1) - l
v_ = powers.pop() @ v[..., l:]
v = v[..., :l]
v[..., :k] = v[..., :k] + v_
# Handle reduction for power of 2
while v.size(-1) > 1:
v = rearrange(v, '... (z l) -> ... z l', z=2)
v = v[..., 0, :] + powers.pop() @ v[..., 1, :]
return I, v.squeeze(-1)
def krylov_toeplitz(L, A, b, c=None):
""" Specializes to lower triangular Toeplitz matrix A represented by its diagonals
A : (..., N)
b : (..., N)
c : (..., N)
Returns
x : (..., N, L)
x[i, l] = A^l @ b[i]
"""
x = b.unsqueeze(0) # (1, ..., N)
A_ = A
while x.shape[0] < L:
xx = causal_convolution(A_, x)
x = torch.cat([x, xx], dim=0) # there might be a more efficient way of ordering axes
A_ = causal_convolution(A_, A_)
x = x[:L, ...] # (L, ..., N)
if c is not None:
x = torch.einsum('l...n, ...n -> ...l', x, c)
else:
x = rearrange(x, 'l ... n -> ... n l')
x = x.contiguous()
return x
def krylov_toeplitz_(L, A, b, c=None):
""" Padded version of krylov_toeplitz that saves some fft's
TODO currently not faster than original version, not sure why
"""
N = A.shape[-1]
x = b.unsqueeze(0) # (1, ..., N)
x = F.pad(x, (0, N))
A = F.pad(A, (0, N))
done = L == 1
while not done:
l = x.shape[0]
# Save memory on last iteration
if L - l <= l:
done = True
_x = x[:L-l]
else: _x = x
Af = torch.fft.rfft(A, n=2*N, dim=-1)
xf = torch.fft.rfft(_x, n=2*N, dim=-1)
xf_ = Af * xf
x_ = torch.fft.irfft(xf_, n=2*N, dim=-1)
x_[..., N:] = 0
x = torch.cat([x, x_], dim=0) # there might be a more efficient way of ordering axes
if not done:
A = torch.fft.irfft(Af*Af, n=2*N, dim=-1)
A[..., N:] = 0
x = x[:L, ..., :N] # (L, ..., N)
if c is not None:
x = torch.einsum('l...n, ...n -> ...l', x, c)
else:
x = rearrange(x, 'l ... n -> ... n l')
x = x.contiguous()
return x
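# Illustrative smoke test (appended; not part of the original file): the squaring-trick
# krylov() agrees with the sequential reference on a small random case. Sizes and the
# 1/N scaling of A are arbitrary choices for demonstration.
if __name__ == "__main__":
    torch.manual_seed(0)
    L, N = 10, 4
    A = torch.randn(N, N) / N
    b, c = torch.randn(N), torch.randn(N)
    k_seq = krylov_sequential(L, A, b, c)   # k[l] = c @ A^l @ b
    k_fast = krylov(L, A, b, c)
    print(torch.allclose(k_seq, k_fast, atol=1e-5))  # True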
| hyena-dna-main | src/ops/krylov.py |
""" Utilities for computing convolutions.
There are 3 equivalent views:
1. causal convolution
2. multiplication of (lower) triangular Toeplitz matrices
3. polynomial multiplication (mod x^N)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def construct_toeplitz(v, f=0.0):
"""Explicit construction of Krylov matrix [v A @ v A^2 @ v ... A^{n-1} @ v]
where A = Z_f. This uses vectorized indexing and cumprod so it's much
faster than using the Krylov function.
Parameters:
v: the starting vector of size n or (rank, n).
f: real number
Returns:
K: Krylov matrix of size (n, n) or (rank, n, n).
"""
n = v.shape[-1]
a = torch.arange(n, device=v.device)
b = -a
indices = a[:, None] + b[None]
K = v[..., indices]
K[..., indices < 0] *= f
return K
def triangular_toeplitz_multiply_(u, v, sum=None):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
uv_f = u_f * v_f
if sum is not None:
uv_f = uv_f.sum(dim=sum)
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
def triangular_toeplitz_multiply_padded_(u, v):
""" Same as triangular_toeplitz_multiply but inputs and output assume to be 0-padded already. """
n = u.shape[-1]
assert n % 2 == 0
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
output[..., n:] = 0
return output
class TriangularToeplitzMult(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
return triangular_toeplitz_multiply_(u, v)
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultFast(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_expand = F.pad(u, (0, n))
v_expand = F.pad(v, (0, n))
u_f = torch.fft.rfft(u_expand, n=2*n, dim=-1)
v_f = torch.fft.rfft(v_expand, n=2*n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=2*n, dim=-1)[..., :n]
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad.flip(-1), (0, n))
g_f = torch.fft.rfft(g_expand, n=2*n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=2*n, dim=-1)[..., :n]
d_v = torch.fft.irfft(gu_f, n=2*n, dim=-1)[..., :n]
d_u = d_u.flip(-1)
d_v = d_v.flip(-1)
return d_u, d_v
class TriangularToeplitzMultPadded(torch.autograd.Function):
@staticmethod
def forward(ctx, u, v):
ctx.save_for_backward(u, v)
output = triangular_toeplitz_multiply_(u, v)
return output
@staticmethod
def backward(ctx, grad):
u, v = ctx.saved_tensors
d_u = triangular_toeplitz_multiply_padded_(grad.flip(-1), v).flip(-1)
d_v = triangular_toeplitz_multiply_padded_(grad.flip(-1), u).flip(-1)
return d_u, d_v
class TriangularToeplitzMultPaddedFast(torch.autograd.Function):
""" Trade off speed (20-25% faster) for more memory (20-25%) """
@staticmethod
def forward(ctx, u, v):
n = u.shape[-1]
u_f = torch.fft.rfft(u, n=n, dim=-1)
v_f = torch.fft.rfft(v, n=n, dim=-1)
ctx.save_for_backward(u_f, v_f)
uv_f = u_f * v_f
output = torch.fft.irfft(uv_f, n=n, dim=-1)
output[..., n//2:].zero_()
return output
@staticmethod
def backward(ctx, grad):
u_f, v_f = ctx.saved_tensors
n = grad.shape[-1]
g_expand = F.pad(grad[..., :n//2].flip(-1), (0, n//2))
g_f = torch.fft.rfft(g_expand, n=n, dim=-1)
gu_f = g_f * u_f
gv_f = g_f * v_f
d_u = torch.fft.irfft(gv_f, n=n, dim=-1)
d_v = torch.fft.irfft(gu_f, n=n, dim=-1)
d_u[..., n//2:].zero_()
d_v[..., n//2:].zero_()
d_u[..., :n//2] = d_u[..., :n//2].flip(-1) # TODO
d_v[..., :n//2] = d_v[..., :n//2].flip(-1) # TODO
return d_u, d_v
# triangular_toeplitz_multiply = triangular_toeplitz_multiply_
triangular_toeplitz_multiply = TriangularToeplitzMult.apply
triangular_toeplitz_multiply_fast = TriangularToeplitzMultFast.apply
triangular_toeplitz_multiply_padded = TriangularToeplitzMultPadded.apply
triangular_toeplitz_multiply_padded_fast = TriangularToeplitzMultPaddedFast.apply
def causal_convolution(u, v, fast=True, pad=False):
if not pad and not fast:
return triangular_toeplitz_multiply(u, v)
if not pad and fast:
return triangular_toeplitz_multiply_fast(u, v)
if pad and not fast:
return triangular_toeplitz_multiply_padded(u, v)
if pad and fast:
return triangular_toeplitz_multiply_padded_fast(u, v)
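# Illustrative smoke test (appended; not part of the original file): causal convolution
# via FFT matches multiplication by the explicit lower-triangular Toeplitz matrix, i.e.
# views 1 and 2 from the module docstring agree. Sizes are arbitrary.
if __name__ == "__main__":
    torch.manual_seed(0)
    n = 8
    u, v = torch.randn(n), torch.randn(n)
    T_u = construct_toeplitz(u)   # (n, n) lower-triangular Toeplitz with first column u
    out_matmul = T_u @ v
    out_fft = causal_convolution(u, v)
    print(torch.allclose(out_matmul, out_fft, atol=1e-5))  # True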
| hyena-dna-main | src/ops/toeplitz.py |
"""Training example.
Example usage:
python -u main.py \
--dataset=cc3m --val-dataset=cc3m \
--opt-version='facebook/opt-6.7b' --visual-model='openai/clip-vit-large-patch14' \
--exp_name='gill_exp' --log-base-dir='runs/' \
--batch-size=64 --val-batch-size=64 --precision='bf16'
Example run on 2 A6000 GPUs to reproduce the paper results:
randport=$(shuf -i8000-9999 -n1) # Generate a random port number
python -u main.py \
--dist-url "tcp://127.0.0.1:${randport}" --dist-backend 'nccl' \
--multiprocessing-distributed --world-size 1 --rank 0 \
--dataset=cc3m --val-dataset=cc3m \
--exp-name='gill_exp' --image-dir='data/' --log-base-dir='runs/' \
--precision='bf16' --print-freq=100
"""
import argparse
from collections import OrderedDict
import json
import os
import random
import sys
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
from torch.optim.lr_scheduler import StepLR
from warmup_scheduler import GradualWarmupScheduler
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
from torch.utils.tensorboard import SummaryWriter
import torchvision
from transformers import AutoTokenizer
from gill import data
from gill import losses as losses_utils
from gill import models
from gill import utils
from gill import validate
llm_models = ['facebook/opt-125m', 'facebook/opt-350m', 'facebook/opt-1.3b', 'facebook/opt-2.7b',
'facebook/opt-6.7b', 'facebook/opt-13b', 'facebook/opt-30b', 'facebook/opt-66b']
datasets = ['cc3m']
best_acc1 = 0 # Variable to keep track of best model so far.
def parse_args(args):
parser = argparse.ArgumentParser(description='GILL training')
parser.add_argument('--opt-version', default='facebook/opt-6.7b',
choices=llm_models,
help='OPT versions: ' +
' | '.join(llm_models) +
' (default: "facebook/opt-6.7b")')
parser.add_argument('--visual-model', default='openai/clip-vit-large-patch14', type=str,
help="Visual encoder to use.")
parser.add_argument('--num-tokens', default=8, type=int, metavar='N', help='Number of [IMG] tokens to use.')
parser.add_argument('--num-clip-tokens', default=77, type=int, metavar='N', help='Number of CLIP token to use for generation.')
parser.add_argument('-d', '--dataset', metavar='DATASET', help='Delimited list of datasets:' +
' | '.join(datasets), default='cc3.1m',
type=lambda s: [x for x in s.split(',')])
parser.add_argument('--val-dataset', metavar='DATASET', default='cc3.1m',
type=lambda s: [x for x in s.split(',')],
help='Validation dataset: ' +
' | '.join(datasets) +
' (default: cc3.1m)')
parser.add_argument('--dataset-dir', default='datasets', type=str,
help='Dataset directory containing .tsv files.')
parser.add_argument('--image-dir', default='data/', type=str,
help='Dataset directory containing image folders.')
parser.add_argument('--log-base-dir', default='./runs', type=str,
help='Base directory to write logs and ckpts to.')
parser.add_argument('--exp-name', default='frozen', type=str,
help='Name of experiment, used for saving checkpoints.')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--steps_per_epoch', default=2000, type=int, metavar='N',
help='number of training steps per epoch')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--val_steps_per_epoch', default=-1, type=int, metavar='N',
help='number of validation steps per epoch')
parser.add_argument('-b', '--batch-size', default=200, type=int,
metavar='N',
help='mini-batch size (default: 200), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--val-batch-size', default=None, type=int)
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--lr-warmup-steps', default=2000, type=int,
metavar='N', help='Number of steps to warm up lr.')
parser.add_argument('--lr_schedule_step_size', default=5, type=int,
metavar='N', help='Number of steps before decaying lr.')
parser.add_argument('--lr_schedule_gamma', default=0.1, type=float,
metavar='N', help='Decay parameter for learning rate scheduler.')
parser.add_argument('--grad-accumulation-steps', default=1, type=int, metavar='N',
help='number of gradient accumulation steps')
parser.add_argument('--grad-clip', default=1.0, type=float, help='gradient clipping amount')
parser.add_argument('--precision', default='bf16', type=str, choices=['fp32', 'fp16', 'bf16'],
help="What precision to train in.")
parser.add_argument('--cap-loss-scale', type=float, default=1.0, help="Scale on captioning loss.")
parser.add_argument('--ret-loss-scale', type=float, default=1.0, help="Scale on retrieval loss.")
  parser.add_argument('--gen-loss-scale', type=float, default=1.0, help="Scale on generation loss.")
parser.add_argument('--concat-captions-prob', type=float, default=0.5, help="Probability of concatenating two examples sequentially for captioning.")
parser.add_argument('--input-prompt', default='A picture of', type=str, help="Input prompt for the language model, if any.")
parser.add_argument('--image-size', default=224, type=int, metavar='N', help='Size of images.')
parser.add_argument('--ret-emb-dim', default=256, type=int, metavar='N', help='Embedding dimension for retrieval.')
parser.add_argument('--gen-emb-dim', default=768, type=int, metavar='N', help='Embedding dimension for generation.')
text_fc_modes = ['linear', 'gill_mapper']
parser.add_argument('--text-fc-mode', default='gill_mapper',
choices=text_fc_modes, help='What kind of translation mapping to use.')
parser.add_argument('--ret-text-fc-mode', default='linear',
choices=text_fc_modes, help='What kind of translation mapping to use.')
parser.add_argument('--max-len', default=32, type=int,
metavar='N', help='Maximum length to truncate captions / generations to.')
parser.add_argument('--n-visual-tokens', default=4, type=int,
metavar='N', help='Number of visual tokens to use for the Frozen model.')
parser.add_argument('--beta1', default=0.9, type=float, metavar='M',
help='beta1 for Adam')
parser.add_argument('--beta2', default=0.95, type=float, metavar='M',
help='beta2 for Adam')
parser.add_argument('--wd', '--weight-decay', default=0.01, type=float,
metavar='W', help='weight decay (default: 0.01)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:1337', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
return parser.parse_args(args)
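# Entry point: parses CLI arguments, creates a unique log directory under --log-base-dir,
# dumps the run configuration, and launches either a single worker or one process per GPU
# (via torch.multiprocessing.spawn) when multiprocessing distributed training is requested.
# Example invocation (illustrative flags only; adjust paths and model sizes to your setup):
#   python main.py --multiprocessing-distributed --dataset cc3m --val-dataset cc3m \
#     --opt-version facebook/opt-6.7b --visual-model openai/clip-vit-large-patch14 \
#     --exp-name gill_exp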
def main(args):
args = parse_args(args)
i = 1
args.log_dir = os.path.join(args.log_base_dir, args.exp_name)
while os.path.exists(args.log_dir):
args.log_dir = os.path.join(args.log_base_dir, f'{args.exp_name}_{i}')
i += 1
os.makedirs(args.log_dir)
  with open(os.path.join(args.log_dir, 'args.json'), 'w') as wf:
    json.dump(vars(args), wf, indent=4)
  with open(os.path.join(args.log_dir, 'git_info.txt'), 'w') as wf:
    utils.dump_git_status(out_file=wf)
print(f'Logging to {args.log_dir}.')
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
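# Per-process worker: builds the GILL model (frozen OPT LM + frozen CLIP vision encoder),
# extends the tokenizer with [IMG0]..[IMGn] tokens, wraps the model for (distributed) data
# parallelism, optionally resumes from a checkpoint, and runs the train / validate loop.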
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# Create model
model_args = models.GILLArgs()
model_args.opt_version = args.opt_version
model_args.visual_encoder = args.visual_model
model_args.freeze_lm = True
model_args.freeze_vm = True
model_args.n_visual_tokens = args.n_visual_tokens
model_args.ret_emb_dim = args.ret_emb_dim
model_args.gen_emb_dim = args.gen_emb_dim
model_args.text_fc_mode = args.text_fc_mode
model_args.ret_text_fc_mode = args.ret_text_fc_mode
model_args.num_tokens = args.num_tokens
model_args.num_clip_tokens = args.num_clip_tokens
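  # Sanity check: unless the gill_mapper handles the projection, the flattened [IMG] embedding
  # size (num_tokens * gen_emb_dim) must match the CLIP text-encoder output it regresses to
  # (num_clip_tokens * 768 or * 1024, depending on the CLIP variant).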
assert args.num_tokens == 0 or 'gill_mapper' in model_args.text_fc_mode or (args.num_tokens * args.gen_emb_dim == args.num_clip_tokens * 768 or args.num_tokens * args.gen_emb_dim == args.num_clip_tokens * 1024), (f'{args.num_tokens} * {args.gen_emb_dim} != {args.num_clip_tokens} * 768 (or 1024)')
tokenizer = AutoTokenizer.from_pretrained(args.opt_version, use_fast=False)
if tokenizer.pad_token is None:
if args.opt_version in ['EleutherAI/gpt-j-6B']:
tokenizer.pad_token = tokenizer.eos_token
else:
tokenizer.pad_token_id = tokenizer.eos_token_id
print("tokenizer.pad_token, tokenizer.eos_token:", tokenizer.pad_token, tokenizer.eos_token)
# Add an image token for loss masking (and visualization) purposes.
tokenizer.add_special_tokens({"cls_token": "<|image|>"}) # add special image token to tokenizer
# Add [IMG] tokens to the vocabulary.
model_args.retrieval_token_idx = []
args.retrieval_token_idx = []
for i in range(model_args.num_tokens):
print(f'Adding [IMG{i}] token to vocabulary.')
print(f'Before adding new token, tokenizer("[IMG{i}]") =', tokenizer(f'[IMG{i}]', add_special_tokens=False))
num_added_tokens = tokenizer.add_tokens(f'[IMG{i}]')
print(f'After adding {num_added_tokens} new tokens, tokenizer("[IMG{i}]") =', tokenizer(f'[IMG{i}]', add_special_tokens=False))
ret_token_idx = tokenizer(f'[IMG{i}]', add_special_tokens=False).input_ids
assert len(ret_token_idx) == 1, ret_token_idx
model_args.retrieval_token_idx.append(ret_token_idx[0])
args.retrieval_token_idx.append(ret_token_idx[0])
# Add [IMG] tokens to the vocabulary.
model_args.gen_token_idx = model_args.retrieval_token_idx
args.gen_token_idx = args.retrieval_token_idx
# Save model args to disk.
with open(os.path.join(args.log_dir, 'model_args.json'), 'w') as f:
json.dump(vars(model_args), f, indent=4)
model = models.GILL(tokenizer, model_args)
if args.precision == 'fp16':
model = model.float()
elif args.precision == 'bf16':
model = model.bfloat16()
# Print parameters and count of model.
param_counts_text = utils.get_params_count_str(model)
with open(os.path.join(args.log_dir, 'param_count.txt'), 'w') as f:
f.write(param_counts_text)
# Log trainable parameters to Tensorboard.
_, total_trainable_params, total_nontrainable_params = utils.get_params_count(model)
writer = SummaryWriter(args.log_dir)
writer.add_scalar('params/total', total_trainable_params + total_nontrainable_params, 0)
writer.add_scalar('params/total_trainable', total_trainable_params, 0)
writer.add_scalar('params/total_non_trainable', total_nontrainable_params, 0)
writer.close()
if not torch.cuda.is_available():
print('WARNING: using CPU, this will be slow!')
model = torch.nn.DataParallel(model)
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs of the current node.
args.batch_size = int(args.batch_size / ngpus_per_node)
args.val_batch_size = int((args.val_batch_size or args.batch_size) / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=False)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion), optimizer, and learning rate scheduler
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer_cls = torch.optim.AdamW
print('Using torch.optim.AdamW as the optimizer.')
optimizer = optimizer_cls(model.parameters(), args.lr,
betas=(args.beta1, args.beta2),
weight_decay=args.weight_decay,
eps=1e-8)
"""Sets the learning rate to the initial LR decayed by 10 every 5 epochs"""
scheduler_steplr = StepLR(optimizer, step_size=args.lr_schedule_step_size * args.steps_per_epoch, gamma=args.lr_schedule_gamma)
scheduler = GradualWarmupScheduler(optimizer, multiplier=1.0, total_epoch=args.lr_warmup_steps, after_scheduler=scheduler_steplr)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'], strict=False)
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
train_dataset = data.get_dataset(args, 'train', tokenizer)
val_dataset = data.get_dataset(args, 'val', tokenizer)
print(f'Training with {len(train_dataset)} examples and validating with {len(val_dataset)} examples.')
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, drop_last=True)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, drop_last=True)
else:
train_sampler = None
val_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=(args.val_batch_size or args.batch_size), shuffle=False,
num_workers=args.workers, pin_memory=True, sampler=val_sampler)
  if args.evaluate:
    # No training epoch has run yet, so log validation under the starting epoch.
    validate.validate(val_loader, model, tokenizer, criterion, args.start_epoch, args)
    return
for epoch in range(args.start_epoch, args.epochs):
if epoch == 0:
validate.validate(val_loader, model, tokenizer, criterion, epoch-1, args)
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
train(train_loader, model, tokenizer, criterion, optimizer, epoch, scheduler, args)
# evaluate on validation set
acc1 = validate.validate(val_loader, model, tokenizer, criterion, epoch, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
# Only save non-frozen parameters.
stripped_state_dict = {
k: v for k, v in model.state_dict().items() if
('.lm' not in k and '.visual_model' not in k)
}
stripped_state_dict = OrderedDict(sorted(stripped_state_dict.items()))
utils.save_checkpoint({
'epoch': epoch + 1,
'state_dict': stripped_state_dict,
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
'scheduler' : scheduler.state_dict()
}, is_best, os.path.join(args.log_dir, 'ckpt'))
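# Runs one epoch of multi-task training: every step performs a captioning, a retrieval, and a
# generation forward pass, sums the (scaled) losses into a single backward pass, and logs
# metrics and example image grids to TensorBoard every --print-freq steps.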
def train(train_loader, model, tokenizer, criterion, optimizer, epoch, scheduler, args):
ngpus_per_node = torch.cuda.device_count()
batch_time = utils.AverageMeter('Time', ':6.3f')
cap_time = utils.AverageMeter('CaptioningTime', ':6.3f')
ret_time = utils.AverageMeter('RetrievalTime', ':6.3f')
data_time = utils.AverageMeter('Data', ':6.3f')
losses = utils.AverageMeter('Loss', ':.4e')
ce_losses = utils.AverageMeter('CeLoss', ':.4e')
top1 = utils.AverageMeter('Acc@1', ':6.2f')
top5 = utils.AverageMeter('Acc@5', ':6.2f')
cont_losses = utils.AverageMeter('ContLoss', ':.4e')
gen_losses = utils.AverageMeter('GenLoss', ':.4e')
top1_caption = utils.AverageMeter('AccCaption@1', ':6.2f')
top5_caption = utils.AverageMeter('AccCaption@5', ':6.2f')
top1_image = utils.AverageMeter('AccImage@1', ':6.2f')
top5_image = utils.AverageMeter('AccImage@5', ':6.2f')
cap_vis_emb_norm = utils.AverageMeter('VisualEmbNormCap', ':.4e')
ret_vis_emb_norm = utils.AverageMeter('VisualEmbNormRet', ':.4e')
inp_emb_norm = utils.AverageMeter('TextEmbNorm', ':.4e')
all_emb_norm = utils.AverageMeter('AllEmbNorm', ':.4e')
ret_emb_norm = utils.AverageMeter('RetEmbNorm', ':.4e')
writer = SummaryWriter(args.log_dir)
progress = utils.ProgressMeter(
args.steps_per_epoch,
[batch_time, losses, ce_losses, cont_losses, gen_losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (_, images, caption_images, ret_tokens, ret_caption_len, gen_tokens, gen_caption_len, clip_emb) in enumerate(train_loader):
actual_step = epoch * args.steps_per_epoch + i + 1
# measure data loading time
data_time.update(time.time() - end)
if torch.cuda.is_available():
images = images.cuda(args.gpu, non_blocking=True)
ret_tokens = ret_tokens.cuda(args.gpu, non_blocking=True)
ret_caption_len = ret_caption_len.cuda(args.gpu, non_blocking=True)
gen_tokens = gen_tokens.cuda(args.gpu, non_blocking=True)
gen_caption_len = gen_caption_len.cuda(args.gpu, non_blocking=True)
clip_emb = clip_emb.cuda(args.gpu, non_blocking=True)
if args.precision == 'fp16':
images = images.half()
elif args.precision == 'bf16':
images = images.bfloat16()
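    # Each training step runs all three task modes on the same batch and accumulates their
    # losses before the (possibly gradient-accumulated) optimizer update below.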
model_modes = ['captioning', 'retrieval', 'generation']
loss = 0
for model_mode in model_modes:
print('Running', model_mode)
mode_start = time.time()
# compute output
concat_captions = random.uniform(0, 1) < args.concat_captions_prob
if model_mode == 'retrieval':
tgt_tokens, token_len = ret_tokens, ret_caption_len
elif model_mode == 'generation':
tgt_tokens, token_len = gen_tokens, gen_caption_len
else:
tgt_tokens, token_len = ret_tokens, ret_caption_len # For captioning, it doesn't matter.
(model_output, full_labels, last_embedding, _, visual_embs, visual_embs_norm,
input_embs_norm, _) = model(images, tgt_tokens, token_len, mode=model_mode,
concat_captions=concat_captions)
output = model_output.logits
# Measure captioning accuracy for multi-task models and next-token prediction for retrieval models.
if model_mode == 'captioning':
acc1, acc5 = utils.accuracy(output[:, :-1, :], full_labels[:, 1:], -100, topk=(1, 5))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
ce_loss = model_output.loss
if model_mode == 'captioning':
ce_loss = ce_loss * args.cap_loss_scale
elif model_mode == 'retrieval':
ce_loss = ce_loss * args.ret_loss_scale * 0.5
elif model_mode == 'generation':
ce_loss = ce_loss * args.gen_loss_scale * 0.5
else:
raise NotImplementedError
loss += ce_loss
ce_losses.update(ce_loss.item(), images.size(0))
if model_mode == 'retrieval':
# Cross replica concat for embeddings.
if args.distributed:
all_visual_embs = [torch.zeros_like(visual_embs) for _ in range(dist.get_world_size())]
all_last_embedding = [torch.zeros_like(last_embedding) for _ in range(dist.get_world_size())]
dist.all_gather(all_visual_embs, visual_embs)
dist.all_gather(all_last_embedding, last_embedding)
          # Overwrite with the embeddings produced on this replica, which have the gradient.
all_visual_embs[dist.get_rank()] = visual_embs
all_last_embedding[dist.get_rank()] = last_embedding
visual_embs = torch.cat(all_visual_embs)
last_embedding = torch.cat(all_last_embedding)
start_idx = args.rank * images.shape[0]
end_idx = start_idx + images.shape[0]
print(visual_embs.shape, last_embedding.shape)
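        # Symmetric contrastive objective: image->text and text->image logits are formed from
        # the projected visual embeddings and the text-side [IMG] embeddings.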
logits_per_image = visual_embs @ last_embedding.t()
logits_per_text = logits_per_image.t()
if i == 0:
print(f'Running contrastive loss over logits_per_text.shape = {logits_per_text.shape} and logits_per_image.shape = {logits_per_image.shape}')
caption_loss = losses_utils.contrastive_loss(logits_per_text)
image_loss = losses_utils.contrastive_loss(logits_per_image)
caption_acc1, caption_acc5 = losses_utils.contrastive_acc(logits_per_text, topk=(1, 5))
image_acc1, image_acc5 = losses_utils.contrastive_acc(logits_per_image, topk=(1, 5))
loss += args.ret_loss_scale * (caption_loss + image_loss) / 2.0
cont_losses.update(loss.item(), images.size(0))
# measure accuracy and record loss
top1_caption.update(caption_acc1[0], images.size(0))
top5_caption.update(caption_acc5[0], images.size(0))
top1_image.update(image_acc1[0], images.size(0))
top5_image.update(image_acc5[0], images.size(0))
elif model_mode == 'generation':
if args.num_tokens != 0 and args.num_clip_tokens != args.num_tokens:
seq_len = clip_emb.shape[1]
last_embedding = last_embedding.reshape((last_embedding.shape[0], seq_len, -1))
          assert last_embedding.shape == clip_emb.shape, (last_embedding.shape, clip_emb.shape)
image_loss = losses_utils.l2_loss(clip_emb, last_embedding) # (N,)
gen_loss = args.gen_loss_scale * image_loss.mean()
loss += gen_loss
gen_losses.update(gen_loss.item(), images.size(0))
if model_mode == 'retrieval':
ret_vis_emb_norm.update(visual_embs_norm.item(), images.size(0))
elif model_mode == 'captioning':
cap_vis_emb_norm.update(visual_embs_norm.item(), images.size(0))
inp_emb_norm.update(input_embs_norm.item(), images.size(0))
if model_mode in ['retrieval', 'generation']:
ret_time.update(time.time() - mode_start)
elif model_mode == 'captioning':
cap_time.update(time.time() - mode_start)
loss = loss / args.grad_accumulation_steps
losses.update(loss.item(), images.size(0))
loss.backward()
# Update weights
if ((i + 1) % args.grad_accumulation_steps == 0) or (i == args.steps_per_epoch - 1):
# Zero out gradients of the embedding matrix outside of [IMG].
for param in model.module.model.input_embeddings.parameters():
assert param.grad.shape[0] == len(tokenizer)
# Keep other embeddings frozen.
mask = torch.zeros((param.grad.shape[0], 1)).to(param.grad)
for ret_idx in args.retrieval_token_idx:
mask[ret_idx] = 1
for gen_idx in args.gen_token_idx:
mask[gen_idx] = 1
param.grad = param.grad * mask
# compute gradient and do SGD step
if args.grad_clip > 0:
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
optimizer.zero_grad()
print('=' * 80)
with torch.no_grad():
# Normalize trainable embeddings.
frozen_norm = torch.norm(model.module.model.input_embeddings.weight[:-args.num_tokens, :], dim=1).mean(0)
for ret_idx in args.retrieval_token_idx:
trainable_weight = model.module.model.input_embeddings.weight[ret_idx, :]
model.module.model.input_embeddings.weight[ret_idx, :].div_(trainable_weight.norm(dim=-1) / frozen_norm)
# Log norms to Tensorboard.
embedding_norm = torch.norm(model.module.model.input_embeddings.weight, dim=1).mean()
ret_embedding_norm = torch.norm(model.module.model.input_embeddings.weight[args.retrieval_token_idx, :], dim=-1).mean()
all_emb_norm.update(embedding_norm.item(), images.size(0))
ret_emb_norm.update(ret_embedding_norm.item(), images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if actual_step == 1 or (i + 1) % args.print_freq == 0:
print('First 5 values of first 3 tokens of embedding matrix:', model.module.model.input_embeddings.weight.data[:3, :5])
if args.num_tokens > 0:
print('First 5 values of [GEN0] token embeddings:', model.module.model.input_embeddings.weight.data[args.gen_token_idx[0], :5])
print(f'First 5 values of [GEN{args.num_tokens-1}] token embeddings:', model.module.model.input_embeddings.weight.data[args.gen_token_idx[-1], :5])
print('First 5 values of first [IMG0] token embeddings:', model.module.model.input_embeddings.weight.data[args.retrieval_token_idx[0], :5])
print(f'First 5 values of first [IMG{args.num_tokens-1}] token embeddings:', model.module.model.input_embeddings.weight.data[args.retrieval_token_idx[-1], :5])
ex_per_sec = args.batch_size / batch_time.avg
if args.distributed:
batch_time.all_reduce()
data_time.all_reduce()
ex_per_sec = (args.batch_size / batch_time.avg) * ngpus_per_node
losses.all_reduce()
ce_losses.all_reduce()
top1.all_reduce()
top5.all_reduce()
cap_vis_emb_norm.all_reduce()
ret_vis_emb_norm.all_reduce()
inp_emb_norm.all_reduce()
ret_time.all_reduce()
all_emb_norm.all_reduce()
ret_emb_norm.all_reduce()
cont_losses.all_reduce()
gen_losses.all_reduce()
top1_caption.all_reduce()
top5_caption.all_reduce()
top1_image.all_reduce()
top5_image.all_reduce()
cap_time.all_reduce()
progress.display(i + 1)
writer.add_scalar('train/loss', losses.avg, actual_step)
writer.add_scalar('train/ce_loss', ce_losses.avg, actual_step)
writer.add_scalar('train/seq_top1_acc', top1.avg, actual_step)
writer.add_scalar('train/seq_top5_acc', top5.avg, actual_step)
writer.add_scalar('train/gen_l2_loss', gen_losses.avg, actual_step)
writer.add_scalar('train/contrastive_loss', cont_losses.avg, actual_step)
writer.add_scalar('train/t2i_top1_acc', top1_caption.avg, actual_step)
writer.add_scalar('train/t2i_top5_acc', top5_caption.avg, actual_step)
writer.add_scalar('train/i2t_top1_acc', top1_image.avg, actual_step)
writer.add_scalar('train/i2t_top5_acc', top5_image.avg, actual_step)
writer.add_scalar('train/embmat_all_norm', embedding_norm.item(), actual_step)
writer.add_scalar('train/embmat_ret_norm', ret_embedding_norm.item(), actual_step)
writer.add_scalar('train/vis_emb_norm_cap', cap_vis_emb_norm.avg, actual_step)
writer.add_scalar('train/vis_emb_norm_ret', ret_vis_emb_norm.avg, actual_step)
writer.add_scalar('train/text_emb_norm', inp_emb_norm.avg, actual_step)
writer.add_scalar('metrics/total_secs_per_batch', batch_time.avg, actual_step)
writer.add_scalar('metrics/total_secs_captioning', cap_time.avg, actual_step)
writer.add_scalar('metrics/total_secs_retrieval', ret_time.avg, actual_step)
writer.add_scalar('metrics/data_secs_per_batch', data_time.avg, actual_step)
writer.add_scalar('metrics/examples_per_sec', ex_per_sec, actual_step)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
image_bs = images.shape[0]
normalized_images = images - images.min()
normalized_images /= normalized_images.max() # (N, 3, H, W)
max_images_to_show = 16
# Append caption text.
pred_tokens = output[:, args.n_visual_tokens-1:-1, :].argmax(dim=-1)
generated_captions = tokenizer.batch_decode(pred_tokens, skip_special_tokens=False)
if model_mode == 'captioning':
# OPTIM(jykoh): Truncate before creating images, rather than after. Some compute might be saved.
# Create generated caption text.
generated_cap_images = torch.stack([
utils.create_image_of_text(
generated_captions[i].encode('ascii', 'ignore'),
width=normalized_images.shape[3],
color=(255, 255, 0))
for i in range(len(generated_captions))], axis=0)
# Duplicate captions if we concatenated them.
if (args.concat_captions_prob > 0 and model_mode == 'captioning' and generated_cap_images.shape[0] != caption_images.shape[0]):
generated_cap_images = torch.cat([generated_cap_images, generated_cap_images], axis=0)
display_images = torch.cat([normalized_images.float().cpu(), caption_images, generated_cap_images], axis=2)[:max_images_to_show]
grid = torchvision.utils.make_grid(display_images, nrow=int(max_images_to_show ** 0.5), padding=4)
writer.add_image('train/images_gen_cap', grid, actual_step)
# Retrieved images (from text).
retrieved_image_idx = logits_per_text[:image_bs, :image_bs].argmax(-1)
t2i_images = torch.stack(
[normalized_images[retrieved_image_idx[i], ...] for i in range(len(retrieved_image_idx))],
axis=0)
t2i_images = torch.cat([t2i_images.float().cpu(), caption_images], axis=2)[:max_images_to_show]
t2i_grid = torchvision.utils.make_grid(t2i_images, nrow=int(max_images_to_show ** 0.5), padding=4)
writer.add_image('train/t2i_ret', t2i_grid, actual_step)
# Retrieved text (from image).
retrieved_text_idx = logits_per_image[:image_bs, :image_bs].argmax(-1)
        retrieved_text = torch.stack(
          [caption_images[retrieved_text_idx[i], ...] for i in range(len(retrieved_text_idx))],
          axis=0)
i2t_images = torch.cat([normalized_images.float().cpu(), retrieved_text], axis=2)[:max_images_to_show]
i2t_grid = torchvision.utils.make_grid(i2t_images, nrow=int(max_images_to_show ** 0.5), padding=4)
writer.add_image('train/i2t_ret', i2t_grid, actual_step)
batch_time.reset()
cap_time.reset()
ret_time.reset()
data_time.reset()
losses.reset()
ce_losses.reset()
top1.reset()
top5.reset()
ret_vis_emb_norm.reset()
cap_vis_emb_norm.reset()
inp_emb_norm.reset()
all_emb_norm.reset()
ret_emb_norm.reset()
cont_losses.reset()
gen_losses.reset()
top1_caption.reset()
top5_caption.reset()
top1_image.reset()
top5_image.reset()
if i == args.steps_per_epoch - 1:
break
scheduler.step()
curr_lr = scheduler.get_last_lr()
if (actual_step == 1) or (i + 1) % args.print_freq == 0:
# Write current learning rate to Tensorboard.
writer = SummaryWriter(args.log_dir)
writer.add_scalar('train/lr', curr_lr[0], actual_step)
writer.close()
writer.close()
# Disable tokenizer parallelism.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
if __name__ == '__main__':
main(sys.argv[1:]) | gill-main | main.py |
import tempfile
from share_btn import community_icon_html, loading_icon_html, share_js, save_js
import huggingface_hub
import gradio as gr
from gill import utils
from gill import models
import matplotlib.pyplot as plt
from PIL import Image
import torch
import numpy as np
import os
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "False"
css = """
#chatbot { min-height: 300px; }
#save-btn {
background-image: linear-gradient(to right bottom, rgba(130,217,244, 0.9), rgba(158,231,214, 1.0));
}
#save-btn:hover {
background-image: linear-gradient(to right bottom, rgba(110,197,224, 0.9), rgba(138,211,194, 1.0));
}
#share-btn {
background-image: linear-gradient(to right bottom, rgba(130,217,244, 0.9), rgba(158,231,214, 1.0));
}
#share-btn:hover {
background-image: linear-gradient(to right bottom, rgba(110,197,224, 0.9), rgba(138,211,194, 1.0));
}
#gallery { z-index: 999999; }
#gallery img:hover {transform: scale(2.3); z-index: 999999; position: relative; padding-right: 30%; padding-bottom: 30%;}
#gallery button img:hover {transform: none; z-index: 999999; position: relative; padding-right: 0; padding-bottom: 0;}
@media (hover: none) {
    #gallery img:hover {transform: none; z-index: 999999; position: relative; padding-right: 0; padding-bottom: 0;}
}
.html2canvas-container { width: 3000px !important; height: 3000px !important; }
"""
examples = [
'examples/ramen.png',
'examples/cake.png',
'examples/couch.png',
'examples/tattoo.png',
'examples/cupcakes.png',
]
# Download model from HF Hub.
ckpt_path = huggingface_hub.hf_hub_download(
repo_id='jykoh/gill', filename='pretrained_ckpt.pth.tar')
decision_model_path = huggingface_hub.hf_hub_download(
repo_id='jykoh/gill', filename='decision_model.pth.tar')
args_path = huggingface_hub.hf_hub_download(
repo_id='jykoh/gill', filename='model_args.json')
model = models.load_gill('./', args_path, ckpt_path, decision_model_path)
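# Gradio callback: resize the uploaded image to 224x224, append it to the rendered conversation,
# and store the PIL image in the chat history that is fed back to the model.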
def upload_image(state, image_input):
conversation = state[0]
chat_history = state[1]
input_image = Image.open(image_input.name).resize(
(224, 224)).convert('RGB')
input_image.save(image_input.name) # Overwrite with smaller image.
conversation += [(f'<img src="./file={image_input.name}" style="display: inline-block;">', "")]
return [conversation, chat_history + [input_image, ""]], conversation
def reset():
return [[], []], []
def reset_last(state):
conversation = state[0][:-1]
chat_history = state[1][:-2]
return [conversation, chat_history], conversation
def save_image_to_local(image: Image.Image):
# TODO(jykoh): Update so the url path is used, to prevent repeat saving.
filename = next(tempfile._get_candidate_names()) + '.png'
image.save(filename)
return filename
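# Main chat callback: formats the user message as a 'Q: ...\nA:' prompt, prepends the chat
# history (including any uploaded images), runs GILL, and renders the returned text and any
# retrieved or generated images as HTML for the chatbot widget.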
def generate_for_prompt(input_text, state, ret_scale_factor, num_words, temperature):
g_cuda = torch.Generator(device='cuda').manual_seed(1337)
# Ignore empty inputs.
if len(input_text) == 0:
return state, state[0], gr.update(visible=True)
input_prompt = 'Q: ' + input_text + '\nA:'
conversation = state[0]
chat_history = state[1]
print('Generating for', chat_history, flush=True)
# If an image was uploaded, prepend it to the model.
model_inputs = chat_history
model_inputs.append(input_prompt)
# Remove empty text.
model_inputs = [s for s in model_inputs if s != '']
top_p = 1.0
if temperature != 0.0:
top_p = 0.95
print('Running model.generate_for_images_and_texts with', model_inputs, flush=True)
model_outputs = model.generate_for_images_and_texts(model_inputs,
num_words=max(num_words, 1), ret_scale_factor=ret_scale_factor, top_p=top_p,
temperature=temperature, max_num_rets=1,
num_inference_steps=50, generator=g_cuda)
print('model_outputs', model_outputs, ret_scale_factor, flush=True)
response = ''
text_outputs = []
for output_i, p in enumerate(model_outputs):
if type(p) == str:
if output_i > 0:
response += '<br/>'
# Remove the image tokens for output.
text_outputs.append(p.replace('[IMG0] [IMG1] [IMG2] [IMG3] [IMG4] [IMG5] [IMG6] [IMG7]', ''))
response += p
if len(model_outputs) > 1:
response += '<br/>'
elif type(p) == dict:
# Decide whether to generate or retrieve.
if p['decision'] is not None and p['decision'][0] == 'gen':
image = p['gen'][0][0]#.resize((224, 224))
filename = save_image_to_local(image)
response += f'<img src="./file={filename}" style="display: inline-block;"><p style="font-size: 12px; color: #555; margin-top: 0;">(Generated)</p>'
else:
image = p['ret'][0][0]#.resize((224, 224))
filename = save_image_to_local(image)
response += f'<img src="./file={filename}" style="display: inline-block;"><p style="font-size: 12px; color: #555; margin-top: 0;">(Retrieved)</p>'
chat_history = model_inputs + \
[' '.join([s for s in model_outputs if type(s) == str]) + '\n']
# Remove [RET] from outputs.
conversation.append((input_text, response.replace('[IMG0] [IMG1] [IMG2] [IMG3] [IMG4] [IMG5] [IMG6] [IMG7]', '')))
# Set input image to None.
print('state', state, flush=True)
print('updated state', [conversation, chat_history], flush=True)
return [conversation, chat_history], conversation, gr.update(visible=True), gr.update(visible=True)
with gr.Blocks(css=css) as demo:
gr.HTML("""
<h1>🐟 GILL</h1>
<p>This is the official Gradio demo for the GILL model, a model that can process arbitrarily interleaved image and text inputs, and produce image and text outputs.</p>
<strong>Paper:</strong> <a href="https://arxiv.org/abs/2305.17216" target="_blank">Generating Images with Multimodal Language Models</a>
<br/>
<strong>Project Website:</strong> <a href="https://jykoh.com/gill" target="_blank">GILL Website</a>
<br/>
<strong>Code and Models:</strong> <a href="https://github.com/kohjingyu/gill" target="_blank">GitHub</a>
<br/>
<br/>
<strong>Tips:</strong>
<ul>
<li>Start by inputting either image or text prompts (or both) and chat with GILL to get image-and-text replies.</li>
<li>Tweak the level of sensitivity to images and text using the parameters on the right.</li>
<li>Check out cool conversations in the examples or community tab for inspiration and share your own!</li>
<li>If the model outputs a blank image, it is because Stable Diffusion's safety filter detected inappropriate content. Please try again with a different prompt.</li>
<li>Outputs may differ slightly from the paper due to slight implementation differences. For reproducing paper results, please use our <a href="https://github.com/kohjingyu/gill" target="_blank">official code</a>.</li>
<li>For faster inference without waiting in queue, you may duplicate the space and use your own GPU: <a href="https://huggingface.co/spaces/jykoh/gill?duplicate=true"><img style="display: inline-block; margin-top: 0em; margin-bottom: 0em" src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></li>
</ul>
""")
gr_state = gr.State([[], []]) # conversation, chat_history
with gr.Row():
with gr.Column(scale=0.7, min_width=500):
with gr.Row():
chatbot = gr.Chatbot(elem_id="chatbot", label="🐟 GILL Chatbot")
with gr.Row():
image_btn = gr.UploadButton("🖼️ Upload Image", file_types=["image"])
text_input = gr.Textbox(label="Message", placeholder="Type a message")
with gr.Column():
submit_btn = gr.Button(
"Submit", interactive=True, variant="primary")
clear_last_btn = gr.Button("Undo")
clear_btn = gr.Button("Reset All")
with gr.Row(visible=False) as save_group:
save_button = gr.Button("💾 Save Conversation as .png", elem_id="save-btn")
with gr.Row(visible=False) as share_group:
share_button = gr.Button("🤗 Share to Community (opens new window)", elem_id="share-btn")
with gr.Column(scale=0.3, min_width=400):
ret_scale_factor = gr.Slider(minimum=0.0, maximum=3.0, value=1.3, step=0.1, interactive=True,
label="Frequency multiplier for returning images (higher means more frequent)")
gr_max_len = gr.Slider(minimum=1, maximum=64, value=32,
step=1, interactive=True, label="Max # of words")
gr_temperature = gr.Slider(
minimum=0.0, maximum=1.0, value=0.0, step=0.1, interactive=True, label="Temperature (0 for deterministic, higher for more randomness)")
gallery = gr.Gallery(
value=[Image.open(e) for e in examples], label="Example Conversations", show_label=True, elem_id="gallery",
).style(grid=[2], height="auto")
text_input.submit(generate_for_prompt, [text_input, gr_state, ret_scale_factor,
gr_max_len, gr_temperature], [gr_state, chatbot, share_group, save_group])
text_input.submit(lambda: "", None, text_input) # Reset chatbox.
submit_btn.click(generate_for_prompt, [text_input, gr_state, ret_scale_factor,
gr_max_len, gr_temperature], [gr_state, chatbot, share_group, save_group])
submit_btn.click(lambda: "", None, text_input) # Reset chatbox.
image_btn.upload(upload_image, [gr_state, image_btn], [gr_state, chatbot])
clear_last_btn.click(reset_last, [gr_state], [gr_state, chatbot])
clear_btn.click(reset, [], [gr_state, chatbot])
share_button.click(None, [], [], _js=share_js)
save_button.click(None, [], [], _js=save_js)
demo.queue(concurrency_count=1, api_open=False, max_size=16)
demo.launch(debug=True, server_name="0.0.0.0")
| gill-main | demo/app_gradio.py |
# Modified from https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation/blob/79681cd8cb235160a27cdd100673346eb1784e53/share_btn.py
community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
<path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
<path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
</svg>"""
loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
style="color: #ffffff;
"
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
share_js = """
async () => {
const html2canvas = (await import('https://cdnjs.cloudflare.com/ajax/libs/html2canvas/1.4.1/html2canvas.esm.js')).default;
async function uploadFile(file) {
console.log(file.type)
const UPLOAD_URL = 'https://huggingface.co/uploads';
const response = await fetch(UPLOAD_URL, {
method: 'POST',
headers: {
'Content-Type': file.type,
'X-Requested-With': 'XMLHttpRequest',
},
body: file, /// <- File inherits from Blob
});
const url = await response.text();
return url;
}
async function getImageFile(div) {
let chatbot = document.getElementById("chatbot");
chatbot.style.height = "";
return new Promise((resolve, reject) =>
html2canvas(div)
.then((canvas) => {
chatbot.style.height = "400px";
const imageBlob = canvas.toBlob((blob) => {
const imageId = Date.now();
const fileName = "GILL-" + imageId + ".jpg";
resolve(new File([blob], fileName, { type: 'image/jpeg' }));
}, 'image/jpeg', 0.95);
})
)
}
const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app');
const chatbotEl = gradioEl.querySelector('#chatbot')
const imageFile = await getImageFile(chatbotEl);
console.log(imageFile);
const urlChatbotImage = await uploadFile(imageFile);
console.log(urlChatbotImage);
let titleTxt = `GILL Example`;
//const shareBtnEl = gradioEl.querySelector('#share-btn');
//shareBtnEl.style.pointerEvents = 'none';
const descriptionMd = `
<img src='${urlChatbotImage}'>
`;
const params = new URLSearchParams({
title: titleTxt,
description: descriptionMd,
});
const paramsStr = params.toString();
window.open(`https://huggingface.co/spaces/jykoh/gill/discussions/new?${paramsStr}`, '_blank');
//shareBtnEl.style.removeProperty('pointer-events');
}
"""
save_js = """
async () => {
const html2canvas = (await import('https://cdnjs.cloudflare.com/ajax/libs/html2canvas/1.4.1/html2canvas.esm.js')).default;
function saveAs(uri, filename) {
var link = document.createElement('a');
if (typeof link.download === 'string') {
link.href = uri;
link.download = filename;
//Firefox requires the link to be in the body
document.body.appendChild(link);
//simulate click
link.click();
//remove the link when done
document.body.removeChild(link);
} else {
window.open(uri);
}
}
async function getImageFile(div) {
let chatbot = document.getElementById("chatbot");
chatbot.style.height = "";
return new Promise((resolve, reject) =>
html2canvas(div)
.then((canvas) => {
chatbot.style.height = "400px";
const imageId = Date.now();
const fileName = "GILL-" + imageId + ".png";
saveAs(canvas.toDataURL(), fileName);
})
)
}
const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app');
const chatbotEl = gradioEl.querySelector('#chatbot')
const imageFile = await getImageFile(chatbotEl);
console.log(imageFile);
}
""" | gill-main | demo/share_btn.py |
from typing import List, Optional
from collections import namedtuple
from diffusers import StableDiffusionPipeline
import json
import numpy as np
import os
import glob
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
import pickle as pkl
from PIL import Image, UnidentifiedImageError
from requests.exceptions import ConnectionError
from transformers import AutoTokenizer, AutoModel, CLIPVisionModel, OPTForCausalLM
from gill import utils
from gill import layers
class GILLArgs:
freeze_lm: bool = True
freeze_vm: bool = True
opt_version: str = 'facebook/opt-6.7b'
visual_encoder: str = 'openai/clip-vit-large-patch14'
n_visual_tokens: int = 1
task: str = 'captioning'
ret_emb_dim: Optional[int] = 256
gen_emb_dim: Optional[int] = 256
text_emb_layers: List[int] = [-1]
gen_token_idx: List[int] = [0]
retrieval_token_idx: List[int] = [0]
text_fc_mode: str = 'gill_mapper'
ret_text_fc_mode: str = 'linear'
num_tokens: int = 8
num_clip_tokens: int = 77
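# Core GILL module: a frozen OPT language model plus a frozen CLIP vision encoder, with small
# trainable projections that map between the LM embedding space and the retrieval / generation
# embedding spaces.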
class GILLModel(nn.Module):
def __init__(self, tokenizer, args: GILLArgs = GILLArgs()):
super().__init__()
self.tokenizer = tokenizer
self.feature_extractor = utils.get_feature_extractor_for_model(args.visual_encoder, train=False)
self.image_token = self.tokenizer.cls_token_id
    assert len(args.text_emb_layers) == len(set(args.text_emb_layers)), 'text_emb_layers not unique'
self.args = args
self.num_tokens = args.num_tokens
self.num_clip_tokens = args.num_clip_tokens
opt_version = args.opt_version
visual_encoder = args.visual_encoder
n_visual_tokens = args.n_visual_tokens
print(f"Using {opt_version} for the language model.")
print(f"Using {visual_encoder} for the visual model with {n_visual_tokens} visual tokens.")
if 'facebook/opt' in opt_version:
self.lm = OPTForCausalLM.from_pretrained(opt_version)
else:
raise NotImplementedError
self.opt_version = opt_version
if self.args.freeze_lm:
self.lm.eval()
print("Freezing the LM.")
for param in self.lm.parameters():
param.requires_grad = False
else:
self.lm.train()
self.retrieval_token_idx = args.retrieval_token_idx
self.gen_token_idx = args.gen_token_idx
self.lm.resize_token_embeddings(len(tokenizer))
self.input_embeddings = self.lm.get_input_embeddings()
print("Restoring pretrained weights for the visual model.")
if 'clip' in visual_encoder:
self.visual_model = CLIPVisionModel.from_pretrained(visual_encoder)
else:
self.visual_model = AutoModel.from_pretrained(visual_encoder)
if 'clip' in visual_encoder:
hidden_size = self.visual_model.config.hidden_size
else:
raise NotImplementedError
if self.args.freeze_vm:
print("Freezing the VM.")
self.visual_model.eval()
for param in self.visual_model.parameters():
param.requires_grad = False
else:
self.visual_model.train()
self.visual_model_name = visual_encoder
embedding_dim = self.input_embeddings.embedding_dim * self.args.n_visual_tokens
self.ret_text_hidden_fcs = nn.ModuleList([])
self.gen_text_hidden_fcs = nn.ModuleList([])
for layer_idx in self.args.text_emb_layers:
if (layer_idx == -1 or layer_idx == self.lm.config.num_hidden_layers) and ('bert' not in opt_version):
if 'opt' in opt_version: # OPT models
in_dim = self.lm.config.word_embed_proj_dim
else:
raise NotImplementedError
self.ret_text_hidden_fcs.append(
layers.TextFcLayer(in_dim, self.args.ret_emb_dim, num_input_tokens=self.args.num_tokens,
num_output_tokens=1, mode=self.args.ret_text_fc_mode))
self.gen_text_hidden_fcs.append(
layers.TextFcLayer(in_dim, self.args.gen_emb_dim, num_input_tokens=self.args.num_tokens,
num_output_tokens=self.args.num_clip_tokens, mode=self.args.text_fc_mode))
elif layer_idx < self.lm.config.num_hidden_layers:
self.ret_text_hidden_fcs.append(layers.TextFcLayer(self.lm.config.hidden_size, self.args.ret_emb_dim, num_input_tokens=self.args.num_tokens, num_output_tokens=1, mode=self.args.ret_text_fc_mode))
self.gen_text_hidden_fcs.append(layers.TextFcLayer(self.lm.config.hidden_size, self.args.gen_emb_dim, num_input_tokens=self.args.num_tokens, num_output_tokens=self.args.num_clip_tokens, mode=self.args.text_fc_mode))
else:
raise ValueError(f'Embedding of layer {layer_idx} was requested but model only has {self.lm.config.num_hidden_layers} layers.')
self.visual_embeddings = nn.Linear(hidden_size, embedding_dim)
# Retrieval image FC layer.
self.visual_fc = nn.Linear(hidden_size, self.args.ret_emb_dim)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
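  # Maps raw pixel values through the CLIP vision encoder and projects the pooled output with
  # the mode-specific head: LM-space visual tokens for captioning, a single retrieval embedding
  # for retrieval, or a zero placeholder for generation (no image conditioning is needed there).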
def get_visual_embs(self, pixel_values: torch.FloatTensor, mode: str = 'captioning'):
if mode not in ['captioning', 'retrieval', 'generation']:
raise ValueError(f"mode should be one of ['captioning', 'retrieval', 'generation'], got {mode} instead.")
# Extract visual embeddings from the vision encoder.
if 'clip' in self.visual_model_name:
outputs = self.visual_model(pixel_values)
encoder_outputs = outputs.pooler_output
else:
raise NotImplementedError
# Use the correct fc based on function argument.
if mode == 'captioning':
visual_embs = self.visual_embeddings(encoder_outputs) # (2, D * n_visual_tokens)
visual_embs = torch.reshape(visual_embs, (visual_embs.shape[0], self.args.n_visual_tokens, -1))
elif mode == 'retrieval':
visual_embs = self.visual_fc(encoder_outputs) # (2, D * n_visual_tokens)
visual_embs = torch.reshape(visual_embs, (visual_embs.shape[0], 1, -1))
elif mode == 'generation':
visual_embs = torch.zeros((pixel_values.shape[0], 1, 768), device=pixel_values.device)
else:
raise NotImplementedError
return visual_embs
def train(self, mode=True):
super(GILLModel, self).train(mode=mode)
# Overwrite train() to ensure frozen models remain frozen.
if self.args.freeze_lm:
self.lm.eval()
if self.args.freeze_vm:
self.visual_model.eval()
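  # Forward pass: embeds the caption tokens, prepends visual (and optional prompt) embeddings
  # for captioning, optionally concatenates pairs of examples, runs the LM, and for retrieval /
  # generation extracts the hidden states at the [IMG] positions and passes them through the
  # corresponding TextFcLayer heads.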
def forward(
self,
pixel_values: torch.FloatTensor,
labels: Optional[torch.LongTensor] = None,
caption_len: Optional[torch.LongTensor] = None,
mode: str = 'captioning',
concat_captions: bool = False,
input_prefix: Optional[str] = None,
):
visual_embs = self.get_visual_embs(pixel_values, mode)
batch_size, vis_seq_len, _ = visual_embs.shape # vis_seq_len = n_visual_tokens
if labels is not None:
assert labels.shape[0] == batch_size, (visual_embs.shape, labels.shape)
visual_embs_norm = ((visual_embs ** 2).sum(dim=-1) ** 0.5).mean()
input_embs = self.input_embeddings(labels) # (N, T, D)
input_embs_norm = ((input_embs ** 2).sum(dim=-1) ** 0.5).mean()
last_embedding_idx = caption_len - 1 # -1 to retrieve the token before the eos token
if input_prefix is not None:
prompt_ids = self.tokenizer(input_prefix, add_special_tokens=False, return_tensors="pt").input_ids
prompt_ids = prompt_ids.to(visual_embs.device)
prompt_embs = self.input_embeddings(prompt_ids)
prompt_embs = prompt_embs.repeat(batch_size, 1, 1)
assert prompt_embs.shape[0] == batch_size, prompt_embs.shape
assert prompt_embs.shape[2] == input_embs.shape[2], prompt_embs.shape
assert len(prompt_embs.shape) == 3, prompt_embs.shape
if mode == 'captioning':
# Concat to text embeddings.
condition_seq_len = 0
if input_prefix is None:
# Just add visual embeddings.
input_embs = torch.cat([visual_embs, input_embs], axis=1)
last_embedding_idx += vis_seq_len
condition_seq_len += vis_seq_len
full_labels = torch.zeros(visual_embs.shape[:2], dtype=torch.int64).to(visual_embs.device) - 100
else:
print(f'Adding prefix "{input_prefix}" to captioning.')
# Add visual and prompt embeddings.
prefix_embs = torch.cat([visual_embs, prompt_embs], axis=1)
input_embs = torch.cat([prefix_embs, input_embs], axis=1)
last_embedding_idx += prefix_embs.shape[1]
condition_seq_len += prefix_embs.shape[1]
full_labels = torch.zeros(prefix_embs.shape[:2], dtype=torch.int64).to(visual_embs.device) - 100
# Mask out embedding tokens in the labels.
full_labels = torch.cat([full_labels, labels], axis=1)
pad_idx = []
for label in full_labels:
for k, token in enumerate(label):
# Mask out retrieval/gen tokens if they exist.
if token in [self.tokenizer.pad_token_id] + self.retrieval_token_idx + self.gen_token_idx:
label[k:] = -100
pad_idx.append(k)
break
if k == len(label) - 1: # No padding found.
pad_idx.append(k + 1)
assert len(pad_idx) == batch_size, (len(pad_idx), batch_size)
bs, seq_len, embs_dim = input_embs.shape
if concat_captions:
print('Concatenating examples for captioning!')
assert len(input_embs.shape) == 3, input_embs
assert len(full_labels.shape) == 2, full_labels
assert batch_size % 2 == 0
all_concat_input_embs = []
all_concat_labels = []
# Rearrange embeddings and labels (and their padding) to concatenate captions.
for i in range(batch_size // 2):
first_idx = i * 2
second_idx = first_idx + 1
first_emb = input_embs[first_idx, :pad_idx[first_idx], :]
first_labels = full_labels[first_idx, :pad_idx[first_idx]]
first_padding = input_embs[first_idx, pad_idx[first_idx]:, :]
first_labels_padding = full_labels[first_idx, pad_idx[first_idx]:]
second_emb = input_embs[second_idx, :pad_idx[second_idx], :]
second_labels = full_labels[second_idx, :pad_idx[second_idx]]
second_padding = input_embs[second_idx, pad_idx[second_idx]:, :]
second_labels_padding = full_labels[second_idx, pad_idx[second_idx]:]
bos_idx = visual_embs.shape[1]
assert torch.all(first_labels_padding == -100), first_labels_padding
assert torch.all(second_labels_padding == -100), second_labels_padding
assert torch.all(second_labels[bos_idx] == self.tokenizer.bos_token_id), (second_labels, bos_idx, self.tokenizer.bos_token_id)
# Remove BOS token of the second caption.
second_labels = torch.cat([second_labels[:bos_idx], second_labels[bos_idx + 1:]], axis=0)
second_emb = torch.cat([second_emb[:bos_idx, :], second_emb[bos_idx + 1:, :]], axis=0)
concat_input_embs = torch.cat([first_emb, second_emb, first_padding, second_padding], axis=0) # (T*2, 768)
concat_labels = torch.cat([first_labels, second_labels, first_labels_padding, second_labels_padding], axis=0) # (T*2, 768)
all_concat_input_embs.append(concat_input_embs)
all_concat_labels.append(concat_labels)
# Pad to max length.
input_embs = torch.stack(all_concat_input_embs, axis=0) # (N/2, T*2, 768)
full_labels = torch.stack(all_concat_labels, axis=0) # (N/2, T*2, 768)
print("Concatenated full_labels:", full_labels[0, ...])
assert input_embs.shape == (bs // 2, seq_len * 2 - 1, embs_dim), input_embs.shape
assert full_labels.shape == (bs // 2, seq_len * 2 - 1), full_labels.shape
output = self.lm(inputs_embeds=input_embs,
labels=full_labels,
output_hidden_states=True)
elif mode in ['retrieval', 'generation']:
full_labels = torch.clone(labels)
if input_prefix is not None:
print(f'Adding prefix "{input_prefix}" to retrieval.')
# Add prompt embeddings.
prefix_embs = prompt_embs
input_embs = torch.cat([prefix_embs, input_embs], axis=1)
last_embedding_idx += prefix_embs.shape[1]
full_labels = torch.cat([
torch.zeros(prefix_embs.shape[:2], dtype=torch.int64).to(labels.device) - 100,
full_labels
], axis=1)
pad_idx = []
for label in full_labels:
for k, token in enumerate(label):
if (token == self.tokenizer.pad_token_id):
label[k:] = -100
pad_idx.append(k)
break
if k == len(label) - 1: # No padding found.
pad_idx.append(k + 1)
assert len(pad_idx) == batch_size, (len(pad_idx), batch_size)
bs, seq_len, embs_dim = input_embs.shape
# Concatenate examples for captioning, if specified.
if concat_captions:
print(f'Concatenating examples for {mode}!')
assert len(input_embs.shape) == 3, input_embs
assert len(full_labels.shape) == 2, full_labels
assert batch_size % 2 == 0
all_concat_input_embs = []
all_concat_labels = []
all_last_embedding_idx = []
# Rearrange embeddings and labels (and their padding) to concatenate captions.
for i in range(batch_size // 2):
first_idx = i * 2
second_idx = first_idx + 1
first_emb = input_embs[first_idx, :pad_idx[first_idx], :]
first_labels = full_labels[first_idx, :pad_idx[first_idx]]
first_padding = input_embs[first_idx, pad_idx[first_idx]:, :]
first_labels_padding = full_labels[first_idx, pad_idx[first_idx]:]
second_emb = input_embs[second_idx, :pad_idx[second_idx], :]
second_labels = full_labels[second_idx, :pad_idx[second_idx]]
second_padding = input_embs[second_idx, pad_idx[second_idx]:, :]
second_labels_padding = full_labels[second_idx, pad_idx[second_idx]:]
bos_idx = 0
assert torch.all(first_labels_padding == -100), first_labels_padding
assert torch.all(second_labels_padding == -100), second_labels_padding
assert torch.all(second_labels[bos_idx] == self.tokenizer.bos_token_id), (second_labels, bos_idx, self.tokenizer.bos_token_id)
# Remove BOS token of second caption.
second_labels = second_labels[bos_idx + 1:]
second_emb = second_emb[bos_idx + 1:, :]
last_embedding_idx[second_idx] = last_embedding_idx[second_idx] - 1
concat_input_embs = torch.cat([first_emb, second_emb, first_padding, second_padding], axis=0) # (T*2, 768)
concat_labels = torch.cat([first_labels, second_labels, first_labels_padding, second_labels_padding], axis=0) # (T*2, 768)
all_concat_input_embs.append(concat_input_embs)
all_concat_labels.append(concat_labels)
all_last_embedding_idx.append((last_embedding_idx[first_idx], first_emb.shape[0] + last_embedding_idx[second_idx]))
if mode == 'retrieval':
assert concat_labels[all_last_embedding_idx[-1][0]] in self.retrieval_token_idx, (concat_labels, all_last_embedding_idx[-1][0])
assert concat_labels[all_last_embedding_idx[-1][1]] in self.retrieval_token_idx, (concat_labels, all_last_embedding_idx[-1][1])
elif mode == 'generation':
# Check that the last n tokens are GEN tokens.
for gen_i in range(len(self.gen_token_idx)):
assert concat_labels[all_last_embedding_idx[-1][0]-gen_i] == self.gen_token_idx[-gen_i-1], (concat_labels, all_last_embedding_idx[-1][0]-gen_i, self.gen_token_idx[-gen_i-1])
assert concat_labels[all_last_embedding_idx[-1][1]-gen_i] == self.gen_token_idx[-gen_i-1], (concat_labels, all_last_embedding_idx[-1][1]-gen_i, self.gen_token_idx[-gen_i-1])
# Stack the concatenated examples back into a batch (their padding segments were carried along above).
input_embs = torch.stack(all_concat_input_embs, axis=0) # (N/2, T*2 - 1, 768)
full_labels = torch.stack(all_concat_labels, axis=0) # (N/2, T*2 - 1)
assert input_embs.shape == (bs // 2, seq_len * 2 - 1, embs_dim), input_embs.shape
assert full_labels.shape == (bs // 2, seq_len * 2 - 1), full_labels.shape
# Mask labels from the first pad token, or from the second [IMG]/[GEN] token, onward so that these positions do not contribute to the LM loss.
for label in full_labels:
for k, token in enumerate(label):
if (token == self.tokenizer.pad_token_id) or (token in (self.retrieval_token_idx[1:] + self.gen_token_idx[1:])):
label[k:] = -100
break
output = self.lm(inputs_embeds=input_embs,
labels=full_labels,
output_hidden_states=True)
else:
raise NotImplementedError
last_embedding = None
last_output_logit = None
hidden_states = []
llm_hidden_states = []
if mode in ['retrieval', 'generation']:
num_tokens = self.num_tokens
if mode == 'retrieval':
text_hidden_fcs = self.ret_text_hidden_fcs
else:
text_hidden_fcs = self.gen_text_hidden_fcs
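# Each fc layer in text_hidden_fcs maps the LM hidden states at the [IMG]/[GEN] token positions into the retrieval or generation embedding space.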
# Concatenate captions for retrieval / generation, if specified.
if not concat_captions:
for idx, fc_layer in zip(self.args.text_emb_layers, text_hidden_fcs):
input_hidden_state = torch.stack([output.hidden_states[idx][i, last_embedding_idx[i]-num_tokens+1:last_embedding_idx[i]+1, :] for i in range(batch_size)], axis=0)
input_embedding = torch.stack([input_embs[i, last_embedding_idx[i]-num_tokens+1:last_embedding_idx[i]+1, :] for i in range(batch_size)], axis=0)
llm_hidden_states.append(input_hidden_state)
hidden_states.append(fc_layer(input_hidden_state, input_embedding)) # (N, seq_len, 2048)
else:
for idx, fc_layer in zip(self.args.text_emb_layers, text_hidden_fcs):
all_last_embedding = []
all_input_embedding = []
all_last_output_logit = []
for i in range(batch_size // 2):
first_last_embedding_idx, second_last_embedding_idx = all_last_embedding_idx[i]
first_last_embedding = output.hidden_states[idx][i, first_last_embedding_idx-num_tokens+1:first_last_embedding_idx+1, :] # (num_tokens, D)
second_last_embedding = output.hidden_states[idx][i, second_last_embedding_idx-num_tokens+1:second_last_embedding_idx+1, :] # (num_tokens, D)
all_last_embedding.append(first_last_embedding)
all_last_embedding.append(second_last_embedding)
first_input_embs = input_embs[i, first_last_embedding_idx-num_tokens+1:first_last_embedding_idx+1, :] # (num_tokens, D)
second_input_embs = input_embs[i, second_last_embedding_idx-num_tokens+1:second_last_embedding_idx+1, :] # (num_tokens, D)
all_input_embedding.append(first_input_embs)
all_input_embedding.append(second_input_embs)
first_last_output_logit = output.logits[i, first_last_embedding_idx - 1, :] # (vocab_size,)
second_last_output_logit = output.logits[i, second_last_embedding_idx - 1, :] # (vocab_size,)
all_last_output_logit.append(first_last_output_logit)
all_last_output_logit.append(second_last_output_logit)
last_embedding = torch.stack(all_last_embedding, axis=0)
input_embedding = torch.stack(all_input_embedding, axis=0)
last_output_logit = torch.stack(all_last_output_logit, axis=0)
llm_hidden_states.append(last_embedding)
hidden_states.append(fc_layer(last_embedding, input_embedding)) # (N, seq_len, 2048)
if not concat_captions:
# Add hidden states together.
last_embedding = torch.stack(hidden_states, dim=-1).sum(dim=-1) # (N, num_tokens, D)
last_output_logit = torch.stack([output.logits[i, last_embedding_idx[i] - 1, :] for i in range(batch_size)], axis=0) # (N, D)
else:
# Add hidden states together.
last_embedding = torch.stack(hidden_states, dim=-1).sum(dim=-1)
# Prepare embeddings for the contrastive retrieval loss (computed by the caller): L2-normalize both sides and scale the visual embeddings by the learned temperature.
if mode == 'retrieval':
assert visual_embs.shape[1] == 1, visual_embs.shape
assert last_embedding.shape[1] == 1, last_embedding.shape
visual_embs = visual_embs[:, 0, :]
visual_embs = visual_embs / visual_embs.norm(dim=1, keepdim=True)
last_embedding = last_embedding[:, 0, :]
last_embedding = last_embedding / last_embedding.norm(dim=1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
visual_embs = logit_scale * visual_embs
elif mode == 'captioning':
pass
else:
raise NotImplementedError
return output, full_labels, last_embedding, last_output_logit, visual_embs, visual_embs_norm, input_embs_norm, llm_hidden_states
def generate(self, embeddings: torch.FloatTensor, max_len: int = 32,
temperature: float = 0.0, top_p: float = 1.0, min_word_tokens: int = 0,
ret_scale_factor: float = 1.0, gen_scale_factor: float = 1.0,
filter_value: float = -float('Inf')):
"""Runs greedy decoding and returns generated captions.
Args:
min_word_tokens: Minimum number of words to generate before allowing a [IMG] output.
filter_value: Value to assign to tokens that should never be generated.
Outputs:
out: (N, T) int32 sequence of output tokens.
output_embeddings: (N, T, 256) sequence of text output embeddings.
"""
self.lm.eval()
with torch.no_grad(): # no tracking history
# Accumulate generated token ids incrementally.
out = None
output_embeddings = []
output_logits = []
for i in range(max_len):
output = self.lm(inputs_embeds=embeddings, use_cache=False, output_hidden_states=True)
for idx in self.args.text_emb_layers:
output_embeddings.append(output.hidden_states[idx])
logits = output.logits[:, -1, :] # (N, vocab_size)
if top_p == 1.0:
logits = logits.cpu()
output_logits.append(logits)
# Prevent the model from generating the [IMG1..n] tokens.
logits[:, self.retrieval_token_idx[1:]] = filter_value
logits[:, self.gen_token_idx[1:]] = filter_value
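# Only apply the special-token handling below if [IMG]/[GEN] tokens were actually added to the vocabulary (-1 appears to be the sentinel for unset tokens).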
if (self.retrieval_token_idx or self.gen_token_idx) and self.retrieval_token_idx[0] != -1 and self.gen_token_idx[0] != -1:
if i < min_word_tokens:
# Eliminate probability of generating [IMG] if this is earlier than min_word_tokens.
logits[:, self.retrieval_token_idx] = filter_value
logits[:, self.gen_token_idx] = filter_value
else:
# Multiply by scaling factor.
if ret_scale_factor > 1:
logits[:, self.retrieval_token_idx[0]] = logits[:, self.retrieval_token_idx[0]].abs() * ret_scale_factor
if gen_scale_factor > 1:
logits[:, self.gen_token_idx[0]] = logits[:, self.gen_token_idx[0]].abs() * gen_scale_factor
if temperature == 0.0:
if top_p != 1.0:
raise ValueError('top_p cannot be set if temperature is 0 (greedy decoding).')
next_token = torch.argmax(logits, keepdim=True, dim=-1) # (N, 1)
else:
logits = logits / temperature
# Apply top-p filtering.
if top_p < 1.0:
assert top_p > 0, f'top_p should be above 0, got {top_p} instead.'
sorted_logits, sorted_indices = torch.sort(logits, descending=True) # (N, D) and (N, D)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # (N, D)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
for j in range(sorted_indices.shape[0]):
indices_to_remove = sorted_indices[j, sorted_indices_to_remove[j, :]]
logits[j, indices_to_remove] = filter_value
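# Sample from the filtered distribution; torch.multinomial normalizes the non-negative exp(logits) weights internally, so no explicit softmax is needed.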
token_weights = logits.exp() # (N, vocab_size)
next_token = torch.multinomial(token_weights, 1) # (N, 1)
# Force generation of the remaining [IMG] tokens if [IMG0] is generated.
if next_token.shape[0] == 1 and next_token.item() == self.retrieval_token_idx[0]:
assert self.retrieval_token_idx == self.gen_token_idx, (self.retrieval_token_idx, self.gen_token_idx)
next_token = torch.tensor(self.retrieval_token_idx)[None, :].long().to(embeddings.device) # (1, num_tokens)
else:
next_token = next_token.long().to(embeddings.device)
if out is not None:
out = torch.cat([out, next_token], dim=-1)
else:
out = next_token
next_embedding = self.input_embeddings(next_token)
embeddings = torch.cat([embeddings, next_embedding], dim=1)
return out, output_embeddings, output_logits
class GILL(nn.Module):
def __init__(self, tokenizer, model_args: Optional[GILLArgs] = None,
path_array: Optional[List[str]] = None, emb_matrix: Optional[torch.tensor] = None,
load_sd: bool = False, num_gen_images: int = 1, decision_model_path: Optional[str] = None):
super().__init__()
self.model = GILLModel(tokenizer, model_args)
self.path_array = path_array
self.emb_matrix = emb_matrix
self.load_sd = load_sd
self.num_gen_images = num_gen_images
self.idx2dec = {0: 'gen', 1: 'ret', 2: 'same'}
self.decision_model = None
# Load the Stable Diffusion model.
if load_sd:
model_id = "runwayml/stable-diffusion-v1-5"
self.sd_pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
if decision_model_path is not None:
print('Loading decision model...')
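# The decision MLP takes the first [IMG] hidden state (4096-d) concatenated with the max retrieval score (1-d), i.e. 4097 input features (see generate_for_images_and_texts).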
self.decision_model = nn.Sequential(*[
nn.Dropout(0.5),
nn.Linear(4096 + 1, 2),  # [IMG0] hidden state (4096) + max retrieval score (1).
])
mlp_checkpoint = torch.load(decision_model_path)
self.decision_model.load_state_dict(mlp_checkpoint['state_dict'], strict=True)
self.decision_model.eval()
def __call__(self, images: Tensor, tgt_tokens: Optional[Tensor] = None, caption_len: Optional[Tensor] = None,
generate: bool = False, num_words: int = 32, temperature: float = 1.0, top_p: float = 1.0,
ret_scale_factor: float = 1.0, gen_scale_factor: float = 1.0,
min_word_tokens: int = 0, mode: str = 'captioning', concat_captions: bool = False,
input_prefix: Optional[str] = None) -> Tensor:
if generate:
return self.model.generate(images, num_words, temperature=temperature, top_p=top_p,
min_word_tokens=min_word_tokens, ret_scale_factor=ret_scale_factor,
gen_scale_factor=gen_scale_factor)
else:
output = self.model(
pixel_values = images,
labels = tgt_tokens,
caption_len = caption_len,
mode = mode,
concat_captions = concat_captions,
input_prefix = input_prefix)
return output
def generate_for_images_and_texts(
self, prompts: List, num_words: int = 0, min_word_tokens: int = 0, ret_scale_factor: float = 1.0, gen_scale_factor: float = 1.0,
top_p: float = 1.0, temperature: float = 0.0, max_num_rets: int = 1, generator=None,
always_add_bos : bool = False, guidance_scale: float = 7.5, num_inference_steps: int = 50):
"""
Encode prompts into embeddings, and generates text and image outputs accordingly.
Args:
prompts: List of interleaved PIL.Image.Image and strings representing input to the model.
num_words: Maximum number of tokens to generate. Must be greater than 0 (passing 0 currently raises NotImplementedError).
min_word_tokens: Minimum number of actual words before generating an image.
ret_scale_factor: Proportion to scale [IMG] token logits by. A higher value may increase the probability of the model generating [IMG] outputs.
gen_scale_factor: Like ret_scale_factor, but applied to the image generation token logits.
top_p: If set to < 1, the smallest set of tokens with highest probabilities that add up to top_p or higher are kept for generation.
temperature: Used to modulate logit distribution.
max_num_rets: Maximum number of images to return in one generation pass.
Returns:
return_outputs: List consisting of either str or List[PIL.Image.Image] objects, representing image-text interleaved model outputs.
"""
input_embs = []
input_ids = []
add_bos = True
with torch.no_grad():
for p in prompts:
if type(p) == Image.Image:
# Encode as image.
pixel_values = utils.get_pixel_values_for_model(self.model.feature_extractor, p)
pixel_values = pixel_values.to(device=self.model.logit_scale.device, dtype=self.model.logit_scale.dtype)
pixel_values = pixel_values[None, ...]
visual_embs = self.model.get_visual_embs(pixel_values, mode='captioning') # (1, n_visual_tokens, D)
input_embs.append(visual_embs)
elif type(p) == str:
text_ids = self.model.tokenizer(p, add_special_tokens=add_bos, return_tensors="pt").input_ids.to(self.model.logit_scale.device)
# Only add <bos> once unless the flag is set.
if not always_add_bos:
add_bos = False
text_embs = self.model.input_embeddings(text_ids) # (1, T, D)
input_embs.append(text_embs)
input_ids.append(text_ids)
else:
raise ValueError(f'Input prompts should be either PIL.Image.Image or str types, got {type(p)} instead.')
input_embs = torch.cat(input_embs, dim=1)
input_ids = torch.cat(input_ids, dim=1)
if num_words == 0:
raise NotImplementedError('Generation not implemented for num_words=0.')
elif num_words > 0:
generated_ids, generated_embeddings, _ = self.model.generate(input_embs, num_words, min_word_tokens=min_word_tokens,
temperature=temperature, top_p=top_p, ret_scale_factor=ret_scale_factor, gen_scale_factor=gen_scale_factor)
embeddings = generated_embeddings[-1][:, input_embs.shape[1]:]
# Truncate to newline.
newline_token_id = self.model.tokenizer('\n', add_special_tokens=False).input_ids[0]
trunc_idx = 0
for j in range(generated_ids.shape[1]):
if generated_ids[0, j] == newline_token_id:
trunc_idx = j
break
if trunc_idx > 0:
generated_ids = generated_ids[:, :trunc_idx]
embeddings = embeddings[:, :trunc_idx]
else:
raise ValueError(f'num_words must be >= 0, got {num_words}.')
# Save outputs as an interleaved list.
return_outputs = []
# Find up to max_num_rets [IMG] tokens, and their corresponding scores.
all_ret_idx = [i for i, x in enumerate(generated_ids[0, :] == self.model.retrieval_token_idx[0]) if x][:max_num_rets]
seen_image_idx = [] # Avoid showing the same image multiple times.
last_ret_idx = 0
if len(all_ret_idx) == 0:
# No [IMG] tokens.
caption = self.model.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return_outputs.append(utils.truncate_caption(caption))
else:
for ret_idx in all_ret_idx:
assert generated_ids[0, ret_idx:ret_idx+self.model.num_tokens].cpu().detach().numpy().tolist() == self.model.retrieval_token_idx, (generated_ids[0, ret_idx:ret_idx+self.model.num_tokens], self.model.retrieval_token_idx)
raw_emb = embeddings[:, ret_idx:ret_idx+self.model.num_tokens, :] # (1, 8, 4096)
assert len(self.model.args.text_emb_layers) == 1
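# raw_emb holds the LM hidden states at the [IMG0..N-1] positions; it feeds both the retrieval and the generation heads below.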
image_outputs = {
'gen': [],
'ret': [],
'decision': None,
}
if self.emb_matrix is not None:
# Produce retrieval embedding.
ret_emb = self.model.ret_text_hidden_fcs[0](raw_emb, None)[:, 0, :] # (1, 256)
ret_emb = ret_emb / ret_emb.norm(dim=-1, keepdim=True)
ret_emb = ret_emb.type(self.emb_matrix.dtype) # (1, 256)
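# emb_matrix rows were L2-normalized and scaled by logit_scale in load_gill, so this matmul yields a scaled cosine similarity per candidate image.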
scores = self.emb_matrix @ ret_emb.T
# Downweight seen images.
for seen_idx in seen_image_idx:
scores[seen_idx, :] -= 1000
# Get the top 3 candidate images for this [IMG] token.
_, top_image_idx = scores.squeeze().topk(3)
for img_idx in top_image_idx:
# Find the first image that does not error out.
try:
seen_image_idx.append(img_idx)
img = utils.get_image_from_url(self.path_array[img_idx])
image_outputs['ret'].append((img, 'ret', scores[img_idx].item()))
if len(image_outputs['ret']) == max_num_rets:  # Stop once enough images have been retrieved.
break
except (UnidentifiedImageError, ConnectionError, OSError):
pass
# Make decision with MLP.
if self.decision_model is not None:
decision_emb = raw_emb[:, 0, :] # (1, 4096)
assert decision_emb.shape[1] == 4096, decision_emb.shape
max_ret_score = scores.max().reshape((1, 1)).clone().detach().to(device=decision_emb.device, dtype=decision_emb.dtype)
decision_logits = self.decision_model(torch.cat([decision_emb, max_ret_score], dim=-1))
probs = decision_logits.softmax(dim=-1).cpu().float().numpy().tolist()
image_outputs['decision'] = [self.idx2dec[decision_logits.argmax().item()]] + probs
else:
# If no embedding matrix is provided, generate instead.
image_outputs['decision'] = ['gen', [0, 1]]
# Produce generation embedding.
gen_prefix = ' '.join([f'[IMG{i}]' for i in range(self.model.args.num_tokens)])
gen_prefix_ids = self.model.tokenizer(gen_prefix, add_special_tokens=False, return_tensors="pt").input_ids.to(self.model.logit_scale.device)
gen_prefix_embs = self.model.input_embeddings(gen_prefix_ids) # (1, T, D)
gen_emb = self.model.gen_text_hidden_fcs[0](raw_emb, gen_prefix_embs) # (1, 77, 768)
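# Stable Diffusion's CLIP text encoder expects 77-token prompt embeddings, so shorter outputs are zero-padded up to length 77 below.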
if gen_emb.shape[1] != 77:
print(f"Padding {gen_emb.shape} with zeros")
bs = gen_emb.shape[0]
clip_emb = 768
gen_emb = gen_emb.reshape(bs, -1, clip_emb) # (bs, T, 768)
seq_len = gen_emb.shape[1]
gen_emb = torch.cat([gen_emb, torch.zeros((bs, 77 - seq_len, clip_emb), device=gen_emb.device, dtype=gen_emb.dtype)], dim=1)
print('Padded to', gen_emb.shape)
gen_emb = gen_emb.repeat(self.num_gen_images, 1, 1) # (self.num_gen_images, 77, 768)
# OPTIM(jykoh): Only generate if scores are low.
if self.load_sd:
# If num_gen_images > 8, split into multiple batches (for GPU memory reasons).
gen_max_bs = 8
gen_images = []
for i in range(0, self.num_gen_images, gen_max_bs):
gen_images.extend(
self.sd_pipe(prompt_embeds=gen_emb[i:i+gen_max_bs], generator=generator,
guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images)
all_gen_pixels = []
for img in gen_images:
pixel_values = utils.get_pixel_values_for_model(self.model.feature_extractor, img.resize((224, 224)).convert('RGB'))
pixel_values = pixel_values.to(device=self.model.logit_scale.device, dtype=self.model.logit_scale.dtype)
all_gen_pixels.append(pixel_values)
if self.emb_matrix is not None:
all_gen_pixels = torch.stack(all_gen_pixels, dim=0)
gen_visual_embs = self.model.get_visual_embs(all_gen_pixels, mode='retrieval') # (1, D)
gen_visual_embs = gen_visual_embs / gen_visual_embs.norm(dim=-1, keepdim=True)
gen_visual_embs = gen_visual_embs.type(self.emb_matrix.dtype)
gen_rank_scores = (gen_visual_embs @ ret_emb.T).squeeze()
sorted_score_idx = torch.argsort(-gen_rank_scores)
# Rank images by retrieval score.
if self.num_gen_images > 1:
image_outputs['gen'] = [(gen_images[idx], gen_rank_scores[idx].item()) for idx in sorted_score_idx]
else:
image_outputs['gen'] = [(gen_images[0], gen_rank_scores.item())]
else:
image_outputs['gen'] = [(gen_images[0], 0)]
else:
image_outputs['gen'] = [gen_emb]
caption = self.model.tokenizer.batch_decode(generated_ids[:, last_ret_idx:ret_idx], skip_special_tokens=True)[0]
last_ret_idx = ret_idx + 1
return_outputs.append(utils.truncate_caption(caption) + f' {gen_prefix}')
return_outputs.append(image_outputs)
return return_outputs
def get_log_likelihood_scores(
self, prompts: List):
"""
Output the log likelihood of the given interleaved prompts.
Args:
prompts: List of interleaved PIL.Image.Image and strings representing input to the model.
Returns:
Log likelihood score of the prompt sequence.
"""
input_embs = []
input_ids = []
add_bos = True
for p in prompts:
if type(p) == Image.Image:
# Encode as image.
pixel_values = utils.get_pixel_values_for_model(self.model.feature_extractor, p)
pixel_values = pixel_values.to(device=self.model.logit_scale.device, dtype=self.model.logit_scale.dtype)
pixel_values = pixel_values[None, ...]
visual_embs = self.model.get_visual_embs(pixel_values, mode='captioning') # (1, n_visual_tokens, D)
input_embs.append(visual_embs)
id_ = torch.zeros(visual_embs.shape[:2], dtype=torch.int64).to(visual_embs.device) - 100
input_ids.append(id_)
elif type(p) == str:
text_ids = self.model.tokenizer(p, add_special_tokens=True, return_tensors="pt").input_ids.to(self.model.logit_scale.device)
if not add_bos:
# Remove <bos> tag.
text_ids = text_ids[:, 1:]
else:
# Only add <bos> once.
add_bos = False
text_embs = self.model.input_embeddings(text_ids) # (1, T, D)
input_embs.append(text_embs)
input_ids.append(text_ids)
else:
raise ValueError(f'Input prompts should be either PIL.Image.Image or str types, got {type(p)} instead.')
input_embs = torch.cat(input_embs, dim=1)
input_ids = torch.cat(input_ids, dim=1)
outputs = self.model.lm(inputs_embeds=input_embs, labels=input_ids, use_cache=False, output_hidden_states=True)
return -outputs.loss.item()
def load_gill(model_dir: str, load_ret_embs: bool = True, decision_model_fn: str = 'decision_model.pth.tar') -> GILL:
model_args_path = os.path.join(model_dir, 'model_args.json')
model_ckpt_path = os.path.join(model_dir, 'pretrained_ckpt.pth.tar')
embs_paths = glob.glob(os.path.join(model_dir, 'cc3m*.npy'))
if not os.path.exists(model_args_path):
raise ValueError(f'model_args.json does not exist in {model_dir}.')
if not os.path.exists(model_ckpt_path):
raise ValueError(f'pretrained_ckpt.pth.tar does not exist in {model_dir}.')
if not load_ret_embs or len(embs_paths) == 0:
if len(embs_paths) == 0:
print(f'cc3m.npy files do not exist in {model_dir}.')
print('Running the model without retrieval.')
path_array, emb_matrix = None, None
else:
# Load embeddings.
# Construct embedding matrix for nearest neighbor lookup.
path_array = []
emb_matrix = []
# These were precomputed for all CC3M images with `model.get_visual_embs(image, mode='retrieval')`.
for p in embs_paths:
with open(p, 'rb') as wf:
train_embs_data = pkl.load(wf)
path_array.extend(train_embs_data['paths'])
emb_matrix.extend(train_embs_data['embeddings'])
emb_matrix = np.stack(emb_matrix, axis=0)
# Number of paths should be equal to number of embeddings.
assert len(path_array) == emb_matrix.shape[0], (len(path_array), emb_matrix.shape)
with open(model_args_path, 'r') as f:
model_kwargs = json.load(f)
# Initialize tokenizer.
tokenizer = AutoTokenizer.from_pretrained(model_kwargs['opt_version'], use_fast=False)
if tokenizer.pad_token is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
# Add an image token for loss masking (and visualization) purposes.
tokenizer.add_special_tokens({"cls_token": "<|image|>"}) # add special image token to tokenizer
# Add [IMG] tokens to the vocabulary.
model_kwargs['retrieval_token_idx'] = []
for i in range(model_kwargs['num_tokens']):
print(f'Adding [IMG{i}] token to vocabulary.')
print(f'Before adding new token, tokenizer("[IMG{i}]") =', tokenizer(f'[IMG{i}]', add_special_tokens=False))
num_added_tokens = tokenizer.add_tokens(f'[IMG{i}]')
print(f'After adding {num_added_tokens} new tokens, tokenizer("[IMG{i}]") =', tokenizer(f'[IMG{i}]', add_special_tokens=False))
ret_token_idx = tokenizer(f'[IMG{i}]', add_special_tokens=False).input_ids
assert len(ret_token_idx) == 1, ret_token_idx
model_kwargs['retrieval_token_idx'].append(ret_token_idx[0])
# Use the same RET tokens for generation.
model_kwargs['gen_token_idx'] = model_kwargs['retrieval_token_idx']
args = namedtuple('args', model_kwargs)(**model_kwargs)
# Load decision model.
if decision_model_fn is not None:
decision_model_path = os.path.join(model_dir, decision_model_fn)
else:
decision_model_path = None
# Initialize model for inference.
model = GILL(tokenizer, args, path_array=path_array, emb_matrix=emb_matrix,
load_sd=True, num_gen_images=1, decision_model_path=decision_model_path)
model = model.eval()
model = model.bfloat16()
model = model.cuda()
# Load pretrained linear mappings and [IMG] embeddings.
checkpoint = torch.load(model_ckpt_path)
state_dict = {}
# Strip the 'module.' prefix that DistributedDataParallel adds to parameter names during training.
for k, v in checkpoint['state_dict'].items():
state_dict[k.replace('module.', '')] = v
img_token_embeddings = state_dict['model.input_embeddings.weight'].cpu().detach()
del state_dict['model.input_embeddings.weight']
model.load_state_dict(state_dict, strict=False)
# Copy over the embeddings of the [IMG] tokens (while loading the others from the pretrained LLM).
with torch.no_grad():
if 'share_ret_gen' in model_kwargs:
assert model_kwargs['share_ret_gen'], 'Model loading only supports share_ret_gen=True for now.'
model.model.input_embeddings.weight[-model_kwargs['num_tokens']:, :].copy_(img_token_embeddings)
if load_ret_embs and len(embs_paths) > 0:
logit_scale = model.model.logit_scale.exp()
emb_matrix = torch.tensor(emb_matrix, dtype=logit_scale.dtype).to(logit_scale.device)
emb_matrix = emb_matrix / emb_matrix.norm(dim=1, keepdim=True)
emb_matrix = logit_scale * emb_matrix
model.emb_matrix = emb_matrix
return model
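# Minimal usage sketch (assumes a local GILL checkpoint; the checkpoint directory, image path,
# and prompt below are placeholders, not part of the original file). load_gill expects
# model_args.json and pretrained_ckpt.pth.tar inside model_dir, plus optional cc3m*.npy
# retrieval embeddings for nearest-neighbor image retrieval.
if __name__ == '__main__':
  example_model = load_gill('checkpoints/gill_opt')  # hypothetical checkpoint directory
  example_prompts = [Image.open('example.jpg'), 'Q: What is shown in this image?\nA:']  # hypothetical inputs
  example_outputs = example_model.generate_for_images_and_texts(
      example_prompts, num_words=32, ret_scale_factor=1.3, max_num_rets=1)
  print(example_outputs)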
| gill-main | gill/models.py |