python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import torch
from se3_transformer.model import SE3Transformer
from se3_transformer.model.fiber import Fiber
from tests.utils import get_random_graph, assign_relative_pos, get_max_diff, rot
# Tolerances for equivariance error abs( f(x) @ R - f(x @ R) )
TOL = 1e-3
CHANNELS, NODES = 32, 512
def _get_outputs(model, R):
feats0 = torch.randn(NODES, CHANNELS, 1)
feats1 = torch.randn(NODES, CHANNELS, 3)
coords = torch.randn(NODES, 3)
graph = get_random_graph(NODES)
if torch.cuda.is_available():
feats0 = feats0.cuda()
feats1 = feats1.cuda()
R = R.cuda()
coords = coords.cuda()
graph = graph.to('cuda')
model.cuda()
graph1 = assign_relative_pos(graph, coords)
out1 = model(graph1, {'0': feats0, '1': feats1}, {})
graph2 = assign_relative_pos(graph, coords @ R)
out2 = model(graph2, {'0': feats0, '1': feats1 @ R}, {})
return out1, out2
def _get_model(**kwargs):
return SE3Transformer(
num_layers=4,
fiber_in=Fiber.create(2, CHANNELS),
fiber_hidden=Fiber.create(3, CHANNELS),
fiber_out=Fiber.create(2, CHANNELS),
fiber_edge=Fiber({}),
num_heads=8,
channels_div=2,
**kwargs
)
def test_equivariance():
model = _get_model()
R = rot(*torch.rand(3))
if torch.cuda.is_available():
R = R.cuda()
out1, out2 = _get_outputs(model, R)
assert torch.allclose(out2['0'], out1['0'], atol=TOL), \
f'type-0 features should be invariant {get_max_diff(out1["0"], out2["0"])}'
assert torch.allclose(out2['1'], (out1['1'] @ R), atol=TOL), \
f'type-1 features should be equivariant {get_max_diff(out1["1"] @ R, out2["1"])}'
def test_equivariance_pooled():
model = _get_model(pooling='avg', return_type=1)
R = rot(*torch.rand(3))
if torch.cuda.is_available():
R = R.cuda()
out1, out2 = _get_outputs(model, R)
assert torch.allclose(out2, (out1 @ R), atol=TOL), \
f'type-1 features should be equivariant {get_max_diff(out1 @ R, out2)}'
def test_invariance_pooled():
model = _get_model(pooling='avg', return_type=0)
R = rot(*torch.rand(3))
if torch.cuda.is_available():
R = R.cuda()
out1, out2 = _get_outputs(model, R)
assert torch.allclose(out2, out1, atol=TOL), \
f'type-0 features should be invariant {get_max_diff(out1, out2)}'
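# --- Added usage sketch (not part of the original test module) ---
# A minimal way to run these equivariance checks without pytest, assuming the
# se3_transformer package and its DGL/CUDA dependencies are installed. Each test
# builds a random graph, applies a random rotation R, and asserts that type-0
# outputs are invariant and type-1 outputs rotate with R.
if __name__ == '__main__':
    test_equivariance()
    test_equivariance_pooled()
    test_invariance_pooled()
    print('All equivariance checks passed within tolerance', TOL)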
| RFdiffusion-main | env/SE3Transformer/tests/test_equivariance.py |
| RFdiffusion-main | env/SE3Transformer/tests/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import dgl
import torch
def get_random_graph(N, num_edges_factor=18):
graph = dgl.transform.remove_self_loop(dgl.rand_graph(N, N * num_edges_factor))
return graph
def assign_relative_pos(graph, coords):
src, dst = graph.edges()
graph.edata['rel_pos'] = coords[src] - coords[dst]
return graph
def get_max_diff(a, b):
return (a - b).abs().max().item()
def rot_z(gamma):
return torch.tensor([
[torch.cos(gamma), -torch.sin(gamma), 0],
[torch.sin(gamma), torch.cos(gamma), 0],
[0, 0, 1]
], dtype=gamma.dtype)
def rot_y(beta):
return torch.tensor([
[torch.cos(beta), 0, torch.sin(beta)],
[0, 1, 0],
[-torch.sin(beta), 0, torch.cos(beta)]
], dtype=beta.dtype)
def rot(alpha, beta, gamma):
return rot_z(alpha) @ rot_y(beta) @ rot_z(gamma)
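# --- Added sanity check (not part of the original utilities) ---
# A minimal sketch verifying that rot() composes rot_z/rot_y into a proper
# rotation: R @ R.T should equal the identity and det(R) should be 1.
if __name__ == '__main__':
    R = rot(*torch.rand(3))
    assert torch.allclose(R @ R.T, torch.eye(3), atol=1e-5)
    assert torch.isclose(torch.det(R), torch.tensor(1.0), atol=1e-5)
    print('rot() returned an orthonormal matrix with determinant 1')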
| RFdiffusion-main | env/SE3Transformer/tests/utils.py |
"""Helper class for handle symmetric assemblies."""
from pyrsistent import v
from scipy.spatial.transform import Rotation
import functools as fn
import torch
import string
import logging
import numpy as np
import pathlib
format_rots = lambda r: torch.tensor(r).float()
T3_ROTATIONS = [
torch.Tensor([
[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]]).float(),
torch.Tensor([
[-1., -0., 0.],
[-0., 1., 0.],
[-0., 0., -1.]]).float(),
torch.Tensor([
[-1., 0., 0.],
[ 0., -1., 0.],
[ 0., 0., 1.]]).float(),
torch.Tensor([
[ 1., 0., 0.],
[ 0., -1., 0.],
[ 0., 0., -1.]]).float(),
]
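# Added note: T3_ROTATIONS is the identity followed by 180-degree rotations
# about the y, z and x axes respectively, i.e. an order-4 rotation subgroup.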
saved_symmetries = ['tetrahedral', 'octahedral', 'icosahedral']
class SymGen:
def __init__(self, global_sym, recenter, radius, model_only_neighbors=False):
self._log = logging.getLogger(__name__)
self._recenter = recenter
self._radius = radius
if global_sym.lower().startswith('c'):
# Cyclic symmetry
if not global_sym[1:].isdigit():
raise ValueError(f'Invalid cyclic symmetry {global_sym}')
self._log.info(
f'Initializing cyclic symmetry order {global_sym[1:]}.')
self._init_cyclic(int(global_sym[1:]))
self.apply_symmetry = self._apply_cyclic
elif global_sym.lower().startswith('d'):
# Dihedral symmetry
if not global_sym[1:].isdigit():
raise ValueError(f'Invalid dihedral symmetry {global_sym}')
self._log.info(
f'Initializing dihedral symmetry order {global_sym[1:]}.')
self._init_dihedral(int(global_sym[1:]))
# Applied the same way as cyclic symmetry
self.apply_symmetry = self._apply_cyclic
elif global_sym.lower() == 't3':
# Tetrahedral (T3) symmetry
self._log.info('Initializing T3 symmetry order.')
self.sym_rots = T3_ROTATIONS
self.order = 4
# Applied the same way as cyclic symmetry
self.apply_symmetry = self._apply_cyclic
elif global_sym == 'octahedral':
# Octahedral symmetry
self._log.info(
'Initializing octahedral symmetry.')
self._init_octahedral()
self.apply_symmetry = self._apply_octahedral
elif global_sym.lower() in saved_symmetries:
# Using a saved symmetry
self._log.info('Initializing %s symmetry order.'%global_sym)
self._init_from_symrots_file(global_sym)
# Applied the same way as cyclic symmetry
self.apply_symmetry = self._apply_cyclic
else:
raise ValueError(f'Unrecognized symmetry {global_sym}')
self.res_idx_procesing = fn.partial(
self._lin_chainbreaks, num_breaks=self.order)
#####################
## Cyclic symmetry ##
#####################
def _init_cyclic(self, order):
sym_rots = []
for i in range(order):
deg = i * 360.0 / order
r = Rotation.from_euler('z', deg, degrees=True)
sym_rots.append(format_rots(r.as_matrix()))
self.sym_rots = sym_rots
self.order = order
def _apply_cyclic(self, coords_in, seq_in):
coords_out = torch.clone(coords_in)
seq_out = torch.clone(seq_in)
if seq_out.shape[0] % self.order != 0:
raise ValueError(
f'Sequence length must be divisible by {self.order}')
subunit_len = seq_out.shape[0] // self.order
for i in range(self.order):
start_i = subunit_len * i
end_i = subunit_len * (i+1)
coords_out[start_i:end_i] = torch.einsum(
'bnj,kj->bnk', coords_out[:subunit_len], self.sym_rots[i])
seq_out[start_i:end_i] = seq_out[:subunit_len]
return coords_out, seq_out
def _lin_chainbreaks(self, num_breaks, res_idx, offset=None):
assert res_idx.ndim == 2
res_idx = torch.clone(res_idx)
subunit_len = res_idx.shape[-1] // num_breaks
chain_delimiters = []
if offset is None:
offset = res_idx.shape[-1]
for i in range(num_breaks):
start_i = subunit_len * i
end_i = subunit_len * (i+1)
chain_labels = list(string.ascii_uppercase) + [str(i+j) for i in
string.ascii_uppercase for j in string.ascii_uppercase]
chain_delimiters.extend(
[chain_labels[i] for _ in range(subunit_len)]
)
res_idx[:, start_i:end_i] = res_idx[:, start_i:end_i] + offset * (i+1)
return res_idx, chain_delimiters
#######################
## Dihedral symmetry ##
#######################
def _init_dihedral(self, order):
sym_rots = []
flip = Rotation.from_euler('x', 180, degrees=True).as_matrix()
for i in range(order):
deg = i * 360.0 / order
rot = Rotation.from_euler('z', deg, degrees=True).as_matrix()
sym_rots.append(format_rots(rot))
rot2 = flip @ rot
sym_rots.append(format_rots(rot2))
self.sym_rots = sym_rots
self.order = order * 2
#########################
## Octahedral symmetry ##
#########################
def _init_octahedral(self):
sym_rots = np.load(f"{pathlib.Path(__file__).parent.resolve()}/sym_rots.npz")
self.sym_rots = [
torch.tensor(v_i, dtype=torch.float32)
for v_i in sym_rots['octahedral']
]
self.order = len(self.sym_rots)
def _apply_octahedral(self, coords_in, seq_in):
coords_out = torch.clone(coords_in)
seq_out = torch.clone(seq_in)
if seq_out.shape[0] % self.order != 0:
raise ValueError(
f'Sequence length must be divisible by {self.order}')
subunit_len = seq_out.shape[0] // self.order
base_axis = torch.tensor([self._radius, 0., 0.])[None]
for i in range(self.order):
start_i = subunit_len * i
end_i = subunit_len * (i+1)
subunit_chain = torch.einsum(
'bnj,kj->bnk', coords_in[:subunit_len], self.sym_rots[i])
if self._recenter:
center = torch.mean(subunit_chain[:, 1, :], axis=0)
subunit_chain -= center[None, None, :]
rotated_axis = torch.einsum(
'nj,kj->nk', base_axis, self.sym_rots[i])
subunit_chain += rotated_axis[:, None, :]
coords_out[start_i:end_i] = subunit_chain
seq_out[start_i:end_i] = seq_out[:subunit_len]
return coords_out, seq_out
#######################
## symmetry from file #
#######################
def _init_from_symrots_file(self, name):
""" _init_from_symrots_file initializes using
./inference/sym_rots.npz
Args:
name: name of symmetry (one of tetrahedral, octahedral, icosahedral)
sets self.sym_rots to be a list of torch.tensor of shape [3, 3]
"""
assert name in saved_symmetries, name + " not in " + str(saved_symmetries)
# Load in list of rotation matrices for `name`
fn = f"{pathlib.Path(__file__).parent.resolve()}/sym_rots.npz"
obj = np.load(fn)
symms = None
for k, v in obj.items():
if str(k) == name: symms = v
assert symms is not None, "%s not found in %s"%(name, fn)
self.sym_rots = [torch.tensor(v_i, dtype=torch.float32) for v_i in symms]
self.order = len(self.sym_rots)
# Reorder so the identity is the first rotation, if it isn't already
if not np.isclose(((self.sym_rots[0]-np.eye(3))**2).sum(), 0):
# Move identity to be the first rotation
for i, rot in enumerate(self.sym_rots):
if np.isclose(((rot-np.eye(3))**2).sum(), 0):
self.sym_rots = [self.sym_rots.pop(i)] + self.sym_rots
assert len(self.sym_rots) == self.order
assert np.isclose(((self.sym_rots[0]-np.eye(3))**2).sum(), 0)
def close_neighbors(self):
"""close_neighbors finds the rotations within self.sym_rots that
correspond to close neighbors.
Returns:
list of rotation matrices corresponding to the identity and close neighbors
"""
# set of small rotation angle rotations
rel_rot = lambda M: np.linalg.norm(Rotation.from_matrix(M).as_rotvec())
rel_rots = [(i+1, rel_rot(M)) for i, M in enumerate(self.sym_rots[1:])]
min_rot = min(rel_rot_val[1] for rel_rot_val in rel_rots)
close_rots = [np.eye(3)] + [
self.sym_rots[i] for i, rel_rot_val in rel_rots if
np.isclose(rel_rot_val, min_rot)
]
return close_rots
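# --- Added usage sketch (not part of the original module) ---
# A minimal, self-contained example of cyclic symmetrisation with toy inputs:
# for C3, _apply_cyclic copies the first third of the coordinates/sequence into
# the remaining two thirds after rotating by 120 and 240 degrees about z.
# The shapes below are hypothetical; RFdiffusion passes [L, 14, 3] backbones.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    symgen = SymGen('C3', recenter=False, radius=10.0)
    subunit_len = 5
    coords = torch.randn(3 * subunit_len, 14, 3)
    seq = torch.randint(0, 20, (3 * subunit_len,))
    coords_sym, seq_sym = symgen.apply_symmetry(coords, seq)
    print(coords_sym.shape, seq_sym.shape)  # torch.Size([15, 14, 3]) torch.Size([15])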
| RFdiffusion-main | inference/symmetry.py |
import torch
import numpy as np
from omegaconf import DictConfig, OmegaConf
from RoseTTAFoldModel import RoseTTAFoldModule
from kinematics import get_init_xyz, xyz_to_t2d
from diffusion import Diffuser
from chemical import seq2chars
from util_module import ComputeAllAtomCoords
from contigs import ContigMap
from inference import utils as iu
from potentials.manager import PotentialManager
from inference import symmetry
import logging
import torch.nn.functional as nn
import util
import hydra
from hydra.core.hydra_config import HydraConfig
import os
import sys
SCRIPT_DIR=os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_DIR + '/../') # to access RF structure prediction stuff
from model_input_logger import pickle_function_call
TOR_INDICES = util.torsion_indices
TOR_CAN_FLIP = util.torsion_can_flip
REF_ANGLES = util.reference_angles
# Create the schedule cache directory if it doesn't exist
if not os.path.exists(f'{SCRIPT_DIR}/../schedules'):
os.mkdir(f'{SCRIPT_DIR}/../schedules')
class Sampler:
def __init__(self, conf: DictConfig):
"""
Initialize sampler.
Args:
conf: Configuration.
"""
self.initialized = False
self.initialize(conf)
def initialize(self, conf: DictConfig) -> None:
"""
Initialize sampler.
Args:
conf: Configuration
- Selects appropriate model from input
- Assembles Config from model checkpoint and command line overrides
"""
self._log = logging.getLogger(__name__)
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
needs_model_reload = not self.initialized or conf.inference.ckpt_override_path != self._conf.inference.ckpt_override_path
# Assign config to Sampler
self._conf = conf
################################
### Select Appropriate Model ###
################################
# Initialize inference only helper objects to Sampler
if conf.inference.ckpt_override_path is not None:
self.ckpt_path = conf.inference.ckpt_override_path
print("WARNING: You're overriding the checkpoint path from the defaults. Check that the model you're providing can run with the inputs you're providing.")
else:
if conf.contigmap.inpaint_seq is not None or conf.contigmap.provide_seq is not None:
# use model trained for inpaint_seq
if conf.contigmap.provide_seq is not None:
# this is only used for partial diffusion
assert conf.diffuser.partial_T is not None, "The provide_seq input is specifically for partial diffusion"
if conf.scaffoldguided.scaffoldguided:
self.ckpt_path=f'{SCRIPT_DIR}/../models/InpaintSeq_Fold_ckpt.pt'
else:
self.ckpt_path = f'{SCRIPT_DIR}/../models/InpaintSeq_ckpt.pt'
elif conf.ppi.hotspot_res is not None and conf.scaffoldguided.scaffoldguided is False:
# use complex trained model
self.ckpt_path = f'{SCRIPT_DIR}/../models/Complex_base_ckpt.pt'
elif conf.scaffoldguided.scaffoldguided is True:
# use complex and secondary structure-guided model
self.ckpt_path = f'{SCRIPT_DIR}/../models/Complex_Fold_base_ckpt.pt'
else:
# use default model
self.ckpt_path = f'{SCRIPT_DIR}/../models/Base_ckpt.pt'
# for saving in trb file:
assert self._conf.inference.trb_save_ckpt_path is None, "trb_save_ckpt_path is not the place to specify an input model. Specify in inference.ckpt_override_path"
self._conf['inference']['trb_save_ckpt_path']=self.ckpt_path
#######################
### Assemble Config ###
#######################
if needs_model_reload:
# Load checkpoint, so that we can assemble the config
self.load_checkpoint()
self.assemble_config_from_chk()
# Now actually load the model weights into RF
self.model = self.load_model()
else:
self.assemble_config_from_chk()
# self.initialize_sampler(conf)
self.initialized=True
# Initialize helper objects
self.inf_conf = self._conf.inference
self.contig_conf = self._conf.contigmap
self.denoiser_conf = self._conf.denoiser
self.ppi_conf = self._conf.ppi
self.potential_conf = self._conf.potentials
self.diffuser_conf = self._conf.diffuser
self.preprocess_conf = self._conf.preprocess
self.diffuser = Diffuser(**self._conf.diffuser, cache_dir=f'{SCRIPT_DIR}/../schedules')
###########################
### Initialise Symmetry ###
###########################
if self.inf_conf.symmetry is not None:
self.symmetry = symmetry.SymGen(
self.inf_conf.symmetry,
self.inf_conf.recenter,
self.inf_conf.radius,
self.inf_conf.model_only_neighbors,
)
else:
self.symmetry = None
self.allatom = ComputeAllAtomCoords().to(self.device)
if self.inf_conf.input_pdb is None:
# set default pdb
script_dir=os.path.dirname(os.path.realpath(__file__))
self.inf_conf.input_pdb=os.path.join(script_dir, '../examples/input_pdbs/1qys.pdb')
self.target_feats = iu.process_target(self.inf_conf.input_pdb, parse_hetatom=True, center=False)
self.chain_idx = None
##############################
### Handle Partial Noising ###
##############################
if self.diffuser_conf.partial_T:
assert self.diffuser_conf.partial_T <= self.diffuser_conf.T
self.t_step_input = int(self.diffuser_conf.partial_T)
else:
self.t_step_input = int(self.diffuser_conf.T)
@property
def T(self):
'''
Return the maximum number of timesteps
that this design protocol will perform.
Output:
T (int): The maximum number of timesteps to perform
'''
return self.diffuser_conf.T
def load_checkpoint(self) -> None:
"""Loads RF checkpoint, from which config can be generated."""
self._log.info(f'Reading checkpoint from {self.ckpt_path}')
print('This is inf_conf.ckpt_path')
print(self.ckpt_path)
self.ckpt = torch.load(
self.ckpt_path, map_location=self.device)
def assemble_config_from_chk(self) -> None:
"""
Function for loading model config from checkpoint directly.
Takes:
- config file
Actions:
- Replaces all -model and -diffuser items
- Throws a warning if there are items in -model and -diffuser that aren't in the checkpoint
This throws an error if there is a flag in the checkpoint 'config_dict' that isn't in the inference config.
This should ensure that whenever a feature is added in the training setup, it is accounted for in the inference script.
"""
# get overrides to re-apply after building the config from the checkpoint
overrides = []
if HydraConfig.initialized():
overrides = HydraConfig.get().overrides.task
print("Assembling -model, -diffuser and -preprocess configs from checkpoint")
for cat in ['model','diffuser','preprocess']:
for key in self._conf[cat]:
try:
print(f"USING MODEL CONFIG: self._conf[{cat}][{key}] = {self.ckpt['config_dict'][cat][key]}")
self._conf[cat][key] = self.ckpt['config_dict'][cat][key]
except:
pass
# add overrides back in again
for override in overrides:
if override.split(".")[0] in ['model','diffuser','preprocess']:
print(f'WARNING: You are changing {override.split("=")[0]} from the value this model was trained with. Are you sure you know what you are doing?')
mytype = type(self._conf[override.split(".")[0]][override.split(".")[1].split("=")[0]])
self._conf[override.split(".")[0]][override.split(".")[1].split("=")[0]] = mytype(override.split("=")[1])
def load_model(self):
"""Create RosettaFold model from preloaded checkpoint."""
# Read input dimensions from checkpoint.
self.d_t1d=self._conf.preprocess.d_t1d
self.d_t2d=self._conf.preprocess.d_t2d
model = RoseTTAFoldModule(**self._conf.model, d_t1d=self.d_t1d, d_t2d=self.d_t2d, T=self._conf.diffuser.T).to(self.device)
if self._conf.logging.inputs:
pickle_dir = pickle_function_call(model, 'forward', 'inference')
print(f'pickle_dir: {pickle_dir}')
model = model.eval()
self._log.info(f'Loading checkpoint.')
model.load_state_dict(self.ckpt['model_state_dict'], strict=True)
return model
def construct_contig(self, target_feats):
"""
Construct contig class describing the protein to be generated
"""
self._log.info(f'Using contig: {self.contig_conf.contigs}')
return ContigMap(target_feats, **self.contig_conf)
def construct_denoiser(self, L, visible):
"""Make length-specific denoiser."""
denoise_kwargs = OmegaConf.to_container(self.diffuser_conf)
denoise_kwargs.update(OmegaConf.to_container(self.denoiser_conf))
denoise_kwargs.update({
'L': L,
'diffuser': self.diffuser,
'potential_manager': self.potential_manager,
'visible': visible
})
return iu.Denoise(**denoise_kwargs)
def sample_init(self, return_forward_trajectory=False):
"""
Initial features to start the sampling process.
Modify signature and function body for different initialization
based on the config.
Returns:
xt: Starting positions with a portion of them randomly sampled.
seq_t: Starting sequence with a portion of them set to unknown.
"""
#######################
### Parse input pdb ###
#######################
self.target_feats = iu.process_target(self.inf_conf.input_pdb, parse_hetatom=True, center=False)
################################
### Generate specific contig ###
################################
# Generate a specific contig from the range of possibilities specified at input
self.contig_map = self.construct_contig(self.target_feats)
self.mappings = self.contig_map.get_mappings()
self.mask_seq = torch.from_numpy(self.contig_map.inpaint_seq)[None,:]
self.mask_str = torch.from_numpy(self.contig_map.inpaint_str)[None,:]
self.binderlen = len(self.contig_map.inpaint)
####################
### Get Hotspots ###
####################
self.hotspot_0idx=iu.get_idx0_hotspots(self.mappings, self.ppi_conf, self.binderlen)
#####################################
### Initialise Potentials Manager ###
#####################################
self.potential_manager = PotentialManager(self.potential_conf,
self.ppi_conf,
self.diffuser_conf,
self.inf_conf,
self.hotspot_0idx,
self.binderlen)
###################################
### Initialize other attributes ###
###################################
xyz_27 = self.target_feats['xyz_27']
mask_27 = self.target_feats['mask_27']
seq_orig = self.target_feats['seq']
L_mapped = len(self.contig_map.ref)
contig_map=self.contig_map
self.diffusion_mask = self.mask_str
self.chain_idx=['A' if i < self.binderlen else 'B' for i in range(L_mapped)]
####################################
### Generate initial coordinates ###
####################################
if self.diffuser_conf.partial_T:
assert xyz_27.shape[0] == L_mapped, f"there must be a coordinate in the input PDB for \
each residue implied by the contig string for partial diffusion. length of \
input PDB != length of contig string: {xyz_27.shape[0]} != {L_mapped}"
assert contig_map.hal_idx0 == contig_map.ref_idx0, f'for partial diffusion there can \
be no offset between the index of a residue in the input and the index of the \
residue in the output, {contig_map.hal_idx0} != {contig_map.ref_idx0}'
# Partially diffusing from a known structure
xyz_mapped=xyz_27
atom_mask_mapped = mask_27
else:
# Fully diffusing from points initialised at the origin
# adjust size of input xt according to residue map
xyz_mapped = torch.full((1,1,L_mapped,27,3), np.nan)
xyz_mapped[:, :, contig_map.hal_idx0, ...] = xyz_27[contig_map.ref_idx0,...]
xyz_motif_prealign = xyz_mapped.clone()
motif_prealign_com = xyz_motif_prealign[0,0,:,1].mean(dim=0)
self.motif_com = xyz_27[contig_map.ref_idx0,1].mean(dim=0)
xyz_mapped = get_init_xyz(xyz_mapped).squeeze()
# adjust the size of the input atom map
atom_mask_mapped = torch.full((L_mapped, 27), False)
atom_mask_mapped[contig_map.hal_idx0] = mask_27[contig_map.ref_idx0]
# Diffuse the contig-mapped coordinates
if self.diffuser_conf.partial_T:
assert self.diffuser_conf.partial_T <= self.diffuser_conf.T, "Partial_T must be less than T"
self.t_step_input = int(self.diffuser_conf.partial_T)
else:
self.t_step_input = int(self.diffuser_conf.T)
t_list = np.arange(1, self.t_step_input+1)
#################################
### Generate initial sequence ###
#################################
seq_t = torch.full((1,L_mapped), 21).squeeze() # 21 is the mask token
seq_t[contig_map.hal_idx0] = seq_orig[contig_map.ref_idx0]
# Unmask sequence if desired
if self._conf.contigmap.provide_seq is not None:
seq_t[self.mask_seq.squeeze()] = seq_orig[self.mask_seq.squeeze()]
seq_t[~self.mask_seq.squeeze()] = 21
seq_t = torch.nn.functional.one_hot(seq_t, num_classes=22).float() # [L,22]
seq_orig = torch.nn.functional.one_hot(seq_orig, num_classes=22).float() # [L,22]
fa_stack, xyz_true = self.diffuser.diffuse_pose(
xyz_mapped,
torch.clone(seq_t),
atom_mask_mapped.squeeze(),
diffusion_mask=self.diffusion_mask.squeeze(),
t_list=t_list)
xT = fa_stack[-1].squeeze()[:,:14,:]
xt = torch.clone(xT)
self.denoiser = self.construct_denoiser(len(self.contig_map.ref), visible=self.mask_seq.squeeze())
######################
### Apply Symmetry ###
######################
if self.symmetry is not None:
xt, seq_t = self.symmetry.apply_symmetry(xt, seq_t)
self._log.info(f'Sequence init: {seq2chars(torch.argmax(seq_t, dim=-1))}')
self.msa_prev = None
self.pair_prev = None
self.state_prev = None
#########################################
### Parse ligand for ligand potential ###
#########################################
if self.potential_conf.guiding_potentials is not None:
if any(list(filter(lambda x: "substrate_contacts" in x, self.potential_conf.guiding_potentials))):
assert len(self.target_feats['xyz_het']) > 0, "If you're using the Substrate Contact potential, \
you need to make sure there's a ligand in the input_pdb file!"
het_names = np.array([i['name'].strip() for i in self.target_feats['info_het']])
xyz_het = self.target_feats['xyz_het'][het_names == self._conf.potentials.substrate]
xyz_het = torch.from_numpy(xyz_het)
assert xyz_het.shape[0] > 0, f'expected >0 heteroatoms from ligand with name {self._conf.potentials.substrate}'
xyz_motif_prealign = xyz_motif_prealign[0,0][self.diffusion_mask.squeeze()]
motif_prealign_com = xyz_motif_prealign[:,1].mean(dim=0)
xyz_het_com = xyz_het.mean(dim=0)
for pot in self.potential_manager.potentials_to_apply:
pot.motif_substrate_atoms = xyz_het
pot.diffusion_mask = self.diffusion_mask.squeeze()
pot.xyz_motif = xyz_motif_prealign
pot.diffuser = self.diffuser
return xt, seq_t
def _preprocess(self, seq, xyz_t, t, repack=False):
"""
Function to prepare inputs to diffusion model
seq (L,22) one-hot sequence
msa_masked (1,1,L,48)
msa_full (1,1,L,25)
xyz_t (L,14,3) template crds (diffused)
t1d (1,L,28) this is the t1d before tacking on the chi angles:
- seq + unknown/mask (21)
- global timestep (1-t/T if not motif else 1) (1)
MODEL SPECIFIC:
- contacting residues: for ppi. Target residues in contact with binder (1)
- empty feature (legacy) (1)
- ss (H, E, L, MASK) (4)
t2d (1, L, L, 45)
- last plane is block adjacency
"""
L = seq.shape[0]
T = self.T
binderlen = self.binderlen
target_res = self.ppi_conf.hotspot_res
##################
### msa_masked ###
##################
msa_masked = torch.zeros((1,1,L,48))
msa_masked[:,:,:,:22] = seq[None, None]
msa_masked[:,:,:,22:44] = seq[None, None]
msa_masked[:,:,0,46] = 1.0
msa_masked[:,:,-1,47] = 1.0
################
### msa_full ###
################
msa_full = torch.zeros((1,1,L,25))
msa_full[:,:,:,:22] = seq[None, None]
msa_full[:,:,0,23] = 1.0
msa_full[:,:,-1,24] = 1.0
###########
### t1d ###
###########
# Here we need to go from one hot with 22 classes to one hot with 21 classes (last plane is missing token)
t1d = torch.zeros((1,1,L,21))
seqt1d = torch.clone(seq)
for idx in range(L):
if seqt1d[idx,21] == 1:
seqt1d[idx,20] = 1
seqt1d[idx,21] = 0
t1d[:,:,:,:21] = seqt1d[None,None,:,:21]
# Set timestep feature to 1 where diffusion mask is True, else 1-t/T
timefeature = torch.zeros((L)).float()
timefeature[self.mask_str.squeeze()] = 1
timefeature[~self.mask_str.squeeze()] = 1 - t/self.T
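# Added note: e.g. with T=50 and t=25, motif (masked) residues keep a time
# feature of 1.0 while diffused residues get 1 - 25/50 = 0.5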
timefeature = timefeature[None,None,...,None]
t1d = torch.cat((t1d, timefeature), dim=-1).float()
#############
### xyz_t ###
#############
if self.preprocess_conf.sidechain_input:
xyz_t[torch.where(seq == 21, True, False),3:,:] = float('nan')
else:
xyz_t[~self.mask_str.squeeze(),3:,:] = float('nan')
xyz_t=xyz_t[None, None]
xyz_t = torch.cat((xyz_t, torch.full((1,1,L,13,3), float('nan'))), dim=3)
###########
### t2d ###
###########
t2d = xyz_to_t2d(xyz_t)
###########
### idx ###
###########
idx = torch.tensor(self.contig_map.rf)[None]
###############
### alpha_t ###
###############
seq_tmp = t1d[...,:-1].argmax(dim=-1).reshape(-1,L)
alpha, _, alpha_mask, _ = util.get_torsions(xyz_t.reshape(-1,L,27,3), seq_tmp, TOR_INDICES, TOR_CAN_FLIP, REF_ANGLES)
alpha_mask = torch.logical_and(alpha_mask, ~torch.isnan(alpha[...,0]))
alpha[torch.isnan(alpha)] = 0.0
alpha = alpha.reshape(1,-1,L,10,2)
alpha_mask = alpha_mask.reshape(1,-1,L,10,1)
alpha_t = torch.cat((alpha, alpha_mask), dim=-1).reshape(1, -1, L, 30)
#put tensors on device
msa_masked = msa_masked.to(self.device)
msa_full = msa_full.to(self.device)
seq = seq.to(self.device)
xyz_t = xyz_t.to(self.device)
idx = idx.to(self.device)
t1d = t1d.to(self.device)
t2d = t2d.to(self.device)
alpha_t = alpha_t.to(self.device)
######################
### added_features ###
######################
if self.preprocess_conf.d_t1d >= 24: # add hotspot residues
hotspot_tens = torch.zeros(L).float()
if self.ppi_conf.hotspot_res is None:
print("WARNING: you're using a model trained on complexes and hotspot residues, without specifying hotspots.\
If you're doing monomer diffusion this is fine")
hotspot_idx=[]
else:
hotspots = [(i[0],int(i[1:])) for i in self.ppi_conf.hotspot_res]
hotspot_idx=[]
for i,res in enumerate(self.contig_map.con_ref_pdb_idx):
if res in hotspots:
hotspot_idx.append(self.contig_map.hal_idx0[i])
hotspot_tens[hotspot_idx] = 1.0
# Add blank (legacy) feature and hotspot tensor
t1d=torch.cat((t1d, torch.zeros_like(t1d[...,:1]), hotspot_tens[None,None,...,None].to(self.device)), dim=-1)
return msa_masked, msa_full, seq[None], torch.squeeze(xyz_t, dim=0), idx, t1d, t2d, xyz_t, alpha_t
def sample_step(self, *, t, x_t, seq_init, final_step):
'''Generate the next pose that the model should be supplied at timestep t-1.
Args:
t (int): The timestep that has just been predicted
seq_t (torch.tensor): (L,22) The sequence at the beginning of this timestep
x_t (torch.tensor): (L,14,3) The residue positions at the beginning of this timestep
seq_init (torch.tensor): (L,22) The initialized sequence used in updating the sequence.
Returns:
px0: (L,14,3) The model's prediction of x0.
x_t_1: (L,14,3) The updated positions of the next step.
seq_t_1: (L,22) The updated sequence of the next step.
tors_t_1: (L, ?) The updated torsion angles of the next step.
plddt: (L, 1) Predicted lDDT of x0.
'''
msa_masked, msa_full, seq_in, xt_in, idx_pdb, t1d, t2d, xyz_t, alpha_t = self._preprocess(
seq_init, x_t, t)
N,L = msa_masked.shape[:2]
if self.symmetry is not None:
idx_pdb, self.chain_idx = self.symmetry.res_idx_procesing(res_idx=idx_pdb)
msa_prev = None
pair_prev = None
state_prev = None
with torch.no_grad():
msa_prev, pair_prev, px0, state_prev, alpha, logits, plddt = self.model(msa_masked,
msa_full,
seq_in,
xt_in,
idx_pdb,
t1d=t1d,
t2d=t2d,
xyz_t=xyz_t,
alpha_t=alpha_t,
msa_prev = msa_prev,
pair_prev = pair_prev,
state_prev = state_prev,
t=torch.tensor(t),
return_infer=True,
motif_mask=self.diffusion_mask.squeeze().to(self.device))
# prediction of X0
_, px0 = self.allatom(torch.argmax(seq_in, dim=-1), px0, alpha)
px0 = px0.squeeze()[:,:14]
#####################
### Get next pose ###
#####################
if t > final_step:
seq_t_1 = nn.one_hot(seq_init,num_classes=22).to(self.device)
x_t_1, px0 = self.denoiser.get_next_pose(
xt=x_t,
px0=px0,
t=t,
diffusion_mask=self.mask_str.squeeze(),
align_motif=self.inf_conf.align_motif
)
else:
x_t_1 = torch.clone(px0).to(x_t.device)
seq_t_1 = torch.clone(seq_init)
px0 = px0.to(x_t.device)
if self.symmetry is not None:
x_t_1, seq_t_1 = self.symmetry.apply_symmetry(x_t_1, seq_t_1)
return px0, x_t_1, seq_t_1, plddt
class SelfConditioning(Sampler):
"""
Model Runner for self conditioning
pX0[t+1] is provided as a template input to the model at time t
"""
def sample_step(self, *, t, x_t, seq_init, final_step):
'''
Generate the next pose that the model should be supplied at timestep t-1.
Args:
t (int): The timestep that has just been predicted
seq_t (torch.tensor): (L,22) The sequence at the beginning of this timestep
x_t (torch.tensor): (L,14,3) The residue positions at the beginning of this timestep
seq_init (torch.tensor): (L,22) The initialized sequence used in updating the sequence.
Returns:
px0: (L,14,3) The model's prediction of x0.
x_t_1: (L,14,3) The updated positions of the next step.
seq_t_1: (L) The sequence to the next step (== seq_init)
plddt: (L, 1) Predicted lDDT of x0.
'''
msa_masked, msa_full, seq_in, xt_in, idx_pdb, t1d, t2d, xyz_t, alpha_t = self._preprocess(
seq_init, x_t, t)
B,N,L = xyz_t.shape[:3]
##################################
######## Str Self Cond ###########
##################################
if (t < self.diffuser.T) and (t != self.diffuser_conf.partial_T):
zeros = torch.zeros(B,1,L,24,3).float().to(xyz_t.device)
xyz_t = torch.cat((self.prev_pred.unsqueeze(1),zeros), dim=-2) # [B,T,L,27,3]
t2d_44 = xyz_to_t2d(xyz_t) # [B,T,L,L,44]
else:
xyz_t = torch.zeros_like(xyz_t)
t2d_44 = torch.zeros_like(t2d[...,:44])
# No effect if t2d is only dim 44
t2d[...,:44] = t2d_44
if self.symmetry is not None:
idx_pdb, self.chain_idx = self.symmetry.res_idx_procesing(res_idx=idx_pdb)
####################
### Forward Pass ###
####################
with torch.no_grad():
msa_prev, pair_prev, px0, state_prev, alpha, logits, plddt = self.model(msa_masked,
msa_full,
seq_in,
xt_in,
idx_pdb,
t1d=t1d,
t2d=t2d,
xyz_t=xyz_t,
alpha_t=alpha_t,
msa_prev = None,
pair_prev = None,
state_prev = None,
t=torch.tensor(t),
return_infer=True,
motif_mask=self.diffusion_mask.squeeze().to(self.device))
if self.symmetry is not None and self.inf_conf.symmetric_self_cond:
px0 = self.symmetrise_prev_pred(px0=px0,seq_in=seq_in, alpha=alpha)[:,:,:3]
self.prev_pred = torch.clone(px0)
# prediction of X0
_, px0 = self.allatom(torch.argmax(seq_in, dim=-1), px0, alpha)
px0 = px0.squeeze()[:,:14]
###########################
### Generate Next Input ###
###########################
seq_t_1 = torch.clone(seq_init)
if t > final_step:
x_t_1, px0 = self.denoiser.get_next_pose(
xt=x_t,
px0=px0,
t=t,
diffusion_mask=self.mask_str.squeeze(),
align_motif=self.inf_conf.align_motif,
include_motif_sidechains=self.preprocess_conf.motif_sidechain_input
)
self._log.info(
f'Timestep {t}, input to next step: { seq2chars(torch.argmax(seq_t_1, dim=-1).tolist())}')
else:
x_t_1 = torch.clone(px0).to(x_t.device)
px0 = px0.to(x_t.device)
######################
### Apply symmetry ###
######################
if self.symmetry is not None:
x_t_1, seq_t_1 = self.symmetry.apply_symmetry(x_t_1, seq_t_1)
return px0, x_t_1, seq_t_1, plddt
def symmetrise_prev_pred(self, px0, seq_in, alpha):
"""
Method for symmetrising px0 output for self-conditioning
"""
_,px0_aa = self.allatom(torch.argmax(seq_in, dim=-1), px0, alpha)
px0_sym,_ = self.symmetry.apply_symmetry(px0_aa.to('cpu').squeeze()[:,:14], torch.argmax(seq_in, dim=-1).squeeze().to('cpu'))
px0_sym = px0_sym[None].to(self.device)
return px0_sym
class ScaffoldedSampler(SelfConditioning):
"""
Model Runner for Scaffold-Constrained diffusion
"""
def __init__(self, conf: DictConfig):
"""
Initialize scaffolded sampler.
Two basic approaches here:
i) Given a block adjacency/secondary structure input, generate a fold (in the presence or absence of a target)
- This allows easy generation of binders or specific folds
- Allows simple expansion of an input, to sample different lengths
ii) Providing a contig input and corresponding block adjacency/secondary structure input
- This allows mixed motif scaffolding and fold-conditioning.
- Adjacency/secondary structure inputs must correspond exactly in length to the contig string
"""
super().__init__(conf)
# initialize BlockAdjacency sampling class
self.blockadjacency = iu.BlockAdjacency(conf.scaffoldguided, conf.inference.num_designs)
#################################################
### Initialize target, if doing binder design ###
#################################################
if conf.scaffoldguided.target_pdb:
self.target = iu.Target(conf.scaffoldguided, conf.ppi.hotspot_res)
self.target_pdb = self.target.get_target()
if conf.scaffoldguided.target_ss is not None:
self.target_ss = torch.load(conf.scaffoldguided.target_ss).long()
self.target_ss = torch.nn.functional.one_hot(self.target_ss, num_classes=4)
if self._conf.scaffoldguided.contig_crop is not None:
self.target_ss=self.target_ss[self.target_pdb['crop_mask']]
if conf.scaffoldguided.target_adj is not None:
self.target_adj = torch.load(conf.scaffoldguided.target_adj).long()
self.target_adj=torch.nn.functional.one_hot(self.target_adj, num_classes=3)
if self._conf.scaffoldguided.contig_crop is not None:
self.target_adj=self.target_adj[self.target_pdb['crop_mask']]
self.target_adj=self.target_adj[:,self.target_pdb['crop_mask']]
else:
self.target = None
self.target_pdb=False
def sample_init(self):
"""
Wrapper method for taking secondary structure + adj, and outputting xt, seq_t
"""
##########################
### Process Fold Input ###
##########################
self.L, self.ss, self.adj = self.blockadjacency.get_scaffold()
self.adj = nn.one_hot(self.adj.long(), num_classes=3)
##############################
### Auto-contig generation ###
##############################
if self.contig_conf.contigs is None:
# process target
xT = torch.full((self.L, 27,3), np.nan)
xT = get_init_xyz(xT[None,None]).squeeze()
seq_T = torch.full((self.L,),21)
self.diffusion_mask = torch.full((self.L,),False)
atom_mask = torch.full((self.L,27), False)
self.binderlen=self.L
if self.target:
target_L = np.shape(self.target_pdb['xyz'])[0]
# xyz
target_xyz = torch.full((target_L, 27, 3), np.nan)
target_xyz[:,:14,:] = torch.from_numpy(self.target_pdb['xyz'])
xT = torch.cat((xT, target_xyz), dim=0)
# seq
seq_T = torch.cat((seq_T, torch.from_numpy(self.target_pdb['seq'])), dim=0)
# diffusion mask
self.diffusion_mask = torch.cat((self.diffusion_mask, torch.full((target_L,), True)),dim=0)
# atom mask
mask_27 = torch.full((target_L, 27), False)
mask_27[:,:14] = torch.from_numpy(self.target_pdb['mask'])
atom_mask = torch.cat((atom_mask, mask_27), dim=0)
self.L += target_L
# generate contigmap object
contig = []
for idx,i in enumerate(self.target_pdb['pdb_idx'][:-1]):
if idx==0:
start=i[1]
if i[1] + 1 != self.target_pdb['pdb_idx'][idx+1][1] or i[0] != self.target_pdb['pdb_idx'][idx+1][0]:
contig.append(f'{i[0]}{start}-{i[1]}/0 ')
start = self.target_pdb['pdb_idx'][idx+1][1]
contig.append(f"{self.target_pdb['pdb_idx'][-1][0]}{start}-{self.target_pdb['pdb_idx'][-1][1]}/0 ")
contig.append(f"{self.binderlen}-{self.binderlen}")
contig = ["".join(contig)]
else:
contig = [f"{self.binderlen}-{self.binderlen}"]
self.contig_map=ContigMap(self.target_pdb, contig)
self.mappings = self.contig_map.get_mappings()
self.mask_seq = self.diffusion_mask
self.mask_str = self.diffusion_mask
L_mapped=len(self.contig_map.ref)
############################
### Specific Contig mode ###
############################
else:
# get contigmap from command line
assert self.target is None, "Giving a target is the wrong way of handling this if you're doing contigs and secondary structure"
# process target and reinitialise potential_manager. This is here because the 'target' is always set up to be the second chain in our inputs.
self.target_feats = iu.process_target(self.inf_conf.input_pdb)
self.contig_map = self.construct_contig(self.target_feats)
self.mappings = self.contig_map.get_mappings()
self.mask_seq = torch.from_numpy(self.contig_map.inpaint_seq)[None,:]
self.mask_str = torch.from_numpy(self.contig_map.inpaint_str)[None,:]
self.binderlen = len(self.contig_map.inpaint)
target_feats = self.target_feats
contig_map = self.contig_map
xyz_27 = target_feats['xyz_27']
mask_27 = target_feats['mask_27']
seq_orig = target_feats['seq']
L_mapped = len(self.contig_map.ref)
seq_T=torch.full((L_mapped,),21)
seq_T[contig_map.hal_idx0] = seq_orig[contig_map.ref_idx0]
seq_T[~self.mask_seq.squeeze()] = 21
assert L_mapped==self.adj.shape[0]
diffusion_mask = self.mask_str
self.diffusion_mask = diffusion_mask
xT = torch.full((1,1,L_mapped,27,3), np.nan)
xT[:, :, contig_map.hal_idx0, ...] = xyz_27[contig_map.ref_idx0,...]
xT = get_init_xyz(xT).squeeze()
atom_mask = torch.full((L_mapped, 27), False)
atom_mask[contig_map.hal_idx0] = mask_27[contig_map.ref_idx0]
####################
### Get hotspots ###
####################
self.hotspot_0idx=iu.get_idx0_hotspots(self.mappings, self.ppi_conf, self.binderlen)
#########################
### Set up potentials ###
#########################
self.potential_manager = PotentialManager(self.potential_conf,
self.ppi_conf,
self.diffuser_conf,
self.inf_conf,
self.hotspot_0idx,
self.binderlen)
self.chain_idx=['A' if i < self.binderlen else 'B' for i in range(self.L)]
########################
### Handle Partial T ###
########################
if self.diffuser_conf.partial_T:
assert self.diffuser_conf.partial_T <= self.diffuser_conf.T
self.t_step_input = int(self.diffuser_conf.partial_T)
else:
self.t_step_input = int(self.diffuser_conf.T)
t_list = np.arange(1, self.t_step_input+1)
seq_T=torch.nn.functional.one_hot(seq_T, num_classes=22).float()
fa_stack, xyz_true = self.diffuser.diffuse_pose(
xT,
torch.clone(seq_T),
atom_mask.squeeze(),
diffusion_mask=self.diffusion_mask.squeeze(),
t_list=t_list,
include_motif_sidechains=self.preprocess_conf.motif_sidechain_input)
#######################
### Set up Denoiser ###
#######################
self.denoiser = self.construct_denoiser(self.L, visible=self.mask_seq.squeeze())
xT = torch.clone(fa_stack[-1].squeeze()[:,:14,:])
return xT, seq_T
def _preprocess(self, seq, xyz_t, t):
msa_masked, msa_full, seq, xyz_prev, idx_pdb, t1d, t2d, xyz_t, alpha_t = super()._preprocess(seq, xyz_t, t, repack=False)
###################################
### Add Adj/Secondary Structure ###
###################################
assert self.preprocess_conf.d_t1d == 28, "The checkpoint you're using hasn't been trained with sec-struc/block adjacency features"
assert self.preprocess_conf.d_t2d == 47, "The checkpoint you're using hasn't been trained with sec-struc/block adjacency features"
#####################
### Handle Target ###
#####################
if self.target:
blank_ss = torch.nn.functional.one_hot(torch.full((self.L-self.binderlen,), 3), num_classes=4)
full_ss = torch.cat((self.ss, blank_ss), dim=0)
if self._conf.scaffoldguided.target_ss is not None:
full_ss[self.binderlen:] = self.target_ss
else:
full_ss = self.ss
t1d=torch.cat((t1d, full_ss[None,None].to(self.device)), dim=-1)
t1d = t1d.float()
###########
### t2d ###
###########
if self.d_t2d == 47:
if self.target:
full_adj = torch.zeros((self.L, self.L, 3))
full_adj[:,:,-1] = 1. #set to mask
full_adj[:self.binderlen, :self.binderlen] = self.adj
if self._conf.scaffoldguided.target_adj is not None:
full_adj[self.binderlen:,self.binderlen:] = self.target_adj
else:
full_adj = self.adj
t2d=torch.cat((t2d, full_adj[None,None].to(self.device)),dim=-1)
###########
### idx ###
###########
if self.target:
idx_pdb[:,self.binderlen:] += 200
return msa_masked, msa_full, seq, xyz_prev, idx_pdb, t1d, t2d, xyz_t, alpha_t
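# --- Added illustration (not part of the original module) ---
# A standalone sketch of how assemble_config_from_chk() above re-applies a Hydra
# command-line override after rebuilding the config from the checkpoint. The
# override string "diffuser.T=50" is hypothetical.
if __name__ == '__main__':
    override = 'diffuser.T=50'
    category = override.split('.')[0]              # 'diffuser'
    key = override.split('.')[1].split('=')[0]     # 'T'
    value = override.split('=')[1]                 # '50'
    print(f'Would set conf[{category}][{key}] = {value} (cast to the existing type)')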
| RFdiffusion-main | inference/model_runners.py |
import numpy as np
import os
import sys
from omegaconf import DictConfig
from kinematics import xyz_to_t2d
import torch
import torch.nn.functional as nn
from diffusion import get_beta_schedule
from scipy.spatial.transform import Rotation as scipy_R
from util import rigid_from_3_points
from util_module import ComputeAllAtomCoords
from potentials.manager import PotentialManager
import util
import random
import logging
import string
from inference import model_runners
import hydra
import glob
###########################################################
#### Functions which can be called outside of Denoiser ####
###########################################################
def get_next_frames(xt, px0, t, diffuser, so3_type, diffusion_mask, noise_scale=1.0):
"""
get_next_frames gets updated frames using IGSO(3) + score_based reverse diffusion.
based on self.so3_type use score based update.
Generate frames at t-1
Rather than generating random rotations (as occurs during forward process), calculate rotation between xt and px0
Args:
xt: noised coordinates of shape [L, 14, 3]
px0: prediction of coordinates at t=0, of shape [L, 14, 3]
t: integer time step
diffuser: Diffuser object for reverse igSO3 sampling
so3_type: The type of SO3 noising being used ('igso3')
diffusion_mask: of shape [L] of type bool, True means not to be
updated (e.g. mask is true for motif residues)
noise_scale: scale factor for the noise added (IGSO3 only)
Returns:
backbone coordinates for step x_t-1 of shape [L, 3, 3]
"""
N_0 = px0[None, :, 0, :]
Ca_0 = px0[None, :, 1, :]
C_0 = px0[None, :, 2, :]
R_0, Ca_0 = rigid_from_3_points(N_0, Ca_0, C_0)
N_t = xt[None, :, 0, :]
Ca_t = xt[None, :, 1, :]
C_t = xt[None, :, 2, :]
R_t, Ca_t = rigid_from_3_points(N_t, Ca_t, C_t)
# round-trip through scipy to project the frames onto valid (orthonormal) rotation matrices
R_0 = scipy_R.from_matrix(R_0.squeeze().numpy()).as_matrix()
R_t = scipy_R.from_matrix(R_t.squeeze().numpy()).as_matrix()
L = R_t.shape[0]
all_rot_transitions = np.broadcast_to(np.identity(3), (L, 3, 3)).copy()
# Sample next frame for each residue
if so3_type == "igso3":
# don't do calculations on masked positions since they end up as identity matrix
all_rot_transitions[
~diffusion_mask
] = diffuser.so3_diffuser.reverse_sample_vectorized(
R_t[~diffusion_mask],
R_0[~diffusion_mask],
t,
noise_level=noise_scale,
mask=None,
return_perturb=True,
)
else:
assert False, "so3 diffusion type %s not implemented" % so3_type
all_rot_transitions = all_rot_transitions[:, None, :, :]
# Apply the interpolated rotation matrices to the coordinates
next_crds = (
np.einsum(
"lrij,laj->lrai",
all_rot_transitions,
xt[:, :3, :] - Ca_t.squeeze()[:, None, ...].numpy(),
)
+ Ca_t.squeeze()[:, None, None, ...].numpy()
)
# (L,3,3) set of backbone coordinates with slight rotation
return next_crds.squeeze(1)
def get_mu_xt_x0(xt, px0, t, beta_schedule, alphabar_schedule, eps=1e-6):
"""
Given xt, predicted x0 and the timestep t, give mu of x(t-1)
Assumes t is 0 indexed
"""
# sigma is predefined from beta. Often referred to as beta tilde t
t_idx = t - 1
sigma = (
(1 - alphabar_schedule[t_idx - 1]) / (1 - alphabar_schedule[t_idx])
) * beta_schedule[t_idx]
xt_ca = xt[:, 1, :]
px0_ca = px0[:, 1, :]
a = (
(torch.sqrt(alphabar_schedule[t_idx - 1] + eps) * beta_schedule[t_idx])
/ (1 - alphabar_schedule[t_idx])
) * px0_ca
b = (
(
torch.sqrt(1 - beta_schedule[t_idx] + eps)
* (1 - alphabar_schedule[t_idx - 1])
)
/ (1 - alphabar_schedule[t_idx])
) * xt_ca
mu = a + b
return mu, sigma
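# --- Added reference note (not part of the original module) ---
# get_mu_xt_x0 above is the standard DDPM posterior q(x_{t-1} | x_t, x_0)
# applied to the CA coordinates (Ho et al., 2020), with beta_t and alphabar_t
# read from the precomputed schedules at index t-1:
#   mu_t    = sqrt(alphabar_{t-1}) * beta_t / (1 - alphabar_t) * x_0
#           + sqrt(1 - beta_t) * (1 - alphabar_{t-1}) / (1 - alphabar_t) * x_t
#   sigma_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t   (posterior variance)
# The caller then samples x_{t-1} ~ Normal(mu_t, sqrt(sigma_t * noise_scale)).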
def get_next_ca(
xt,
px0,
t,
diffusion_mask,
crd_scale,
beta_schedule,
alphabar_schedule,
noise_scale=1.0,
):
"""
Given full atom x0 prediction (xyz coordinates), diffuse to x(t-1)
Parameters:
xt (L, 14/27, 3) set of coordinates
px0 (L, 14/27, 3) set of coordinates
t: time step. Note this is the zero-indexed current time step, so we are generating t-1
logits_aa (L x 20 ) amino acid probabilities at each position
seq_schedule (L): Tensor of bools, True is unmasked, False is masked. For this specific t
diffusion_mask (torch.tensor, required): Tensor of bools, True means NOT diffused at this residue, False means diffused
noise_scale: scale factor for the noise being added
"""
get_allatom = ComputeAllAtomCoords().to(device=xt.device)
L = len(xt)
# bring to origin after global alignment (when we don't have a motif), or replace the input motif and bring to origin, and then scale
px0 = px0 * crd_scale
xt = xt * crd_scale
# get mu(xt, x0)
mu, sigma = get_mu_xt_x0(
xt, px0, t, beta_schedule=beta_schedule, alphabar_schedule=alphabar_schedule
)
sampled_crds = torch.normal(mu, torch.sqrt(sigma * noise_scale))
delta = sampled_crds - xt[:, 1, :] # check sign of this is correct
if not diffusion_mask is None:
# Don't move motif
delta[diffusion_mask, ...] = 0
out_crds = xt + delta[:, None, :]
return out_crds / crd_scale, delta / crd_scale
def get_noise_schedule(T, noiseT, noise1, schedule_type):
"""
Function to create a schedule that varies the scale of noise given to the model over time
Parameters:
T: The total number of timesteps in the denoising trajectory
noiseT: The initial (t=T) noise scale
noise1: The final (t=1) noise scale
schedule_type: The type of function to use to interpolate between noiseT and noise1
Returns:
noise_schedule: A function which maps timestep to noise scale
"""
noise_schedules = {
"constant": lambda t: noiseT,
"linear": lambda t: ((t - 1) / (T - 1)) * (noiseT - noise1) + noise1,
}
assert (
schedule_type in noise_schedules
), f"noise_schedule must be one of {noise_schedules.keys()}. Received noise_schedule={schedule_type}. Exiting."
return noise_schedules[schedule_type]
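# Added usage sketch (hypothetical values): with T=50, noiseT=1.0, noise1=0.1,
# the "linear" schedule interpolates from 0.1 at t=1 up to 1.0 at t=T, e.g.
#   schedule = get_noise_schedule(50, 1.0, 0.1, "linear")
#   schedule(1), schedule(25), schedule(50)  # -> 0.1, ~0.54, 1.0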
class Denoise:
"""
Class for getting x(t-1) from predicted x0 and x(t)
Strategy:
Ca coordinates: Rediffuse to x(t-1) from predicted x0
Frames: Approximate update from rotation score
Torsions: 1/t of the way to the x0 prediction
"""
def __init__(
self,
T,
L,
diffuser,
visible,
seq_diffuser=None,
b_0=0.001,
b_T=0.1,
min_b=1.0,
max_b=12.5,
min_sigma=0.05,
max_sigma=1.5,
noise_level=0.5,
schedule_type="linear",
so3_schedule_type="linear",
schedule_kwargs={},
so3_type="igso3",
noise_scale_ca=1.0,
final_noise_scale_ca=1,
ca_noise_schedule_type="constant",
noise_scale_frame=0.5,
final_noise_scale_frame=0.5,
frame_noise_schedule_type="constant",
crd_scale=1 / 15,
potential_manager=None,
partial_T=None,
):
"""
Parameters:
noise_level: scaling on the noise added (set to 0 to use no noise,
to 1 to have full noise)
"""
self.T = T
self.L = L
self.diffuser = diffuser
self.seq_diffuser = seq_diffuser
self.b_0 = b_0
self.b_T = b_T
self.noise_level = noise_level
self.schedule_type = schedule_type
self.so3_type = so3_type
self.crd_scale = crd_scale
self.noise_scale_ca = noise_scale_ca
self.final_noise_scale_ca = final_noise_scale_ca
self.ca_noise_schedule_type = ca_noise_schedule_type
self.noise_scale_frame = noise_scale_frame
self.final_noise_scale_frame = final_noise_scale_frame
self.frame_noise_schedule_type = frame_noise_schedule_type
self.potential_manager = potential_manager
self._log = logging.getLogger(__name__)
self.schedule, self.alpha_schedule, self.alphabar_schedule = get_beta_schedule(
self.T, self.b_0, self.b_T, self.schedule_type, inference=True
)
self.noise_schedule_ca = get_noise_schedule(
self.T,
self.noise_scale_ca,
self.final_noise_scale_ca,
self.ca_noise_schedule_type,
)
self.noise_schedule_frame = get_noise_schedule(
self.T,
self.noise_scale_frame,
self.final_noise_scale_frame,
self.frame_noise_schedule_type,
)
@property
def idx2steps(self):
return self.decode_scheduler.idx2steps.numpy()
def align_to_xt_motif(self, px0, xT, diffusion_mask, eps=1e-6):
"""
Need to align px0 to motif in xT. This is to permit the swapping of residue positions in the px0 motif for the true coordinates.
First, get rotation matrix from px0 to xT for the motif residues.
Second, rotate px0 (whole structure) by that rotation matrix
Third, centre at origin
"""
# if True:
# return px0
def rmsd(V, W, eps=0):
# First sum down atoms, then sum down xyz
N = V.shape[-2]
return np.sqrt(np.sum((V - W) * (V - W), axis=(-2, -1)) / N + eps)
assert (
xT.shape[1] == px0.shape[1]
), f"xT has shape {xT.shape} and px0 has shape {px0.shape}"
L, n_atom, _ = xT.shape # n_atom is the number of atoms per residue
atom_mask = ~torch.isnan(px0)
# convert to numpy arrays
px0 = px0.cpu().detach().numpy()
xT = xT.cpu().detach().numpy()
diffusion_mask = diffusion_mask.cpu().detach().numpy()
# 1 centre motifs at origin and get rotation matrix
px0_motif = px0[diffusion_mask, :3].reshape(-1, 3)
xT_motif = xT[diffusion_mask, :3].reshape(-1, 3)
px0_motif_mean = np.copy(px0_motif.mean(0)) # need later
xT_motif_mean = np.copy(xT_motif.mean(0))
# center at origin
px0_motif = px0_motif - px0_motif_mean
xT_motif = xT_motif - xT_motif_mean
# A = px0_motif
# B = xT_motif
A = xT_motif
B = px0_motif
C = np.matmul(A.T, B)
# compute optimal rotation matrix using SVD
U, S, Vt = np.linalg.svd(C)
# ensure right handed coordinate system
d = np.eye(3)
d[-1, -1] = np.sign(np.linalg.det(Vt.T @ U.T))
# construct rotation matrix
R = Vt.T @ d @ U.T
# get rotated coords
rB = B @ R
# calculate rmsd
rms = rmsd(A, rB)
self._log.info(f"Sampled motif RMSD: {rms:.2f}")
# 2 rotate whole px0 by rotation matrix
atom_mask = atom_mask.cpu()
px0[~atom_mask] = 0 # convert nans to 0
px0 = px0.reshape(-1, 3) - px0_motif_mean
px0_ = px0 @ R
# xT_motif_out = xT_motif.reshape(-1,3)
# xT_motif_out = (xT_motif_out @ R ) + px0_motif_mean
# ic(xT_motif_out.shape)
# xT_motif_out = xT_motif_out.reshape((diffusion_mask.sum(),3,3))
# 3 put in same global position as xT
px0_ = px0_ + xT_motif_mean
px0_ = px0_.reshape([L, n_atom, 3])
px0_[~atom_mask] = float("nan")
return torch.Tensor(px0_)
# return torch.tensor(xT_motif_out)
def get_potential_gradients(self, xyz, diffusion_mask):
"""
This could be moved into potential manager if desired - NRB
Function to take a structure (x) and get per-atom gradients used to guide diffusion update
Inputs:
xyz (torch.tensor, required): [L,27,3] Coordinates at which the gradient will be computed
Outputs:
Ca_grads (torch.tensor): [L,3] The gradient at each Ca atom
"""
if self.potential_manager is None or self.potential_manager.is_empty():
return torch.zeros(xyz.shape[0], 3)
use_Cb = False
# seq.requires_grad = True
xyz.requires_grad = True
if xyz.grad is not None:
xyz.grad.zero_()
current_potential = self.potential_manager.compute_all_potentials(xyz)
current_potential.backward()
# Since we are not moving frames, Cb grads are same as Ca grads
# Need access to calculated Cb coordinates to be able to get Cb grads though
Ca_grads = xyz.grad[:, 1, :]
if diffusion_mask is not None:
Ca_grads[diffusion_mask, :] = 0
# check for NaN's
if torch.isnan(Ca_grads).any():
print("WARNING: NaN in potential gradients, replacing with zero grad.")
Ca_grads[:] = 0
return Ca_grads
def get_next_pose(
self,
xt,
px0,
t,
diffusion_mask,
fix_motif=True,
align_motif=True,
include_motif_sidechains=True,
):
"""
Wrapper function to take px0, xt and t, and to produce xt-1
First, aligns px0 to xt
Then gets coordinates, frames and torsion angles
Parameters:
xt (torch.tensor, required): Current coordinates at timestep t
px0 (torch.tensor, required): Prediction of x0
t (int, required): timestep t
diffusion_mask (torch.tensor, required): Mask for structure diffusion
fix_motif (bool): Fix the motif structure
align_motif (bool): Align the model's prediction of the motif to the input motif
include_motif_sidechains (bool): Provide sidechains of the fixed motif to the model
"""
get_allatom = ComputeAllAtomCoords().to(device=xt.device)
L, n_atom = xt.shape[:2]
assert (xt.shape[1] == 14) or (xt.shape[1] == 27)
assert (px0.shape[1] == 14) or (px0.shape[1] == 27)
###############################
### Align pX0 onto Xt motif ###
###############################
if align_motif and diffusion_mask.any():
px0 = self.align_to_xt_motif(px0, xt, diffusion_mask)
# xT_motif_aligned = self.align_to_xt_motif(px0, xt, diffusion_mask)
px0 = px0.to(xt.device)
# Now done with diffusion mask. If fix_motif is False, set the diffusion mask to all False so that all coordinates can diffuse
if not fix_motif:
diffusion_mask[:] = False
# get the next set of CA coordinates
noise_scale_ca = self.noise_schedule_ca(t)
_, ca_deltas = get_next_ca(
xt,
px0,
t,
diffusion_mask,
crd_scale=self.crd_scale,
beta_schedule=self.schedule,
alphabar_schedule=self.alphabar_schedule,
noise_scale=noise_scale_ca,
)
# get the next set of backbone frames (coordinates)
noise_scale_frame = self.noise_schedule_frame(t)
frames_next = get_next_frames(
xt,
px0,
t,
diffuser=self.diffuser,
so3_type=self.so3_type,
diffusion_mask=diffusion_mask,
noise_scale=noise_scale_frame,
)
# Apply gradient step from guiding potentials
# This can be moved to below where the full atom representation is calculated to allow for potentials involving sidechains
grad_ca = self.get_potential_gradients(
xt.clone(), diffusion_mask=diffusion_mask
)
ca_deltas += self.potential_manager.get_guide_scale(t) * grad_ca
# add the delta to the new frames
frames_next = torch.from_numpy(frames_next) + ca_deltas[:, None, :] # translate
fullatom_next = torch.full_like(xt, float("nan")).unsqueeze(0)
fullatom_next[:, :, :3] = frames_next[None]
# This is never used so just make it a fudged tensor - NRB
torsions_next = torch.zeros(1, 1)
if include_motif_sidechains:
fullatom_next[:, diffusion_mask, :14] = xt[None, diffusion_mask]
return fullatom_next.squeeze()[:, :14, :], px0
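# Hedged sketch of how get_next_pose is typically driven from a reverse-diffusion loop;
# `sampler` and `predict_x0` are hypothetical names standing in for the model runner.
# >>> x_t = x_T
# >>> for t in range(T, final_step - 1, -1):
# ...     px0 = predict_x0(x_t, t)                                   # hypothetical model call
# ...     x_t, px0 = sampler.denoiser.get_next_pose(x_t, px0, t, diffusion_mask)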
def sampler_selector(conf: DictConfig):
if conf.scaffoldguided.scaffoldguided:
sampler = model_runners.ScaffoldedSampler(conf)
else:
if conf.inference.model_runner == "default":
sampler = model_runners.Sampler(conf)
elif conf.inference.model_runner == "SelfConditioning":
sampler = model_runners.SelfConditioning(conf)
elif conf.inference.model_runner == "ScaffoldedSampler":
sampler = model_runners.ScaffoldedSampler(conf)
else:
raise ValueError(f"Unrecognized sampler {conf.inference.model_runner}")
return sampler
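# Hedged example (assumes an OmegaConf/Hydra config exposing the keys referenced above):
# >>> from omegaconf import OmegaConf
# >>> conf = OmegaConf.create({'scaffoldguided': {'scaffoldguided': False},
# ...                          'inference': {'model_runner': 'SelfConditioning'}})
# >>> sampler = sampler_selector(conf)    # dispatches to model_runners.SelfConditioning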
def parse_pdb(filename, **kwargs):
"""extract xyz coords for all heavy atoms"""
with open(filename, "r") as f:
    lines = f.readlines()
return parse_pdb_lines(lines, **kwargs)
def parse_pdb_lines(lines, parse_hetatom=False, ignore_het_h=True):
# indices of residues observed in the structure
res = [
(l[22:26], l[17:20])
for l in lines
if l[:4] == "ATOM" and l[12:16].strip() == "CA"
]
seq = [util.aa2num[r[1]] if r[1] in util.aa2num.keys() else 20 for r in res]
pdb_idx = [
(l[21:22].strip(), int(l[22:26].strip()))
for l in lines
if l[:4] == "ATOM" and l[12:16].strip() == "CA"
] # chain letter, res num
# 4 BB + up to 10 SC atoms
xyz = np.full((len(res), 14, 3), np.nan, dtype=np.float32)
for l in lines:
if l[:4] != "ATOM":
continue
chain, resNo, atom, aa = (
l[21:22],
int(l[22:26]),
" " + l[12:16].strip().ljust(3),
l[17:20],
)
idx = pdb_idx.index((chain, resNo))
# for i_atm, tgtatm in enumerate(util.aa2long[util.aa2num[aa]]):
for i_atm, tgtatm in enumerate(
util.aa2long[util.aa2num[aa]][:14]
): # Nate's proposed change
if (
tgtatm is not None and tgtatm.strip() == atom.strip()
): # ignore whitespace
xyz[idx, i_atm, :] = [float(l[30:38]), float(l[38:46]), float(l[46:54])]
break
# save atom mask
mask = np.logical_not(np.isnan(xyz[..., 0]))
xyz[np.isnan(xyz[..., 0])] = 0.0
# remove duplicated (chain, resi)
new_idx = []
i_unique = []
for i, idx in enumerate(pdb_idx):
if idx not in new_idx:
new_idx.append(idx)
i_unique.append(i)
pdb_idx = new_idx
xyz = xyz[i_unique]
mask = mask[i_unique]
seq = np.array(seq)[i_unique]
out = {
"xyz": xyz, # cartesian coordinates, [Lx14]
"mask": mask, # mask showing which atoms are present in the PDB file, [Lx14]
"idx": np.array(
[i[1] for i in pdb_idx]
), # residue numbers in the PDB file, [L]
"seq": np.array(seq), # amino acid sequence, [L]
"pdb_idx": pdb_idx, # list of (chain letter, residue number) in the pdb file, [L]
}
# heteroatoms (ligands, etc)
if parse_hetatom:
xyz_het, info_het = [], []
for l in lines:
if l[:6] == "HETATM" and not (ignore_het_h and l[77] == "H"):
info_het.append(
dict(
idx=int(l[7:11]),
atom_id=l[12:16],
atom_type=l[77],
name=l[16:20],
)
)
xyz_het.append([float(l[30:38]), float(l[38:46]), float(l[46:54])])
out["xyz_het"] = np.array(xyz_het)
out["info_het"] = info_het
return out
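# Illustrative expectation for a single-chain, 120-residue structure (the path and length are hypothetical):
# >>> pdb = parse_pdb('example.pdb')
# >>> pdb['xyz'].shape, pdb['mask'].shape      # ((120, 14, 3), (120, 14))
# >>> pdb['pdb_idx'][0]                        # e.g. ('A', 1)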
def process_target(pdb_path, parse_hetatom=False, center=True):
# Read target pdb and extract features.
target_struct = parse_pdb(pdb_path, parse_hetatom=parse_hetatom)
# Zero-center positions
ca_center = target_struct["xyz"][:, :1, :].mean(axis=0, keepdims=True)
if not center:
ca_center = 0
xyz = torch.from_numpy(target_struct["xyz"] - ca_center)
seq_orig = torch.from_numpy(target_struct["seq"])
atom_mask = torch.from_numpy(target_struct["mask"])
seq_len = len(xyz)
# Make 27 atom representation
xyz_27 = torch.full((seq_len, 27, 3), np.nan).float()
xyz_27[:, :14, :] = xyz[:, :14, :]
mask_27 = torch.full((seq_len, 27), False)
mask_27[:, :14] = atom_mask
out = {
"xyz_27": xyz_27,
"mask_27": mask_27,
"seq": seq_orig,
"pdb_idx": target_struct["pdb_idx"],
}
if parse_hetatom:
out["xyz_het"] = target_struct["xyz_het"]
out["info_het"] = target_struct["info_het"]
return out
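# Hedged usage sketch (the path is hypothetical):
# >>> feats = process_target('target.pdb')
# >>> feats['xyz_27'].shape                    # (L, 27, 3); atom slots 14..26 stay NaN
# >>> feats['mask_27'][:, 14:].any()           # tensor(False)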
def get_idx0_hotspots(mappings, ppi_conf, binderlen):
"""
Takes pdb-indexed hotspot residues and the length of the binder, and makes the 0-indexed tensor of hotspots
"""
hotspot_idx = None
if binderlen > 0:
if ppi_conf.hotspot_res is not None:
assert all(
[i[0].isalpha() for i in ppi_conf.hotspot_res]
), "Hotspot residues need to be provided in pdb-indexed form. E.g. A100,A103"
hotspots = [(i[0], int(i[1:])) for i in ppi_conf.hotspot_res]
hotspot_idx = []
for i, res in enumerate(mappings["receptor_con_ref_pdb_idx"]):
if res in hotspots:
hotspot_idx.append(mappings["receptor_con_hal_idx0"][i])
return hotspot_idx
class BlockAdjacency:
"""
Class for handling PPI design inference with ss/block_adj inputs.
Basic idea is to provide a list of scaffolds, and to output ss and adjacency
matrices based off of these, while sampling additional lengths.
Inputs:
- scaffold_list: list of scaffolds (e.g. ['2kl8','1cif']). Can also be a .txt file.
- scaffold dir: directory where scaffold ss and adj are precalculated
- sampled_insertion: how many additional residues do you want to add to each loop segment? Randomly sampled 0-this number (or within given range)
- sampled_N: randomly sample up to this number of additional residues at N-term
- sampled_C: randomly sample up to this number of additional residues at C-term
- ss_mask: how many residues do you want to mask at either end of a ss (H or E) block. Fixed value
- num_designs: how many designs do you want to generate? Currently only used for bookkeeping
- systematic: do you want to systematically work through the list of scaffolds, or randomly sample (default)
- num_designs_per_input: Not really implemented yet. Maybe not necessary
Outputs:
- L: new length of chain to be diffused
- ss: all loops and insertions, and ends of ss blocks (up to ss_mask) set to mask token (3). Onehot encoded. (L,4)
- adj: block adjacency with equivalent masking as ss (L,L)
"""
def __init__(self, conf, num_designs):
"""
Parameters:
inputs:
conf.scaffold_list as conf
conf.inference.num_designs for sanity checking
"""
# either list or path to .txt file with list of scaffolds
if conf.scaffold_list is not None:
if type(conf.scaffold_list) == list:
self.scaffold_list = conf.scaffold_list
elif conf.scaffold_list[-4:] == ".txt":
# txt file with list of ids
list_from_file = []
with open(conf.scaffold_list, "r") as f:
for line in f:
list_from_file.append(line.strip())
self.scaffold_list = list_from_file
else:
raise NotImplementedError
else:
self.scaffold_list = [
os.path.split(i)[1][:-6]
for i in glob.glob(f"{conf.scaffold_dir}/*_ss.pt")
]
# path to directory with scaffolds, ss files and block_adjacency files
self.scaffold_dir = conf.scaffold_dir
# maximum sampled insertion in each loop segment
if "-" in str(conf.sampled_insertion):
self.sampled_insertion = [
int(str(conf.sampled_insertion).split("-")[0]),
int(str(conf.sampled_insertion).split("-")[1]),
]
else:
self.sampled_insertion = [0, int(conf.sampled_insertion)]
# maximum sampled insertion at N- and C-terminus
if "-" in str(conf.sampled_N):
self.sampled_N = [
int(str(conf.sampled_N).split("-")[0]),
int(str(conf.sampled_N).split("-")[1]),
]
else:
self.sampled_N = [0, int(conf.sampled_N)]
if "-" in str(conf.sampled_C):
self.sampled_C = [
int(str(conf.sampled_C).split("-")[0]),
int(str(conf.sampled_C).split("-")[1]),
]
else:
self.sampled_C = [0, int(conf.sampled_C)]
# number of residues to mask ss identity of in H/E regions (from junction)
# e.g. if ss_mask = 2, L,L,L,H,H,H,H,H,H,H,L,L,E,E,E,E,E,E,L,L,L,L,L,L would become\
# M,M,M,M,M,H,H,H,M,M,M,M,M,M,E,E,M,M,M,M,M,M,M,M where M is mask
self.ss_mask = conf.ss_mask
# whether or not to work systematically through the list
self.systematic = conf.systematic
self.num_designs = num_designs
if len(self.scaffold_list) > self.num_designs:
print(
"WARNING: Scaffold set is bigger than num_designs, so not every scaffold type will be sampled"
)
# for tracking number of designs
self.num_completed = 0
if self.systematic:
self.item_n = 0
# whether to mask loops or not
if not conf.mask_loops:
assert conf.sampled_N == 0, "can't add length if not masking loops"
assert conf.sampled_C == 0, "can't add length if not masking loops"
assert conf.sampled_insertion == 0, "can't add length if not masking loops"
self.mask_loops = False
else:
self.mask_loops = True
def get_ss_adj(self, item):
"""
Given an item, get the ss tensor and block adjacency matrix for that item
"""
ss = torch.load(os.path.join(self.scaffold_dir, f'{item.split(".")[0]}_ss.pt'))
adj = torch.load(
os.path.join(self.scaffold_dir, f'{item.split(".")[0]}_adj.pt')
)
return ss, adj
def mask_to_segments(self, mask):
"""
Takes a mask of True (loop) and False (non-loop), and outputs list of tuples (loop or not, length of element)
"""
segments = []
begin = -1
end = -1
for i in range(mask.shape[0]):
# Starting edge case
if i == 0:
begin = 0
continue
if not mask[i] == mask[i - 1]:
end = i
if mask[i - 1].item():
segments.append(("loop", end - begin))
else:
segments.append(("ss", end - begin))
begin = i
# Append the final segment, which is never closed by a transition inside the loop above
if not end == mask.shape[0]:
if mask[i].item():
segments.append(("loop", mask.shape[0] - begin))
else:
segments.append(("ss", mask.shape[0] - begin))
return segments
def expand_mask(self, mask, segments):
"""
Function to generate a new mask with dilated loops and N and C terminal additions
"""
N_add = random.randint(self.sampled_N[0], self.sampled_N[1])
C_add = random.randint(self.sampled_C[0], self.sampled_C[1])
output = N_add * [False]
for ss, length in segments:
if ss == "ss":
output.extend(length * [True])
else:
# randomly sample insertion length
ins = random.randint(
self.sampled_insertion[0], self.sampled_insertion[1]
)
output.extend((length + ins) * [False])
output.extend(C_add * [False])
assert torch.sum(torch.tensor(output)) == torch.sum(~mask)
return torch.tensor(output)
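# Illustrative toy example, assuming sampled_N, sampled_C and sampled_insertion are all [0, 0]:
# >>> mask = torch.tensor([True, False, False, True])          # True = loop, False = ss
# >>> segments = self.mask_to_segments(mask)                   # [('loop', 1), ('ss', 2), ('loop', 1)]
# >>> self.expand_mask(mask, segments)                         # tensor([False,  True,  True, False]) == ~mask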
def expand_ss(self, ss, adj, mask, expanded_mask):
"""
Given an expanded mask, populate a new ss and adj based on this
"""
ss_out = torch.ones(expanded_mask.shape[0]) * 3 # set to mask token
adj_out = torch.full((expanded_mask.shape[0], expanded_mask.shape[0]), 0.0)
ss_out[expanded_mask] = ss[~mask]
expanded_mask_2d = torch.full(adj_out.shape, True)
# mask out loops/insertions, which is ~expanded_mask
expanded_mask_2d[~expanded_mask, :] = False
expanded_mask_2d[:, ~expanded_mask] = False
mask_2d = torch.full(adj.shape, True)
# mask out loops. This mask is True=loop
mask_2d[mask, :] = False
mask_2d[:, mask] = False
adj_out[expanded_mask_2d] = adj[mask_2d]
adj_out = adj_out.reshape((expanded_mask.shape[0], expanded_mask.shape[0]))
return ss_out, adj_out
def mask_ss_adj(self, ss, adj, expanded_mask):
"""
Given an expanded ss and adj, mask some number of residues at either end of non-loop ss
"""
original_mask = torch.clone(expanded_mask)
if self.ss_mask > 0:
for i in range(1, self.ss_mask + 1):
expanded_mask[i:] *= original_mask[:-i]
expanded_mask[:-i] *= original_mask[i:]
if self.mask_loops:
ss[~expanded_mask] = 3
adj[~expanded_mask, :] = 0
adj[:, ~expanded_mask] = 0
# mask adjacency
adj[~expanded_mask] = 2
adj[:, ~expanded_mask] = 2
return ss, adj
def get_scaffold(self):
"""
Wrapper method for pulling an item from the list, and preparing ss and block adj features
"""
if self.systematic:
# reset if num designs > num_scaffolds
if self.item_n >= len(self.scaffold_list):
self.item_n = 0
item = self.scaffold_list[self.item_n]
self.item_n += 1
else:
item = random.choice(self.scaffold_list)
print("Scaffold constrained based on file: ", item)
# load files
ss, adj = self.get_ss_adj(item)
adj_orig = torch.clone(adj)
# separate into segments (loop or not)
mask = torch.where(ss == 2, 1, 0).bool()
segments = self.mask_to_segments(mask)
# insert into loops to generate new mask
expanded_mask = self.expand_mask(mask, segments)
# expand ss and adj
ss, adj = self.expand_ss(ss, adj, mask, expanded_mask)
# finally, mask some proportion of the ss at either end of the non-loop ss blocks
ss, adj = self.mask_ss_adj(ss, adj, expanded_mask)
# and then update num_completed
self.num_completed += 1
return ss.shape[0], torch.nn.functional.one_hot(ss.long(), num_classes=4), adj
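# Hedged usage sketch (conf here is the scaffold-guided sub-config described in __init__):
# >>> blocker = BlockAdjacency(conf, num_designs=10)
# >>> L, ss_onehot, adj = blocker.get_scaffold()
# >>> ss_onehot.shape, adj.shape                               # ((L, 4), (L, L))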
class Target:
"""
Class to handle targets (fixed chains).
Inputs:
- path to pdb file
- hotspot residues, in the form B10,B12,B60 etc
- whether or not to crop, and with which method
Outputs:
- Dictionary of xyz coordinates, indices, pdb_indices, pdb mask
"""
def __init__(self, conf: DictConfig, hotspots=None):
self.pdb = parse_pdb(conf.target_path)
if hotspots is not None:
self.hotspots = hotspots
else:
self.hotspots = []
self.pdb["hotspots"] = np.array(
[
True if f"{i[0]}{i[1]}" in self.hotspots else False
for i in self.pdb["pdb_idx"]
]
)
if conf.contig_crop:
self.contig_crop(conf.contig_crop)
def parse_contig(self, contig_crop):
"""
Takes a contig-crop specification and parses it into a list of (chain, residue number) tuples
"""
contig_list = []
for contig in contig_crop[0].split(" "):
subcon = []
for crop in contig.split("/"):
if crop[0].isalpha():
subcon.extend(
[
(crop[0], p)
for p in np.arange(
int(crop.split("-")[0][1:]), int(crop.split("-")[1]) + 1
)
]
)
contig_list.append(subcon)
return contig_list
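# Illustrative example of the parsing convention used above (the input is a one-element list):
# >>> # contig_crop = ['B1-3/B10-11']
# >>> # -> [[('B', 1), ('B', 2), ('B', 3), ('B', 10), ('B', 11)]]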
def contig_crop(self, contig_crop, residue_offset=200) -> None:
"""
Method to take a contig string referring to the receptor and output a pdb dictionary with just this crop
NB there are two ways to provide inputs:
- 1) e.g. B1-30,0 B50-60,0. This will add a residue offset between each chunk
- 2) e.g. B1-30,B50-60,B80-100. This will keep the original indexing of the pdb file.
Can handle the target being on multiple chains
"""
# add residue offset between chains if multiple chains in receptor file
for idx, val in enumerate(self.pdb["pdb_idx"]):
if idx != 0 and val != self.pdb["pdb_idx"][idx - 1]:
self.pdb["idx"][idx:] += residue_offset + idx
# convert contig to mask
contig_list = self.parse_contig(contig_crop)
# add residue offset to different parts of contig_list
for contig in contig_list[1:]:
start = int(contig[0][1])
self.pdb["idx"][start:] += residue_offset
# flatten list
contig_list = [i for j in contig_list for i in j]
mask = np.array(
[True if i in contig_list else False for i in self.pdb["pdb_idx"]]
)
# sanity check
assert np.sum(self.pdb["hotspots"]) == np.sum(
self.pdb["hotspots"][mask]
), "Supplied hotspot residues are missing from the target contig!"
# crop pdb
for key, val in self.pdb.items():
try:
self.pdb[key] = val[mask]
except:
self.pdb[key] = [i for idx, i in enumerate(val) if mask[idx]]
self.pdb["crop_mask"] = mask
def get_target(self):
return self.pdb
| RFdiffusion-main | inference/utils.py |
#!/usr/bin/env python
import os,sys,glob,torch,random
import numpy as np
import argparse
try:
import pyrosetta
pyrosetta.init()
APPROX = False
except:
print("WARNING: pyRosetta not found, will use an approximate SSE calculation")
APPROX = True
def main():
args=get_args()
assert args.input_pdb is not None or args.pdb_dir is not None, 'Need to provide either an input pdb (--input_pdb) or a path to pdbs (--pdb_dir)'
assert not (args.input_pdb is not None and args.pdb_dir is not None), 'Need to provide either --input_pdb or --pdb_dir, not both'
os.makedirs(args.out_dir, exist_ok=True)
if args.pdb_dir is not None:
pdbs=glob.glob(f'{args.pdb_dir}/*pdb')
else:
pdbs=[args.input_pdb]
for pdb in pdbs:
name=os.path.split(pdb)[1][:-4]
secstruc_dict=extract_secstruc(pdb)
xyz,_,_ = parse_pdb_torch(pdb)
ss, idx = ss_to_tensor(secstruc_dict)
block_adj = construct_block_adj_matrix(torch.FloatTensor(ss), torch.tensor(xyz)).float()
ss_tens, mask = mask_ss(ss, idx, max_mask=0)
ss_argmax = torch.argmax(ss_tens[:,:4], dim=1).float()
torch.save(ss_argmax, os.path.join(args.out_dir, f'{name}_ss.pt'))
torch.save(block_adj, os.path.join(args.out_dir, f'{name}_adj.pt'))
def get_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--pdb_dir",required=False, help="path to directory of pdbs. Either pass this or the path to a specific pdb (--input_pdb)", default=None)
parser.add_argument("--input_pdb", required=False, help="path to input pdb. Either provide this or a path to a directory of pdbs (--pdb_dir)", default=None)
parser.add_argument("--out_dir",dest="out_dir", required=True, help='need to specify an output path')
args = parser.parse_args()
return args
def extract_secstruc(fn):
pdb=parse_pdb(fn)
idx = pdb['idx']
if APPROX:
aa_sequence = pdb["seq"]
secstruct = get_sse(pdb["xyz"][:,1])
else:
dssp = pyrosetta.rosetta.core.scoring.dssp
pose = pyrosetta.io.pose_from_pdb(fn)
dssp.Dssp(pose).insert_ss_into_pose(pose, True)
aa_sequence = pose.sequence()
secstruct = pose.secstruct()
secstruc_dict = {'sequence':[i for i in aa_sequence],
'idx':[int(i) for i in idx],
'ss':[i for i in secstruct]}
return secstruc_dict
def ss_to_tensor(ss):
"""
Function to convert ss files to indexed tensors
0 = Helix
1 = Strand
2 = Loop
3 = Mask/unknown
4 = idx for pdb
"""
ss_conv = {'H':0,'E':1,'L':2}
idx = np.array(ss['idx'])
ss_int = np.array([int(ss_conv[i]) for i in ss['ss']])
return ss_int, idx
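# Illustrative example:
# >>> ss_to_tensor({'sequence': ['A', 'A', 'G'], 'idx': [1, 2, 3], 'ss': ['H', 'H', 'L']})
# (array([0, 0, 2]), array([1, 2, 3]))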
def mask_ss(ss, idx, min_mask = 0, max_mask = 1.0):
mask_prop = random.uniform(min_mask, max_mask)
transitions = np.where(ss[:-1] - ss[1:] != 0)[0] # index of the last residue in each ss block (except the final block)
stuck_counter = 0
while len(ss[ss == 3])/len(ss) < mask_prop and stuck_counter < 100: # keep masking until the target proportion is reached, or give up if stuck
width = random.randint(1,9)
start = random.choice(transitions)
offset = random.randint(-8,1)
try:
ss[start+offset:start+offset+width] = 3
except:
stuck_counter += 1
pass
ss = torch.tensor(ss)
ss = torch.nn.functional.one_hot(ss, num_classes=4)
ss = torch.cat((ss, torch.tensor(idx)[...,None]), dim=-1)
# mask = torch.where(torch.argmax(ss[:,:-1], dim=-1) == 3, False, True)
mask=torch.tensor(np.where(np.argmax(ss[:,:-1].numpy(), axis=-1) == 3))
return ss, mask
def generate_Cbeta(N,Ca,C):
# recreate Cb given N,Ca,C
b = Ca - N
c = C - Ca
a = torch.cross(b, c, dim=-1)
#Cb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca
# fd: below matches sidechain generator (=Rosetta params)
Cb = -0.57910144*a + 0.5689693*b - 0.5441217*c + Ca
return Cb
def get_pair_dist(a, b):
"""calculate pair distances between two sets of points
Parameters
----------
a,b : pytorch tensors of shape [batch,nres,3]
store Cartesian coordinates of two sets of atoms
Returns
-------
dist : pytorch tensor of shape [batch,nres,nres]
stores pairwise distances between atoms in a and b
"""
dist = torch.cdist(a, b, p=2)
return dist
def construct_block_adj_matrix( sstruct, xyz, cutoff=6, include_loops=False ):
'''
Given a sstruct specification and backbone coordinates, build a block adjacency matrix.
Input:
sstruct (torch.FloatTensor): (L) length tensor with numeric encoding of sstruct at each position
xyz (torch.FloatTensor): (L,n_atom,3) tensor of backbone Cartesian coordinates; only the first three atoms (N,Ca,C) are used
cutoff (float): The Cb distance cutoff under which residue pairs are considered adjacent
By eye, Nate thinks 6A is a good Cb distance cutoff
Output:
block_adj (torch.FloatTensor): (L,L) binary (0/1) matrix where residue pairs belonging to contacting secondary structure blocks are 1
'''
L = xyz.shape[0]
# three anchor atoms
N = xyz[:,0]
Ca = xyz[:,1]
C = xyz[:,2]
# recreate Cb given N,Ca,C
Cb = generate_Cbeta(N,Ca,C)
# May need a batch dimension - NRB
dist = get_pair_dist(Cb,Cb) # [L,L]
dist[torch.isnan(dist)] = 999.9
dist += 999.9*torch.eye(L,device=xyz.device)
# Now we have dist matrix and sstruct specification, turn this into a block adjacency matrix
# There is probably a way to do this in closed-form with a beautiful einsum but I am going to do the loop approach
# First: Construct a list of segments and the index at which they begin and end
in_segment = True
segments = []
begin = -1
end = -1
for i in range(sstruct.shape[0]):
# Starting edge case
if i == 0:
begin = 0
continue
if not sstruct[i] == sstruct[i-1]:
end = i
segments.append( (sstruct[i-1], begin, end) )
begin = i
# Append the final segment, which is never closed by a transition inside the loop above
if not end == sstruct.shape[0]:
segments.append( (sstruct[-1], begin, sstruct.shape[0]) )
block_adj = torch.zeros_like(dist)
for i in range(len(segments)):
curr_segment = segments[i]
if curr_segment[0] == 2 and not include_loops: continue
begin_i = curr_segment[1]
end_i = curr_segment[2]
for j in range(i+1, len(segments)):
j_segment = segments[j]
if j_segment[0] == 2 and not include_loops: continue
begin_j = j_segment[1]
end_j = j_segment[2]
if torch.any( dist[begin_i:end_i, begin_j:end_j] < cutoff ):
# Matrix is symmetric
block_adj[begin_i:end_i, begin_j:end_j] = torch.ones(end_i - begin_i, end_j - begin_j)
block_adj[begin_j:end_j, begin_i:end_i] = torch.ones(end_j - begin_j, end_i - begin_i)
return block_adj
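# Hedged usage sketch with toy inputs (random coordinates, so the resulting contact pattern is arbitrary):
# >>> ss = torch.FloatTensor([0, 0, 0, 2, 1, 1, 1])            # helix, loop, strand
# >>> adj = construct_block_adj_matrix(ss, torch.randn(7, 3, 3))
# >>> adj.shape                                                 # torch.Size([7, 7])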
def parse_pdb_torch(filename):
with open(filename, 'r') as f:
    lines = f.readlines()
return parse_pdb_lines_torch(lines)
#'''
def parse_pdb_lines_torch(lines):
# indices of residues observed in the structure
pdb_idx = []
for l in lines:
if l[:4]=="ATOM" and l[12:16].strip()=="CA":
idx = ( l[21:22].strip(), int(l[22:26].strip()) )
if idx not in pdb_idx:
pdb_idx.append(idx)
# 4 BB + up to 10 SC atoms
xyz = np.full((len(pdb_idx), 27, 3), np.nan, dtype=np.float32)
for l in lines:
if l[:4] != "ATOM":
continue
chain, resNo, atom, aa = l[21:22], int(l[22:26]), ' '+l[12:16].strip().ljust(3), l[17:20]
idx = pdb_idx.index((chain,resNo))
for i_atm, tgtatm in enumerate(aa2long[aa2num[aa]]):
if tgtatm == atom:
xyz[idx,i_atm,:] = [float(l[30:38]), float(l[38:46]), float(l[46:54])]
break
# save atom mask
mask = np.logical_not(np.isnan(xyz[...,0]))
xyz[np.isnan(xyz[...,0])] = 0.0
return xyz,mask,np.array(pdb_idx)
def parse_pdb(filename, **kwargs):
'''extract xyz coords for all heavy atoms'''
with open(filename, 'r') as f:
    lines = f.readlines()
return parse_pdb_lines(lines, **kwargs)
def parse_pdb_lines(lines, parse_hetatom=False, ignore_het_h=True):
# indices of residues observed in the structure
res = [(l[22:26],l[17:20]) for l in lines if l[:4]=="ATOM" and l[12:16].strip()=="CA"]
seq = [aa2num[r[1]] if r[1] in aa2num.keys() else 20 for r in res]
pdb_idx = [( l[21:22].strip(), int(l[22:26].strip()) ) for l in lines if l[:4]=="ATOM" and l[12:16].strip()=="CA"] # chain letter, res num
# 4 BB + up to 10 SC atoms
xyz = np.full((len(res), 27, 3), np.nan, dtype=np.float32)
for l in lines:
if l[:4] != "ATOM":
continue
chain, resNo, atom, aa = l[21:22], int(l[22:26]), ' '+l[12:16].strip().ljust(3), l[17:20]
idx = pdb_idx.index((chain,resNo))
for i_atm, tgtatm in enumerate(aa2long[aa2num[aa]]):
if tgtatm is not None and tgtatm.strip() == atom.strip(): # ignore whitespace
xyz[idx,i_atm,:] = [float(l[30:38]), float(l[38:46]), float(l[46:54])]
break
# save atom mask
mask = np.logical_not(np.isnan(xyz[...,0]))
xyz[np.isnan(xyz[...,0])] = 0.0
# remove duplicated (chain, resi)
new_idx = []
i_unique = []
for i,idx in enumerate(pdb_idx):
if idx not in new_idx:
new_idx.append(idx)
i_unique.append(i)
pdb_idx = new_idx
xyz = xyz[i_unique]
mask = mask[i_unique]
seq = np.array(seq)[i_unique]
out = {'xyz':xyz, # cartesian coordinates, [Lx27]
'mask':mask, # mask showing which atoms are present in the PDB file, [Lx27]
'idx':np.array([i[1] for i in pdb_idx]), # residue numbers in the PDB file, [L]
'seq':np.array(seq), # amino acid sequence, [L]
'pdb_idx': pdb_idx, # list of (chain letter, residue number) in the pdb file, [L]
}
# heteroatoms (ligands, etc)
if parse_hetatom:
xyz_het, info_het = [], []
for l in lines:
if l[:6]=='HETATM' and not (ignore_het_h and l[77]=='H'):
info_het.append(dict(
idx=int(l[7:11]),
atom_id=l[12:16],
atom_type=l[77],
name=l[16:20]
))
xyz_het.append([float(l[30:38]), float(l[38:46]), float(l[46:54])])
out['xyz_het'] = np.array(xyz_het)
out['info_het'] = info_het
return out
num2aa=[
'ALA','ARG','ASN','ASP','CYS',
'GLN','GLU','GLY','HIS','ILE',
'LEU','LYS','MET','PHE','PRO',
'SER','THR','TRP','TYR','VAL',
'UNK','MAS',
]
aa2num= {x:i for i,x in enumerate(num2aa)}
# full sc atom representation (Nx14)
aa2long=[
(" N "," CA "," C "," O "," CB ", None, None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB ","3HB ", None, None, None, None, None, None, None, None), # ala
(" N "," CA "," C "," O "," CB "," CG "," CD "," NE "," CZ "," NH1"," NH2", None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HD ","2HD "," HE ","1HH1","2HH1","1HH2","2HH2"), # arg
(" N "," CA "," C "," O "," CB "," CG "," OD1"," ND2", None, None, None, None, None, None," H "," HA ","1HB ","2HB ","1HD2","2HD2", None, None, None, None, None, None, None), # asn
(" N "," CA "," C "," O "," CB "," CG "," OD1"," OD2", None, None, None, None, None, None," H "," HA ","1HB ","2HB ", None, None, None, None, None, None, None, None, None), # asp
(" N "," CA "," C "," O "," CB "," SG ", None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB "," HG ", None, None, None, None, None, None, None, None), # cys
(" N "," CA "," C "," O "," CB "," CG "," CD "," OE1"," NE2", None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HE2","2HE2", None, None, None, None, None), # gln
(" N "," CA "," C "," O "," CB "," CG "," CD "," OE1"," OE2", None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ", None, None, None, None, None, None, None), # glu
(" N "," CA "," C "," O ", None, None, None, None, None, None, None, None, None, None," H ","1HA ","2HA ", None, None, None, None, None, None, None, None, None, None), # gly
(" N "," CA "," C "," O "," CB "," CG "," ND1"," CD2"," CE1"," NE2", None, None, None, None," H "," HA ","1HB ","2HB "," HD2"," HE1"," HE2", None, None, None, None, None, None), # his
(" N "," CA "," C "," O "," CB "," CG1"," CG2"," CD1", None, None, None, None, None, None," H "," HA "," HB ","1HG2","2HG2","3HG2","1HG1","2HG1","1HD1","2HD1","3HD1", None, None), # ile
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2", None, None, None, None, None, None," H "," HA ","1HB ","2HB "," HG ","1HD1","2HD1","3HD1","1HD2","2HD2","3HD2", None, None), # leu
(" N "," CA "," C "," O "," CB "," CG "," CD "," CE "," NZ ", None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HD ","2HD ","1HE ","2HE ","1HZ ","2HZ ","3HZ "), # lys
(" N "," CA "," C "," O "," CB "," CG "," SD "," CE ", None, None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HE ","2HE ","3HE ", None, None, None, None), # met
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2"," CE1"," CE2"," CZ ", None, None, None," H "," HA ","1HB ","2HB "," HD1"," HD2"," HE1"," HE2"," HZ ", None, None, None, None), # phe
(" N "," CA "," C "," O "," CB "," CG "," CD ", None, None, None, None, None, None, None," HA ","1HB ","2HB ","1HG ","2HG ","1HD ","2HD ", None, None, None, None, None, None), # pro
(" N "," CA "," C "," O "," CB "," OG ", None, None, None, None, None, None, None, None," H "," HG "," HA ","1HB ","2HB ", None, None, None, None, None, None, None, None), # ser
(" N "," CA "," C "," O "," CB "," OG1"," CG2", None, None, None, None, None, None, None," H "," HG1"," HA "," HB ","1HG2","2HG2","3HG2", None, None, None, None, None, None), # thr
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2"," NE1"," CE2"," CE3"," CZ2"," CZ3"," CH2"," H "," HA ","1HB ","2HB "," HD1"," HE1"," HZ2"," HH2"," HZ3"," HE3", None, None, None), # trp
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2"," CE1"," CE2"," CZ "," OH ", None, None," H "," HA ","1HB ","2HB "," HD1"," HE1"," HE2"," HD2"," HH ", None, None, None, None), # tyr
(" N "," CA "," C "," O "," CB "," CG1"," CG2", None, None, None, None, None, None, None," H "," HA "," HB ","1HG1","2HG1","3HG1","1HG2","2HG2","3HG2", None, None, None, None), # val
(" N "," CA "," C "," O "," CB ", None, None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB ","3HB ", None, None, None, None, None, None, None, None), # unk
(" N "," CA "," C "," O "," CB ", None, None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB ","3HB ", None, None, None, None, None, None, None, None), # mask
]
def get_sse(ca_coord):
'''
calculates the SSE of a peptide chain based on the P-SEA algorithm (Labesse 1997)
code borrowed from biokite: https://github.com/biokit/biokit
'''
def vector_dot(v1,v2): return (v1*v2).sum(-1)
def norm_vector(v): return v / np.linalg.norm(v, axis=-1, keepdims=True)
def displacement(atoms1, atoms2):
v1 = np.asarray(atoms1)
v2 = np.asarray(atoms2)
if len(v1.shape) <= len(v2.shape):
diff = v2 - v1
else:
diff = -(v1 - v2)
return diff
def distance(atoms1, atoms2):
diff = displacement(atoms1, atoms2)
return np.sqrt(vector_dot(diff, diff))
def angle(atoms1, atoms2, atoms3):
v1 = norm_vector(displacement(atoms1, atoms2))
v2 = norm_vector(displacement(atoms3, atoms2))
return np.arccos(vector_dot(v1,v2))
def dihedral(atoms1, atoms2, atoms3, atoms4):
v1 = norm_vector(displacement(atoms1, atoms2))
v2 = norm_vector(displacement(atoms2, atoms3))
v3 = norm_vector(displacement(atoms3, atoms4))
n1 = np.cross(v1, v2)
n2 = np.cross(v2, v3)
# Calculation using atan2, to ensure the correct sign of the angle
x = vector_dot(n1,n2)
y = vector_dot(np.cross(n1,n2), v2)
return np.arctan2(y,x)
_radians_to_angle = 2*np.pi/360
_r_helix = ((89-12)*_radians_to_angle, (89+12)*_radians_to_angle)
_a_helix = ((50-20)*_radians_to_angle, (50+20)*_radians_to_angle)
_d2_helix = ((5.5-0.5), (5.5+0.5))
_d3_helix = ((5.3-0.5), (5.3+0.5))
_d4_helix = ((6.4-0.6), (6.4+0.6))
_r_strand = ((124-14)*_radians_to_angle, (124+14)*_radians_to_angle)
_a_strand = ((-180)*_radians_to_angle, (-125)*_radians_to_angle,
(145)*_radians_to_angle, (180)*_radians_to_angle)
_d2_strand = ((6.7-0.6), (6.7+0.6))
_d3_strand = ((9.9-0.9), (9.9+0.9))
_d4_strand = ((12.4-1.1), (12.4+1.1))
# Filter all CA atoms in the relevant chain.
d2i_coord = np.full(( len(ca_coord), 2, 3 ), np.nan)
d3i_coord = np.full(( len(ca_coord), 2, 3 ), np.nan)
d4i_coord = np.full(( len(ca_coord), 2, 3 ), np.nan)
ri_coord = np.full(( len(ca_coord), 3, 3 ), np.nan)
ai_coord = np.full(( len(ca_coord), 4, 3 ), np.nan)
# The distances and angles are not defined for the entire interval,
# therefore the indices do not have the full range
# Values that are not defined are NaN
for i in range(1, len(ca_coord)-1): d2i_coord[i] = (ca_coord[i-1], ca_coord[i+1])
for i in range(1, len(ca_coord)-2): d3i_coord[i] = (ca_coord[i-1], ca_coord[i+2])
for i in range(1, len(ca_coord)-3): d4i_coord[i] = (ca_coord[i-1], ca_coord[i+3])
for i in range(1, len(ca_coord)-1): ri_coord[i] = (ca_coord[i-1], ca_coord[i], ca_coord[i+1])
for i in range(1, len(ca_coord)-2): ai_coord[i] = (ca_coord[i-1], ca_coord[i], ca_coord[i+1], ca_coord[i+2])
d2i = distance(d2i_coord[:,0], d2i_coord[:,1])
d3i = distance(d3i_coord[:,0], d3i_coord[:,1])
d4i = distance(d4i_coord[:,0], d4i_coord[:,1])
ri = angle(ri_coord[:,0], ri_coord[:,1], ri_coord[:,2])
ai = dihedral(ai_coord[:,0], ai_coord[:,1], ai_coord[:,2], ai_coord[:,3])
sse = ["L"] * len(ca_coord)
# Annotate helices
# Find CA that meet criteria for potential helices
is_pot_helix = np.zeros(len(sse), dtype=bool)
for i in range(len(sse)):
if (
d3i[i] >= _d3_helix[0] and d3i[i] <= _d3_helix[1]
and d4i[i] >= _d4_helix[0] and d4i[i] <= _d4_helix[1]
) or (
ri[i] >= _r_helix[0] and ri[i] <= _r_helix[1]
and ai[i] >= _a_helix[0] and ai[i] <= _a_helix[1]
):
is_pot_helix[i] = True
# Real helices are 5 consecutive helix elements
is_helix = np.zeros(len(sse), dtype=bool)
counter = 0
for i in range(len(sse)):
if is_pot_helix[i]:
counter += 1
else:
if counter >= 5:
is_helix[i-counter : i] = True
counter = 0
# Extend the helices by one at each end if CA meets extension criteria
i = 0
while i < len(sse):
if is_helix[i]:
sse[i] = "H"
if (
d3i[i-1] >= _d3_helix[0] and d3i[i-1] <= _d3_helix[1]
) or (
ri[i-1] >= _r_helix[0] and ri[i-1] <= _r_helix[1]
):
sse[i-1] = "H"
sse[i] = "H"
if (
d3i[i+1] >= _d3_helix[0] and d3i[i+1] <= _d3_helix[1]
) or (
ri[i+1] >= _r_helix[0] and ri[i+1] <= _r_helix[1]
):
sse[i+1] = "H"
i += 1
# Annotate sheets
# Find CA that meet criteria for potential strands
is_pot_strand = np.zeros(len(sse), dtype=bool)
for i in range(len(sse)):
if ( d2i[i] >= _d2_strand[0] and d2i[i] <= _d2_strand[1]
and d3i[i] >= _d3_strand[0] and d3i[i] <= _d3_strand[1]
and d4i[i] >= _d4_strand[0] and d4i[i] <= _d4_strand[1]
) or (
ri[i] >= _r_strand[0] and ri[i] <= _r_strand[1]
and ( (ai[i] >= _a_strand[0] and ai[i] <= _a_strand[1])
or (ai[i] >= _a_strand[2] and ai[i] <= _a_strand[3]))
):
is_pot_strand[i] = True
# Real strands are 5 consecutive strand elements,
# or shorter fragments of at least 3 consecutive strand residues,
# if they are in hydrogen bond proximity to 5 other residues
pot_strand_coord = ca_coord[is_pot_strand]
is_strand = np.zeros(len(sse), dtype=bool)
counter = 0
contacts = 0
for i in range(len(sse)):
if is_pot_strand[i]:
counter += 1
coord = ca_coord[i]
for strand_coord in ca_coord:
dist = distance(coord, strand_coord)
if dist >= 4.2 and dist <= 5.2:
contacts += 1
else:
if counter >= 4:
is_strand[i-counter : i] = True
elif counter == 3 and contacts >= 5:
is_strand[i-counter : i] = True
counter = 0
contacts = 0
# Extend the strands by one at each end if CA meets extension criteria
i = 0
while i < len(sse):
if is_strand[i]:
sse[i] = "E"
if d3i[i-1] >= _d3_strand[0] and d3i[i-1] <= _d3_strand[1]:
sse[i-1] = "E"
sse[i] = "E"
if d3i[i+1] >= _d3_strand[0] and d3i[i+1] <= _d3_strand[1]:
sse[i+1] = "E"
i += 1
return sse
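# Hedged usage sketch: `ca_coords` is assumed to be an (L, 3) numpy array of Ca positions.
# >>> sse = get_sse(ca_coords)
# >>> ''.join(sse)                                              # e.g. 'LLHHHHHHHHLL'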
if __name__ == "__main__":
main()
| RFdiffusion-main | helper_scripts/make_secstruc_adj.py |
from setuptools import setup, find_packages
setup(
name = 'voicebox-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.34',
license='MIT',
description = 'Voicebox - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/voicebox-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'text to speech'
],
install_requires=[
'audiolm-pytorch>=1.2.28',
'naturalspeech2-pytorch>=0.0.41',
'beartype',
'einops>=0.6.1',
'lightning>=2.0.7',
'torch>=2.0',
'torchdiffeq',
'torchode',
'vocos'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| voicebox-pytorch-main | setup.py |
from voicebox_pytorch.voicebox_pytorch import (
Transformer,
EncodecVoco,
VoiceBox,
DurationPredictor,
ConditionalFlowMatcherWrapper,
)
| voicebox-pytorch-main | voicebox_pytorch/__init__.py |
import math
from random import random
from functools import partial
import logging
import torch
from torch import nn, Tensor, einsum, IntTensor, FloatTensor, BoolTensor
from torch.nn import Module
import torch.nn.functional as F
import torchode as to
from torchdiffeq import odeint
from beartype import beartype
from beartype.typing import Tuple, Optional
from einops.layers.torch import Rearrange
from einops import rearrange, repeat, reduce, pack, unpack
from voicebox_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss, maximum_path
from audiolm_pytorch import EncodecWrapper
import torchaudio.transforms as T
from torchaudio.functional import DB_to_amplitude
from vocos import Vocos
LOGGER = logging.getLogger(__file__)
# helper functions
def exists(val):
return val is not None
def identity(t):
return t
def default(val, d):
return val if exists(val) else d
def divisible_by(num, den):
return (num % den) == 0
def is_odd(n):
return not divisible_by(n, 2)
def coin_flip():
return random() < 0.5
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
# tensor helpers
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
# mask construction helpers
def mask_from_start_end_indices(
seq_len: int,
start: Tensor,
end: Tensor
):
assert start.shape == end.shape
device = start.device
seq = torch.arange(seq_len, device = device, dtype = torch.long)
seq = seq.reshape(*((-1,) * start.ndim), seq_len)
seq = seq.expand(*start.shape, seq_len)
mask = seq >= start[..., None].long()
mask &= seq < end[..., None].long()
return mask
def mask_from_frac_lengths(
seq_len: int,
frac_lengths: Tensor
):
device = frac_lengths.device
lengths = (frac_lengths * seq_len).long()
max_start = seq_len - lengths
rand = torch.zeros_like(frac_lengths).float().uniform_(0, 1)
start = (max_start * rand).clamp(min = 0)
end = start + lengths
return mask_from_start_end_indices(seq_len, start, end)
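# Illustrative example of the deterministic helper above:
# >>> mask_from_start_end_indices(5, torch.tensor([1]), torch.tensor([4]))
# tensor([[False,  True,  True,  True, False]])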
# sinusoidal positions
class LearnedSinusoidalPosEmb(Module):
""" used by @crowsonkb """
def __init__(self, dim):
super().__init__()
assert divisible_by(dim, 2)
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim))
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
return fouriered
# rotary positional embeddings
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(Module):
def __init__(self, dim, theta = 10000):
super().__init__()
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
@property
def device(self):
return self.inv_freq.device
def forward(self, seq_len):
t = torch.arange(seq_len, device = self.device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
return freqs
def rotate_half(x):
x1, x2 = x.chunk(2, dim = -1)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(pos, t):
return t * pos.cos() + rotate_half(t) * pos.sin()
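# Hedged usage sketch of the rotary helpers, with queries/keys shaped (batch, heads, seq, dim_head):
# >>> rope = RotaryEmbedding(dim = 64)
# >>> pos = rope(128)                                           # (128, 64)
# >>> q = apply_rotary_pos_emb(pos, torch.randn(1, 8, 128, 64))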
# convolutional positional generating module
class ConvPositionEmbed(Module):
def __init__(
self,
dim,
*,
kernel_size,
groups = None
):
super().__init__()
assert is_odd(kernel_size)
groups = default(groups, dim) # full depthwise conv by default
self.dw_conv1d = nn.Sequential(
nn.Conv1d(dim, dim, kernel_size, groups = groups, padding = kernel_size // 2),
nn.GELU()
)
def forward(self, x):
x = rearrange(x, 'b n c -> b c n')
x = self.dw_conv1d(x)
return rearrange(x, 'b c n -> b n c')
# norms
class RMSNorm(Module):
def __init__(
self,
dim
):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.gamma
class AdaptiveRMSNorm(Module):
def __init__(
self,
dim,
cond_dim = None
):
super().__init__()
cond_dim = default(cond_dim, dim)
self.scale = dim ** 0.5
self.to_gamma = nn.Linear(cond_dim, dim)
self.to_beta = nn.Linear(cond_dim, dim)
# init to identity
nn.init.zeros_(self.to_gamma.weight)
nn.init.ones_(self.to_gamma.bias)
nn.init.zeros_(self.to_beta.weight)
nn.init.zeros_(self.to_beta.bias)
def forward(self, x, *, cond):
normed = F.normalize(x, dim = -1) * self.scale
gamma, beta = self.to_gamma(cond), self.to_beta(cond)
gamma, beta = map(lambda t: rearrange(t, 'b d -> b 1 d'), (gamma, beta))
return normed * gamma + beta
# attention
class Attention(Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
dropout=0,
flash = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
dim_inner = dim_head * heads
self.attend = Attend(dropout, flash = flash)
self.to_qkv = nn.Linear(dim, dim_inner * 3, bias = False)
self.to_out = nn.Linear(dim_inner, dim, bias = False)
def forward(self, x, mask = None, rotary_emb = None):
h = self.heads
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
if exists(rotary_emb):
q, k = map(lambda t: apply_rotary_pos_emb(rotary_emb, t), (q, k))
out = self.attend(q, k, v, mask = mask)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# feedforward
def FeedForward(dim, mult = 4):
return nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Linear(dim * mult, dim)
)
# transformer
class Transformer(Module):
def __init__(
self,
dim,
*,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4,
attn_dropout=0,
attn_flash = False,
adaptive_rmsnorm = False,
adaptive_rmsnorm_cond_dim_in = None
):
super().__init__()
assert divisible_by(depth, 2)
self.layers = nn.ModuleList([])
self.rotary_emb = RotaryEmbedding(dim = dim_head)
if adaptive_rmsnorm:
rmsnorm_klass = partial(AdaptiveRMSNorm, cond_dim = adaptive_rmsnorm_cond_dim_in)
else:
rmsnorm_klass = RMSNorm
for ind in range(depth):
layer = ind + 1
has_skip = layer > (depth // 2)
self.layers.append(nn.ModuleList([
nn.Linear(dim * 2, dim) if has_skip else None,
rmsnorm_klass(dim = dim),
Attention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, flash=attn_flash),
rmsnorm_klass(dim = dim),
FeedForward(dim = dim, mult = ff_mult)
]))
self.final_norm = RMSNorm(dim)
def forward(
self,
x,
adaptive_rmsnorm_cond = None
):
skip_connects = []
rotary_emb = self.rotary_emb(x.shape[-2])
rmsnorm_kwargs = dict()
if exists(adaptive_rmsnorm_cond):
rmsnorm_kwargs = dict(cond = adaptive_rmsnorm_cond)
for skip_combiner, attn_prenorm, attn, ff_prenorm, ff in self.layers:
# in the paper, they use a u-net like skip connection
# unclear how much this helps, as no ablations or further numbers given besides a brief one-two sentence mention
if not exists(skip_combiner):
skip_connects.append(x)
else:
x = torch.cat((x, skip_connects.pop()), dim = -1)
x = skip_combiner(x)
attn_input = attn_prenorm(x, **rmsnorm_kwargs)
x = attn(attn_input, rotary_emb = rotary_emb) + x
ff_input = ff_prenorm(x, **rmsnorm_kwargs)
x = ff(ff_input) + x
return self.final_norm(x)
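# Hedged usage sketch (depth must be even for the u-net style skip connections above):
# >>> model = Transformer(dim = 512, depth = 2)
# >>> model(torch.randn(1, 100, 512)).shape                     # torch.Size([1, 100, 512])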
# encoder decoders
class AudioEncoderDecoder(nn.Module):
pass
class MelVoco(AudioEncoderDecoder):
def __init__(
self,
*,
log = True,
n_mels = 100,
sampling_rate = 24000,
f_max = 8000,
n_fft = 1024,
win_length = 640,
hop_length = 160,
pretrained_vocos_path = 'charactr/vocos-mel-24khz'
):
super().__init__()
self.log = log
self.n_mels = n_mels
self.n_fft = n_fft
self.f_max = f_max
self.win_length = win_length
self.hop_length = hop_length
self.sampling_rate = sampling_rate
self.vocos = Vocos.from_pretrained(pretrained_vocos_path)
@property
def latent_dim(self):
return self.n_mels
def encode(self, audio):
stft_transform = T.Spectrogram(
n_fft = self.n_fft,
win_length = self.win_length,
hop_length = self.hop_length,
window_fn = torch.hann_window
)
spectrogram = stft_transform(audio)
mel_transform = T.MelScale(
n_mels = self.n_mels,
sample_rate = self.sampling_rate,
n_stft = self.n_fft // 2 + 1,
f_max = self.f_max
)
mel = mel_transform(spectrogram)
if self.log:
mel = T.AmplitudeToDB()(mel)
mel = rearrange(mel, 'b d n -> b n d')
return mel
def decode(self, mel):
mel = rearrange(mel, 'b n d -> b d n')
if self.log:
mel = DB_to_amplitude(mel, ref = 1., power = 0.5)
return self.vocos.decode(mel)
class EncodecVoco(AudioEncoderDecoder):
def __init__(
self,
*,
pretrained_vocos_path = 'charactr/vocos-encodec-24khz',
bandwidth_id = 2
):
super().__init__()
self.encodec = EncodecWrapper()
self.vocos = Vocos.from_pretrained(pretrained_vocos_path)
self.register_buffer('bandwidth_id', torch.tensor([bandwidth_id]))
@property
def latent_dim(self):
return self.encodec.codebook_dim
def encode(self, audio):
encoded_audio, _, _ = self.encodec(audio, return_encoded = True)
return encoded_audio
def decode(self, latents):
_, codes, _ = self.encodec.rq(latents)
codes = rearrange(codes, 'b n q -> b q n')
all_audios = []
for code in codes:
features = self.vocos.codes_to_features(code)
audio = self.vocos.decode(features, bandwidth_id = self.bandwidth_id)
all_audios.append(audio)
return torch.stack(all_audios)
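# Hedged usage sketch (Vocos / Encodec pretrained weights are fetched on first use):
# >>> codec = EncodecVoco()
# >>> latents = codec.encode(torch.randn(1, 24000))             # 1 s of 24 kHz audio -> latent frames
# >>> waveform = codec.decode(latents)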
# both duration and main denoising model are transformers
class DurationPredictor(Module):
def __init__(
self,
*,
num_phoneme_tokens,
audio_enc_dec: Optional[AudioEncoderDecoder] = None,
dim_phoneme_emb = 512,
dim = 512,
depth = 10,
dim_head = 64,
heads = 8,
ff_mult = 4,
conv_pos_embed_kernel_size = 31,
conv_pos_embed_groups = None,
attn_dropout=0,
attn_flash = False,
p_drop_prob = 0.2, # p_drop in paper
frac_lengths_mask: Tuple[float, float] = (0.1, 1.),
aligner_kwargs: dict = dict(dim_in = 80, attn_channels = 80)
):
super().__init__()
self.audio_enc_dec = audio_enc_dec
if exists(audio_enc_dec) and dim != audio_enc_dec.latent_dim:
self.proj_in = nn.Linear(audio_enc_dec.latent_dim, dim)
else:
self.proj_in = nn.Identity()
self.null_phoneme_id = num_phoneme_tokens # use last phoneme token as null token for CFG
self.to_phoneme_emb = nn.Embedding(num_phoneme_tokens + 1, dim_phoneme_emb)
self.p_drop_prob = p_drop_prob
self.frac_lengths_mask = frac_lengths_mask
self.to_embed = nn.Linear(dim * 2 + dim_phoneme_emb, dim)
self.null_cond = nn.Parameter(torch.zeros(dim))
self.conv_embed = ConvPositionEmbed(
dim = dim,
kernel_size = conv_pos_embed_kernel_size,
groups = conv_pos_embed_groups
)
self.transformer = Transformer(
dim = dim,
depth = depth,
dim_head = dim_head,
heads = heads,
ff_mult = ff_mult,
attn_dropout=attn_dropout,
attn_flash = attn_flash
)
self.to_pred = nn.Sequential(
nn.Linear(dim, 1),
Rearrange('... 1 -> ...')
)
# aligner related
# the default aligner_kwargs assume an 80-channel mel spectrogram (dim_in = 80, attn_channels = 80)
self.aligner = Aligner(dim_hidden = dim_phoneme_emb, **aligner_kwargs)
self.align_loss = ForwardSumLoss()
@property
def device(self):
return next(self.parameters()).device
@torch.inference_mode()
def forward_with_cond_scale(
self,
*args,
cond_scale = 1.,
**kwargs
):
logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1.:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
@beartype
def forward_aligner(
self,
x: FloatTensor, # (b, t, c)
x_mask: IntTensor, # (b, 1, t)
y: FloatTensor, # (b, t, c)
y_mask: IntTensor # (b, 1, t)
) -> Tuple[
FloatTensor, # alignment_hard: (b, t)
FloatTensor, # alignment_soft: (b, tx, ty)
FloatTensor, # alignment_logprob: (b, 1, ty, tx)
BoolTensor # alignment_mas: (b, tx, ty)
]:
attn_mask = rearrange(x_mask, 'b 1 t -> b 1 t 1') * rearrange(y_mask, 'b 1 t -> b 1 1 t')
alignment_soft, alignment_logprob = self.aligner(rearrange(y, 'b t c -> b c t'), x, x_mask)
assert not torch.isnan(alignment_soft).any()
alignment_mas = maximum_path(
rearrange(alignment_soft, 'b 1 t1 t2 -> b t2 t1').contiguous(),
rearrange(attn_mask, 'b 1 t1 t2 -> b t1 t2').contiguous()
)
alignment_hard = torch.sum(alignment_mas, -1).float()
alignment_soft = rearrange(alignment_soft, 'b 1 t1 t2 -> b t2 t1')
return alignment_hard, alignment_soft, alignment_logprob, alignment_mas
def forward(
self,
x,
*,
phoneme_ids,
mel,
cond,
cond_drop_prob = 0.,
target = None,
mask = None,
phoneme_len = None,
mel_len = None,
phoneme_mask = None,
mel_mask = None,
):
batch, seq_len, cond_dim = cond.shape
assert cond_dim == x.shape[-1]
x, cond = map(self.proj_in, (x, cond))
# construct mask if not given
if not exists(mask):
if coin_flip():
frac_lengths = torch.zeros((batch,), device = self.device).float().uniform_(*self.frac_lengths_mask)
mask = mask_from_frac_lengths(seq_len, frac_lengths)
else:
mask = prob_mask_like((batch, seq_len), self.p_drop_prob, self.device)
cond = cond * rearrange(~mask, '... -> ... 1')
# classifier free guidance
if cond_drop_prob > 0.:
cond_drop_mask = prob_mask_like(cond.shape[:1], cond_drop_prob, cond.device)
cond = torch.where(
rearrange(cond_drop_mask, '... -> ... 1 1'),
self.null_cond,
cond
)
phoneme_ids = torch.where(
rearrange(cond_drop_mask, '... -> ... 1'),
self.null_phoneme_id,
phoneme_ids
)
phoneme_emb = self.to_phoneme_emb(phoneme_ids)
# aligner
# use alignment_hard to oversample phonemes
# the duration predictor regresses alignment_hard (per-phoneme durations) as the target on unmasked positions
should_align = all([exists(el) for el in (phoneme_len, mel_len, phoneme_mask, mel_mask)])
if should_align:
alignment_hard, _, alignment_logprob, _ = self.forward_aligner(phoneme_emb, phoneme_mask, mel, mel_mask)
target = alignment_hard
# combine audio, phoneme, conditioning
embed = torch.cat((x, phoneme_emb, cond), dim = -1)
x = self.to_embed(embed)
x = self.conv_embed(x) + x
x = self.transformer(x)
x = self.to_pred(x)
if not exists(target):
return x
if not exists(mask):
return F.l1_loss(x, target)
loss = F.l1_loss(x, target, reduction = 'none')
loss = loss.masked_fill(~mask, 0.)
# masked mean
num = reduce(loss, 'b n -> b', 'sum')
den = mask.sum(dim = -1).clamp(min = 1e-5)
loss = num / den
loss = loss.mean()
if not should_align:
return loss
#aligner loss
align_loss = self.align_loss(alignment_logprob, phoneme_len, mel_len)
loss = loss + align_loss
return loss
class VoiceBox(Module):
def __init__(
self,
*,
num_phoneme_tokens,
audio_enc_dec: Optional[AudioEncoderDecoder] = None,
dim_in = None,
dim_phoneme_emb = 1024,
dim = 1024,
depth = 24,
dim_head = 64,
heads = 16,
ff_mult = 4,
time_hidden_dim = None,
conv_pos_embed_kernel_size = 31,
conv_pos_embed_groups = None,
attn_dropout=0,
attn_flash = False,
p_drop_prob = 0.3, # p_drop in paper
frac_lengths_mask: Tuple[float, float] = (0.7, 1.)
):
super().__init__()
dim_in = default(dim_in, dim)
time_hidden_dim = default(time_hidden_dim, dim * 4)
self.audio_enc_dec = audio_enc_dec
if exists(audio_enc_dec) and dim != audio_enc_dec.latent_dim:
self.proj_in = nn.Linear(audio_enc_dec.latent_dim, dim)
else:
self.proj_in = nn.Identity()
self.sinu_pos_emb = nn.Sequential(
LearnedSinusoidalPosEmb(dim),
nn.Linear(dim, time_hidden_dim),
nn.SiLU()
)
self.null_phoneme_id = num_phoneme_tokens # use last phoneme token as null token for CFG
self.to_phoneme_emb = nn.Embedding(num_phoneme_tokens + 1, dim_phoneme_emb)
self.p_drop_prob = p_drop_prob
self.frac_lengths_mask = frac_lengths_mask
self.to_embed = nn.Linear(dim_in * 2 + dim_phoneme_emb, dim)
self.null_cond = nn.Parameter(torch.zeros(dim_in))
self.conv_embed = ConvPositionEmbed(
dim = dim,
kernel_size = conv_pos_embed_kernel_size,
groups = conv_pos_embed_groups
)
self.transformer = Transformer(
dim = dim,
depth = depth,
dim_head = dim_head,
heads = heads,
ff_mult = ff_mult,
attn_dropout=attn_dropout,
attn_flash = attn_flash,
adaptive_rmsnorm = True,
adaptive_rmsnorm_cond_dim_in = time_hidden_dim
)
dim_out = audio_enc_dec.latent_dim if exists(audio_enc_dec) else dim_in
self.to_pred = nn.Linear(dim, dim_out, bias = False)
@property
def device(self):
return next(self.parameters()).device
@torch.inference_mode()
def forward_with_cond_scale(
self,
*args,
cond_scale = 1.,
**kwargs
):
logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1.:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(
self,
x,
*,
phoneme_ids,
cond,
times,
cond_drop_prob = 0.1,
target = None,
mask = None,
):
batch, seq_len, cond_dim = cond.shape
assert cond_dim == x.shape[-1]
# project in, in case codebook dim is not equal to model dimensions
x, cond = map(self.proj_in, (x, cond))
# auto manage shape of times, for odeint times
if times.ndim == 0:
times = repeat(times, '-> b', b = cond.shape[0])
if times.ndim == 1 and times.shape[0] == 1:
times = repeat(times, '1 -> b', b = cond.shape[0])
# construct mask if not given
if not exists(mask):
if coin_flip():
frac_lengths = torch.zeros((batch,), device = self.device).float().uniform_(*self.frac_lengths_mask)
mask = mask_from_frac_lengths(seq_len, frac_lengths)
else:
mask = prob_mask_like((batch, seq_len), self.p_drop_prob, self.device)
cond = cond * rearrange(~mask, '... -> ... 1')
# classifier free guidance
if cond_drop_prob > 0.:
cond_drop_mask = prob_mask_like(cond.shape[:1], cond_drop_prob, self.device)
cond = torch.where(
rearrange(cond_drop_mask, '... -> ... 1 1'),
self.null_cond,
cond
)
phoneme_ids = torch.where(
rearrange(cond_drop_mask, '... -> ... 1'),
self.null_phoneme_id,
phoneme_ids
)
phoneme_emb = self.to_phoneme_emb(phoneme_ids)
embed = torch.cat((x, phoneme_emb, cond), dim = -1)
x = self.to_embed(embed)
x = self.conv_embed(x) + x
time_emb = self.sinu_pos_emb(times)
# attend
x = self.transformer(x, adaptive_rmsnorm_cond = time_emb)
x = self.to_pred(x)
# if no target passed in, just return logits
if not exists(target):
return x
if not exists(mask):
return F.mse_loss(x, target)
loss = F.mse_loss(x, target, reduction = 'none')
loss = reduce(loss, 'b n d -> b n', 'mean')
loss = loss.masked_fill(~mask, 0.)
# masked mean
num = reduce(loss, 'b n -> b', 'sum')
den = mask.sum(dim = -1).clamp(min = 1e-5)
loss = num / den
return loss.mean()
# wrapper for the CNF
def is_probably_audio_from_shape(t):
return t.ndim == 2 or (t.ndim == 3 and t.shape[1] == 1)
class ConditionalFlowMatcherWrapper(Module):
@beartype
def __init__(
self,
voicebox: VoiceBox,
sigma = 0.,
ode_atol = 1e-5,
ode_rtol = 1e-5,
ode_step_size = 0.0625,
use_torchode = False,
torchdiffeq_ode_method = 'midpoint', # use midpoint for torchdiffeq, as in paper
torchode_method_klass = to.Tsit5, # use tsit5 for torchode, as torchode does not have midpoint (recommended by Bryan @b-chiang)
cond_drop_prob = 0.
):
super().__init__()
self.sigma = sigma
self.voicebox = voicebox
self.cond_drop_prob = cond_drop_prob
self.use_torchode = use_torchode
self.torchode_method_klass = torchode_method_klass
self.odeint_kwargs = dict(
atol = ode_atol,
rtol = ode_rtol,
method = torchdiffeq_ode_method,
options = dict(step_size = ode_step_size)
)
@property
def device(self):
return next(self.parameters()).device
@torch.inference_mode()
def sample(
self,
*,
phoneme_ids,
cond,
mask = None,
steps = 3,
cond_scale = 1.,
decode_to_audio = True
):
shape = cond.shape
batch = shape[0]
# take care of condition as raw audio
cond_is_raw_audio = is_probably_audio_from_shape(cond)
if cond_is_raw_audio:
assert exists(self.voicebox.audio_enc_dec)
self.voicebox.audio_enc_dec.eval()
cond = self.voicebox.audio_enc_dec.encode(cond)
self.voicebox.eval()
def fn(t, x, *, packed_shape = None):
if exists(packed_shape):
x = unpack_one(x, packed_shape, 'b *')
out = self.voicebox.forward_with_cond_scale(
x,
times = t,
phoneme_ids = phoneme_ids,
cond = cond,
cond_scale = cond_scale
)
if exists(packed_shape):
out = rearrange(out, 'b ... -> b (...)')
return out
y0 = torch.randn_like(cond)
t = torch.linspace(0, 1, steps, device = self.device)
if not self.use_torchode:
LOGGER.debug('sampling with torchdiffeq')
trajectory = odeint(fn, y0, t, **self.odeint_kwargs)
sampled = trajectory[-1]
else:
LOGGER.debug('sampling with torchode')
t = repeat(t, 'n -> b n', b = batch)
y0, packed_shape = pack_one(y0, 'b *')
fn = partial(fn, packed_shape = packed_shape)
term = to.ODETerm(fn)
step_method = self.torchode_method_klass(term = term)
step_size_controller = to.IntegralController(
atol = self.odeint_kwargs['atol'],
rtol = self.odeint_kwargs['rtol'],
term = term
)
solver = to.AutoDiffAdjoint(step_method, step_size_controller)
jit_solver = torch.compile(solver)
init_value = to.InitialValueProblem(y0 = y0, t_eval = t)
sol = jit_solver.solve(init_value)
sampled = sol.ys[:, -1]
sampled = unpack_one(sampled, packed_shape, 'b *')
if not decode_to_audio or not exists(self.voicebox.audio_enc_dec):
return sampled
return self.voicebox.audio_enc_dec.decode(sampled)
def forward(
self,
x1,
*,
phoneme_ids,
cond,
mask = None
):
"""
following eq (5) (6) in https://arxiv.org/pdf/2306.15687.pdf
"""
batch, seq_len, dtype, σ = *x1.shape[:2], x1.dtype, self.sigma
# if raw audio is given, convert if audio encoder / decoder was passed in
input_is_raw_audio, cond_is_raw_audio = map(is_probably_audio_from_shape, (x1, cond))
if any([input_is_raw_audio, cond_is_raw_audio]):
assert exists(self.voicebox.audio_enc_dec), 'audio_enc_dec must be set on VoiceBox to train directly on raw audio'
with torch.no_grad():
self.voicebox.audio_enc_dec.eval()
if input_is_raw_audio:
x1 = self.voicebox.audio_enc_dec.encode(x1)
if cond_is_raw_audio:
cond = self.voicebox.audio_enc_dec.encode(cond)
# x0 is gaussian noise
x0 = torch.randn_like(x1)
# random times
times = torch.rand((batch,), dtype = dtype, device = self.device)
t = rearrange(times, 'b -> b 1 1')
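# Hedged restatement of eq (5)/(6) as implemented below: with x0 ~ N(0, I) and data x1,
#   w    = (1 - (1 - sigma) * t) * x0 + t * x1    # the conditional flow psi_t(x0)
#   flow = x1 - (1 - sigma) * x0                  # its time derivative, used as the regression target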
# sample xt (w in the paper)
w = (1 - (1 - σ) * t) * x0 + t * x1
flow = x1 - (1 - σ) * x0
# predict
self.voicebox.train()
loss = self.voicebox(
w,
phoneme_ids = phoneme_ids,
cond = cond,
mask = mask,
times = times,
target = flow,
cond_drop_prob = self.cond_drop_prob
)
return loss
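# Hedged end-to-end sketch (dimensions are illustrative; no audio codec is attached, so x1 and
# cond are already latent feature sequences of shape (batch, seq, dim)):
# >>> model = VoiceBox(num_phoneme_tokens = 256, dim = 512, depth = 2)
# >>> cfm = ConditionalFlowMatcherWrapper(voicebox = model)
# >>> phonemes = torch.randint(0, 256, (2, 1024))
# >>> loss = cfm(torch.randn(2, 1024, 512), phoneme_ids = phonemes, cond = torch.randn(2, 1024, 512))
# >>> loss.backward()
# >>> sampled = cfm.sample(phoneme_ids = phonemes, cond = torch.randn(2, 1024, 512))   # (2, 1024, 512)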
| voicebox-pytorch-main | voicebox_pytorch/voicebox_pytorch.py |
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce
# constants
FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = FlashAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = FlashAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = FlashAttentionConfig(False, True, True)
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Check if mask exists and expand to a shape compatible with scaled_dot_product_attention
# the mask arrives here already rearranged to (b, 1, 1, j) by `forward`, so expand it to (b, h, q_len, j)
if exists(mask):
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
q_len, k_len, device = q.shape[-2], k.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if exists(mask) and mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
if self.flash:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum(f"b h i d, b h j d -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, b h j d -> b h i d", attn, v)
return out
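# standalone sketch (illustrative only): the non-flash einsum path above with explicit
# shapes, where b = batch, h = heads, i / j = query / key length and d = head dimension.
# the numbers are arbitrary
if __name__ == '__main__':
    import torch
    from torch import einsum
    b, h, i, j, d = 2, 4, 8, 8, 16
    q, k, v = (torch.randn(b, h, n, d) for n in (i, j, j))
    sim = einsum('b h i d, b h j d -> b h i j', q, k) * d ** -0.5
    attn = sim.softmax(dim = -1)
    out = einsum('b h i j, b h j d -> b h i d', attn, v)
    assert out.shape == (b, h, i, d)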
| voicebox-pytorch-main | voicebox_pytorch/attend.py |
from setuptools import setup, find_packages
setup(
name = 'x-transformers',
packages = find_packages(exclude=['examples']),
version = '1.21.2',
license='MIT',
description = 'X-Transformers - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/x-transformers',
long_description_content_type = 'text/markdown',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers'
],
install_requires=[
'torch>=1.6',
'einops>=0.6.1'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| x-transformers-main | setup.py |
from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
from x_transformers.autoregressive_wrapper import top_p, top_k, eval_decorator
# helper functions
def exists(val):
return val is not None
def divisible_by(numer, denom):
return (numer % denom) == 0
# xl autoregressive wrapper class
class XLAutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
mems = None,
**kwargs
):
device, max_seq_len = start_tokens.device, self.max_seq_len
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
*all_leading_tokens, _ = start_tokens.split(max_seq_len, dim = -1)
# catch the memory up to the current segment
for leading_tokens in all_leading_tokens:
_, mems = self.net(
leading_tokens,
mems = mems,
return_mems = True,
**kwargs
)
# now start sampling from the current segment
curr_pos = len(all_leading_tokens) * max_seq_len
curr_mems = mems
out = start_tokens
for _ in range(seq_len):
curr_segment_len = out.shape[-1]
is_last_segment_tokens = divisible_by(curr_segment_len, max_seq_len)
x = out[:, curr_pos:]
logits, mems = self.net(
x,
mems = curr_mems,
return_mems = True,
**kwargs
)
logits = logits[:, -1]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
if is_last_segment_tokens:
curr_pos = curr_segment_len
curr_mems = mems
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(
self,
x,
mems = None,
**kwargs
):
ignore_index, max_seq_len = self.ignore_index, self.max_seq_len
x, labels = x[:, :-1], x[:, 1:]
seq_len = x.shape[1]
# prepare chunks
split_x = x.split(max_seq_len, dim = -1)
split_labels = labels.split(max_seq_len, dim = -1)
loss_weights = tuple(map(lambda t: t.shape[-1] / seq_len, split_x))
# go through each chunk and derive weighted losses
total_loss = 0.
for chunk, chunk_labels, loss_weight in zip(split_x, split_labels, loss_weights):
logits, mems = self.net(
chunk,
mems = mems,
return_mems = True,
**kwargs
)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
chunk_labels,
ignore_index = ignore_index
)
total_loss = total_loss + loss * loss_weight
return total_loss
| x-transformers-main | x_transformers/xl_autoregressive_wrapper.py |
from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# nucleus
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
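# illustrative sketch (not used by the wrapper): how the filters above are usually
# combined with temperature and multinomial sampling, mirroring the `generate` loop
# below. the vocab size and temperature here are arbitrary
if __name__ == '__main__':
    import torch
    import torch.nn.functional as F
    logits = torch.randn(1, 100)                 # (batch, vocab)
    filtered = top_k(logits, thres = 0.9)        # keep roughly the top 10% of logits
    probs = F.softmax(filtered / 0.8, dim = -1)  # temperature of 0.8
    sample = torch.multinomial(probs, 1)         # (batch, 1) sampled token ids
    assert sample.shape == (1, 1)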
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.,
add_attn_z_loss = False
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
# whether to add router z-loss
self.add_attn_z_loss = add_attn_z_loss
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
restrict_to_max_seq_len = True,
**kwargs
):
max_seq_len = self.max_seq_len
device = start_tokens.device
num_dims = start_tokens.ndim
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
cache = None
for _ in range(seq_len):
if restrict_to_max_seq_len:
x = out[:, -max_seq_len:]
if exists(cache):
for inter in cache.attn_intermediates:
inter.cached_kv = [t[..., -(max_seq_len - 1):, :] for t in inter.cached_kv]
logits, cache = self.net(
x,
return_intermediates = True,
cache = cache,
**kwargs
)
logits = logits[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio = min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
else:
raise ValueError('filter_logits_fn must be one of top_k, top_p or top_a')
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, **kwargs):
seq, ignore_index, add_attn_z_loss = x.shape[1], self.ignore_index, self.add_attn_z_loss
inp, target = x[:, :-1], x[:, 1:]
inp = torch.where(inp == ignore_index, self.pad_value, inp)
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
logits, cache = self.net(
inp,
return_intermediates = True,
return_attn_z_loss = add_attn_z_loss,
**kwargs
)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
target,
ignore_index = ignore_index
)
if add_attn_z_loss:
loss = loss + cache.attn_z_loss
return loss
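# illustrative sketch: the random self attention mask built in `forward` above when
# `mask_prob > 0`, on a toy batch. not part of the wrapper itself; the batch and
# sequence sizes are arbitrary
if __name__ == '__main__':
    import torch
    batch, seq, mask_prob = 2, 10, 0.3
    inp = torch.randint(0, 20000, (batch, seq))
    rand = torch.randn(inp.shape)
    rand[:, 0] = -torch.finfo(rand.dtype).max        # first token is never masked out
    num_mask = min(int(seq * mask_prob), seq - 1)    # here 3 positions per row
    indices = rand.topk(num_mask, dim = -1).indices
    mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
    assert mask[:, 0].all()                          # first token always visible
    assert ((~mask).sum(dim = -1) == num_mask).all() # exactly num_mask masked per row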
| x-transformers-main | x_transformers/autoregressive_wrapper.py |
import math
from random import random
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from functools import partial, wraps
from inspect import isfunction
from collections import namedtuple
from dataclasses import dataclass
from typing import List, Callable, Optional
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange
from x_transformers.attend import Attend, Intermediates, CascadingHeads
from x_transformers.autoregressive_wrapper import AutoregressiveWrapper
# constants
DEFAULT_DIM_HEAD = 64
@dataclass
class LayerIntermediates:
hiddens: Optional[List[Tensor]] = None
attn_intermediates: Optional[List[Intermediates]] = None
layer_hiddens: Optional[List[Tensor]] = None
attn_z_loss: Optional[Tensor] = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def divisible_by(num, den):
return (num % den) == 0
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
# auxiliary loss helpers
def calc_z_loss(
pre_softmax_attns: List[Tensor],
mask = None,
weight = 1.
):
# the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906
# in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects
# also used in PaLM as one of the measures
lse = 0.
for attn in pre_softmax_attns:
lse = lse + attn.logsumexp(dim = -1)
loss = torch.square(lse)
loss = reduce(loss, 'b h n -> b n', 'sum')
if not exists(mask):
return loss.mean() * weight
loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5)
return loss * weight
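# illustrative sketch: `calc_z_loss` on random pre-softmax attention logits of shape
# (batch, heads, query length, key length), purely for shape intuition. the sizes and
# weight are arbitrary
if __name__ == '__main__':
    import torch
    _attns = [torch.randn(2, 8, 16, 16) for _ in range(3)]  # one tensor per attention layer
    _mask = torch.ones(2, 16).bool()
    _z_loss = calc_z_loss(_attns, mask = _mask, weight = 1e-4)
    assert _z_loss.ndim == 0                                 # scalar auxiliary loss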
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
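# illustrative sketch: `dropout_seq` keeps a random subset of tokens and fixes up the
# mask accordingly, here dropping 25% of an 8 token toy sequence
if __name__ == '__main__':
    import torch
    _seq = torch.randn(2, 8, 4)
    _mask = torch.ones(2, 8).bool()
    _seq_kept, _mask_kept = dropout_seq(_seq, _mask, dropout = 0.25)
    assert _seq_kept.shape == (2, 6, 4) and _mask_kept.shape == (2, 6)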
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
# embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert divisible_by(dim, 2)
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
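# illustrative sketch: the t5 style bucketing above on a few concrete offsets. with
# causal = True, num_buckets = 32 and max_distance = 128, offsets closer than 16 get
# their own bucket, larger offsets share logarithmically spaced buckets, and anything
# past max_distance is clamped into the last bucket
if __name__ == '__main__':
    import torch
    _rel = torch.tensor([-1, -5, -15, -16, -64, -127, -500])
    _buckets = RelativePositionBias._relative_position_bucket(
        _rel, causal = True, num_buckets = 32, max_distance = 128
    )
    assert _buckets[0].item() == 1 and _buckets[-1].item() == 31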
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
assert i == j
n, device = j, self.device
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device).float()
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
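# illustrative sketch: alibi slopes for 8 heads form the geometric sequence
# 1/2, 1/4, ..., 1/256, and each head's bias is its slope times the negative key
# distance computed in `get_bias` above
if __name__ == '__main__':
    _slopes = AlibiPositionalBias._get_slopes(8)
    assert abs(_slopes[0] - 0.5) < 1e-6 and abs(_slopes[-1] - 1 / 256) < 1e-6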
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512,
interpolation_factor = 1.,
base = 10000,
base_rescale_factor = 1.
):
super().__init__()
# proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
# has some connection to NTK literature
# https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
base *= base_rescale_factor ** (dim / (dim - 2))
inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
assert interpolation_factor >= 1.
self.interpolation_factor = interpolation_factor
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
t = t / self.interpolation_factor
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
rot_dim, seq_len = freqs.shape[-1], t.shape[-2]
freqs = freqs[-seq_len:, :]
# partial rotary embeddings, Wang et al. GPT-J
t, t_unrotated = t[..., :rot_dim], t[..., rot_dim:]
t = (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
return torch.cat((t, t_unrotated), dim = -1)
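# illustrative sketch (with use_xpos = False, so scale is 1): rotary embeddings make
# the query / key dot product depend only on the relative offset between positions,
# checked numerically by shifting the same content by 5 positions
if __name__ == '__main__':
    import torch
    _rot = RotaryEmbedding(dim = 32)
    _freqs, _ = _rot(16, device = torch.device('cpu'))
    _q, _k = torch.randn(1, 1, 16, 32), torch.randn(1, 1, 16, 32)
    _s1 = (apply_rotary_pos_emb(_q, _freqs)[..., 2, :] * apply_rotary_pos_emb(_k, _freqs)[..., 5, :]).sum()
    _q2, _k2 = torch.roll(_q, 5, dims = -2), torch.roll(_k, 5, dims = -2)
    _s2 = (apply_rotary_pos_emb(_q2, _freqs)[..., 7, :] * apply_rotary_pos_emb(_k2, _freqs)[..., 10, :]).sum()
    assert torch.allclose(_s1, _s2, atol = 1e-4)  # same offset of 3 -> same attention score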
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
scale_fn = lambda t: t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.g
class SimpleRMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# feedforward
class GLU(nn.Module):
def __init__(
self,
dim_in,
dim_out,
activation: Callable,
mult_bias = False
):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1.
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate) * self.mult_bias
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
glu_mult_bias = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
if glu:
project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias)
else:
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
)
self.ff = Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else None,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
flash = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
kv_heads = None,
shared_kv = False,
value_dim_head = None,
tensor_product = False, # https://arxiv.org/abs/2208.06061
cascading_heads = False,
add_zero_kv = False, # same as add_zero_attn in pytorch
rotary_embed_values = False,
onnxable = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
assert not (exists(kv_heads) and one_kv_head), 'either attn_one_kv_head is set to True (in which case kv_heads is set to 1), or attn_kv_heads is set, but not both'
value_dim_head = default(value_dim_head, dim_head)
kv_heads = default(kv_heads, heads)
kv_heads = 1 if one_kv_head else kv_heads
assert divisible_by(heads, kv_heads)
self.kv_heads = kv_heads
q_dim = dim_head * heads
k_dim = dim_head * kv_heads
v_dim = value_dim_head * kv_heads
out_dim = value_dim_head * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(heads, 1, dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(heads, 1, dim_head))
assert (not qk_norm) or divisible_by(dim_head, qk_norm_groups), 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# attend class - includes core attention algorithm + talking heads
self.attend = Attend(
heads = heads,
causal = causal,
talking_heads = talking_heads,
dropout = dropout,
sparse_topk = sparse_topk,
qk_norm = qk_norm,
scale = qk_norm_scale if qk_norm else self.scale,
add_zero_kv = add_zero_kv,
flash = flash,
onnxable = onnxable
)
if cascading_heads:
# cascading heads - wrap the Attend logic
self.attend = CascadingHeads(self.attend)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# whether to rotate positions into values, for absolute positions in addition to relative
self.rotary_embed_values = rotary_embed_values
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None,
return_intermediates = False,
cache: Optional[Intermediates] = None,
):
b, n, _, h, kv_h, head_scale, device, has_context = *x.shape, self.heads, self.kv_heads, self.head_scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input, mem_packed_shape = pack([mem, k_input], 'b * d')
v_input, _ = pack([mem, v_input], 'b * d')
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = kv_h), (k, v, r))
if exists(cache) and not has_context:
ck, cv = cache.cached_kv
if exists(mem):
mk, k = unpack(k, mem_packed_shape, 'b * d')
mv, v = unpack(v, mem_packed_shape, 'b * d')
k = torch.cat((ck, k), dim = -2)
v = torch.cat((cv, v), dim = -2)
if exists(mem):
k = torch.cat((mk, k), dim = -2)
v = torch.cat((mv, v), dim = -2)
if return_intermediates:
mem_len = mem.shape[-2] if exists(mem) else 0
cached_kv = (k[..., mem_len:, :], v[..., mem_len:, :])
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
scale = self.qk_norm_scale
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
q = apply_rotary_pos_emb(q, freqs, q_xpos_scale)
k = apply_rotary_pos_emb(k, freqs, k_xpos_scale)
if self.rotary_embed_values:
v = apply_rotary_pos_emb(v, freqs, k_xpos_scale)
input_mask = context_mask if has_context else mask
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
i, j = map(lambda t: t.shape[-2], (q, k))
# determine masking
mask_value = max_neg_value(q)
masks = []
final_attn_mask = None
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
if exists(attn_mask):
assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
masks.append(~attn_mask)
if exists(self.max_attend_past):
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
masks.append(max_attend_past_mask)
if len(masks) > 0:
final_attn_mask = ~or_reduce(masks)
# prepare relative positional bias, if needed
attn_bias = None
if exists(rel_pos):
attn_bias = rel_pos(i, j)
# attention is all we need
out, intermediates = self.attend(
q, k, v,
mask = final_attn_mask,
attn_bias = attn_bias,
prev_attn = prev_attn
)
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
if exists(r):
out = out * r + out
# normformer scaling of heads
if head_scale:
out = out * self.head_scale_params
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# alphafold2 styled gating of the values
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
# combine the heads
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
if not return_intermediates:
return out
intermediates.cached_kv = cached_kv
return out, intermediates
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = 8,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
use_simple_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_interpolation_factor = 1.,
rotary_xpos_scale_base = 512,
rotary_base_rescale_factor = 1.,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
pre_norm_has_final_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
resi_dual_scale = 1.,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.causal = causal
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads'
self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads)
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
if resi_dual:
pre_norm = False
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.'
self.resi_dual_scale = resi_dual_scale
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention'
self.cross_attend = cross_attend
assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm'
if use_scalenorm:
norm_class = ScaleNorm
elif use_rmsnorm:
norm_class = RMSNorm
elif use_simple_rmsnorm:
norm_class = SimpleRMSNorm
else:
norm_class = nn.LayerNorm
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# whether it has post norm
self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
is_last_layer = ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if not pre_norm else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
cache: Optional[LayerIntermediates] = None,
cache_age = 1,
return_hiddens = False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
# initialize accums
hiddens = []
layer_hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
# rotary positions
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
# assume cached key / values
attn_cache = []
if exists(cache):
assert not self.training and self.causal
if cache_age > 0:
x = x[:, -cache_age:] # for spec decoding, may be greater than 1
attn_cache = cache.attn_intermediates
iter_attn_cache = iter(attn_cache)
# outer residual - for resiDual paper
outer_residual = x * self.resi_dual_scale
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
is_last = ind == (len(self.layers) - 1)
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
inner_residual = x
if return_hiddens:
layer_hiddens.append(x)
pre_norm, post_branch_norm, post_main_norm = norm
if exists(pre_norm):
x = pre_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, cache = next(iter_attn_cache, None), mem = layer_mem, return_intermediates = True)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn, cache = next(iter_attn_cache, None), return_intermediates = True)
elif layer_type == 'f':
out = block(x)
if self.resi_dual:
outer_residual = outer_residual + out * self.resi_dual_scale
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, inner_residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if return_hiddens:
layer_hiddens.append(x)
if self.resi_dual:
x = x + self.final_norm(outer_residual)
else:
x = self.final_norm(x)
if not return_hiddens:
return x
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates,
layer_hiddens = layer_hiddens
)
return x, intermediates
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert divisible_by(image_size, patch_size), 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim = -2)
return self.mlp_head(x)
class TransformerWrapper(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
emb_dim = None,
max_mem_len = 0,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1
attn_z_loss_weight = 1e-4
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.init_()
logits_dim = default(logits_dim, num_tokens)
self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
return_attn_z_loss = False,
attn_z_loss_weight = 1e-4,
cache: Optional[LayerIntermediates] = None,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
# whether to append embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
# auto-handle masking after appending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, cache = cache, return_hiddens = True, **kwargs)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_attn_z_loss:
pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates))
intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight)
return_intermediates = True
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
class ContinuousTransformerWrapper(nn.Module):
def __init__(
self,
*,
max_seq_len,
attn_layers,
dim_in = None,
dim_out = None,
emb_dim = None,
max_mem_len = 0,
post_emb_norm = False,
emb_dropout = 0.,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_in = nn.Linear(dim_in, dim) if exists(dim_in) else nn.Identity()
self.attn_layers = attn_layers
self.project_out = nn.Linear(dim, dim_out) if exists(dim_out) else nn.Identity()
def forward(
self,
x,
return_embeddings = False,
return_intermediates = False,
return_mems = False,
mask = None,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
**kwargs
):
x = self.project_in(x)
x = x + self.pos_emb(x, pos = pos)
x = self.post_emb_norm(x)
# whether to append embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
_, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
x = self.emb_dropout(x)
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
out = self.project_out(x) if not return_embeddings else x
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), hiddens))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
class XTransformer(nn.Module):
def __init__(
self,
*,
dim,
tie_token_emb = False,
ignore_index = -100,
pad_value = 0,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
enc_kwargs, kwargs = groupby_prefix_and_trim('enc_', kwargs)
dec_kwargs, kwargs = groupby_prefix_and_trim('dec_', kwargs)
assert 'dim' not in enc_kwargs and 'dim' not in dec_kwargs, 'dimension of either encoder or decoder must be set with `dim` keyword'
enc_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], enc_kwargs)
enc_transformer_kwargs['emb_dropout'] = enc_kwargs.pop('emb_dropout', 0)
enc_transformer_kwargs['num_memory_tokens'] = enc_kwargs.pop('num_memory_tokens', None)
enc_transformer_kwargs['scaled_sinu_pos_emb'] = enc_kwargs.pop('scaled_sinu_pos_emb', False)
enc_transformer_kwargs['use_abs_pos_emb'] = enc_kwargs.pop('use_abs_pos_emb', True)
dec_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], dec_kwargs)
dec_transformer_kwargs['emb_dropout'] = dec_kwargs.pop('emb_dropout', 0)
dec_transformer_kwargs['scaled_sinu_pos_emb'] = dec_kwargs.pop('scaled_sinu_pos_emb', False)
dec_transformer_kwargs['use_abs_pos_emb'] = dec_kwargs.pop('use_abs_pos_emb', True)
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # how many tokens from the encoder to dropout when cross attending from decoder - seen in a couple papers, including Perceiver AR - this will also be very effective regularization when cross attending to very long memories
self.encoder = TransformerWrapper(
**enc_transformer_kwargs,
attn_layers = Encoder(dim = dim, **enc_kwargs)
)
self.decoder = TransformerWrapper(
**dec_transformer_kwargs,
attn_layers = Decoder(dim = dim, cross_attend = True, **dec_kwargs)
)
if tie_token_emb:
self.decoder.token_emb = self.encoder.token_emb
self.decoder = AutoregressiveWrapper(self.decoder, ignore_index=ignore_index, pad_value=pad_value)
@torch.no_grad()
def generate(self, seq_in, seq_out_start, seq_len, mask = None, attn_mask = None, **kwargs):
encodings = self.encoder(seq_in, mask = mask, attn_mask = attn_mask, return_embeddings = True)
return self.decoder.generate(seq_out_start, seq_len, context = encodings, context_mask = mask, **kwargs)
def forward(self, src, tgt, mask = None, attn_mask = None, src_prepend_embeds = None):
if exists(src_prepend_embeds) and exists(mask):
mask = pad_at_dim(mask, (src_prepend_embeds.shape[-2], 0), dim = -1, value = True)
enc = self.encoder(src, mask = mask, attn_mask = attn_mask, prepend_embeds = src_prepend_embeds, return_embeddings = True)
if self.training and self.cross_attn_tokens_dropout > 0:
enc, mask = dropout_seq(enc, mask, self.cross_attn_tokens_dropout)
out = self.decoder(tgt, context = enc, context_mask = mask)
return out
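# minimal end to end usage sketch (illustrative, hyperparameters arbitrary): an
# encoder / decoder pair trained with teacher forcing through the
# AutoregressiveWrapper that XTransformer builds around its decoder
if __name__ == '__main__':
    import torch
    model = XTransformer(
        dim = 512,
        enc_num_tokens = 256, enc_depth = 3, enc_heads = 8, enc_max_seq_len = 1024,
        dec_num_tokens = 256, dec_depth = 3, dec_heads = 8, dec_max_seq_len = 1024,
        tie_token_emb = True
    )
    src = torch.randint(0, 256, (1, 128))
    tgt = torch.randint(0, 256, (1, 128))
    src_mask = torch.ones_like(src).bool()
    loss = model(src, tgt, mask = src_mask)
    loss.backward()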
| x-transformers-main | x_transformers/x_transformers.py |
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
from x_transformers.x_transformers import XTransformer, Encoder, Decoder, CrossAttender, Attention, TransformerWrapper, ViTransformerWrapper, ContinuousTransformerWrapper
from x_transformers.autoregressive_wrapper import AutoregressiveWrapper
from x_transformers.nonautoregressive_wrapper import NonAutoregressiveWrapper
from x_transformers.continuous_autoregressive_wrapper import ContinuousAutoregressiveWrapper
from x_transformers.xl_autoregressive_wrapper import XLAutoregressiveWrapper
| x-transformers-main | x_transformers/__init__.py |
import torch
from torch import nn
import torch.nn.functional as F
def exists(val):
return val is not None
class ContinuousAutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, **kwargs):
device = start_tokens.device
was_training = self.net.training
num_dims = len(start_tokens.shape)
assert num_dims >= 2, 'number of dimensions of your start tokens must be greater or equal to 2'
if num_dims == 2:
start_tokens = start_tokens[None, :]
b, t, _, device = *start_tokens.shape, start_tokens.device
self.net.eval()
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
last = self.net(x, **kwargs)[:, -1:]
out = torch.cat((out, last), dim = -2)
out = out[:, t:]
if num_dims == 2:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, **kwargs):
inp, target = x[:, :-1], x[:, 1:]
mask = kwargs.get('mask', None)
if exists(mask) and mask.shape[1] == x.shape[1]:
mask = mask[:, :-1]
kwargs['mask'] = mask
out = self.net(inp, **kwargs)
loss = F.mse_loss(out, target, reduction = 'none')
if exists(mask):
loss = loss[mask]
return loss.mean()
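# Hedged usage sketch (illustrative only): the wrapper expects a `net` exposing `max_seq_len`
# and mapping a float sequence (batch, seq, dim) to predictions of the same shape.
# `_TinyNet` below is a made-up stand-in, purely to show the training / generation flow.
def _continuous_wrapper_sketch(dim = 8, max_seq_len = 16):
    class _TinyNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.max_seq_len = max_seq_len
            self.proj = nn.Linear(dim, dim)
        def forward(self, x, **kwargs):
            return self.proj(x)
    wrapper = ContinuousAutoregressiveWrapper(_TinyNet())
    seq = torch.randn(1, max_seq_len, dim)
    loss = wrapper(seq)                        # next-step MSE on the shifted sequence
    sampled = wrapper.generate(seq[:, :4], 8)  # continue a length-4 prompt by 8 steps
    return loss, sampled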
| x-transformers-main | x_transformers/continuous_autoregressive_wrapper.py |
from functools import partial
from typing import Optional, Tuple
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from dataclasses import dataclass
from einops import rearrange, repeat
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Optional[Tensor] = None
pre_softmax_attn: Optional[Tensor] = None
post_softmax_attn: Optional[Tensor] = None
cached_kv: Optional[Tuple[Tensor, Tensor]] = None
def to_tuple(self):
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def compact(arr):
return [*filter(exists, arr)]
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# functions for creating causal mask
# need a special one for onnx cpu (no support for .triu)
def create_causal_mask(i, j, device):
return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
def onnx_create_causal_mask(i, j, device):
r = torch.arange(i, device = device)
causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j')
causal_mask = F.pad(causal_mask, (j - i, 0), value = False)
return causal_mask
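# Hedged sanity-check sketch (added for illustration): for i <= j the two constructors above
# agree, producing an (i, j) boolean mask that is True at positions a causal query must not
# attend to. The sizes below are arbitrary.
def _causal_mask_constructors_agree(i = 4, j = 6):
    device = torch.device('cpu')
    return torch.equal(
        create_causal_mask(i, j, device = device),
        onnx_create_causal_mask(i, j, device = device)
    )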
# main class
class Attend(nn.Module):
def __init__(
self,
*,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
sparse_topk = None,
scale = None,
qk_norm = False,
flash = False,
add_zero_kv = False,
onnxable = False
):
super().__init__()
self.scale = scale
self.qk_norm = qk_norm
self.causal = causal
self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# sparse topk
assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
self.sparse_topk = sparse_topk
# add a key / value token composed of zeros
# in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html
self.add_zero_kv = add_zero_kv
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, heads, kv_heads, device = q.shape[-2], q.shape[1], k.shape[1], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
# handle grouped multi-query attention
if kv_heads == 1:
k, v = map(lambda t: rearrange(t, 'b 1 n d -> b n d'), (k, v))
elif kv_heads < heads:
k, v = map(lambda t: repeat(t, 'b kvh n d -> b (r kvh) n d', r = heads // kv_heads), (k, v))
# handle zero kv, as means for allowing network to attend to nothing
if self.add_zero_kv:
k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v))
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (1, 0), value = 0.)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
i, j, dtype = *dots.shape[-2:], dots.dtype
mask_value = -torch.finfo(dots.dtype).max
if exists(self.sparse_topk) and self.sparse_topk < j:
top_values, _ = dots.topk(self.sparse_topk, dim = -1)
sparse_topk_mask = dots < top_values[..., -1:]
mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
if exists(mask):
dots = dots.masked_fill(~mask, mask_value)
if self.causal:
causal_mask = self.create_causal_mask(i, j, device = device)
dots = dots.masked_fill(causal_mask, mask_value)
pre_softmax_attn = dots.clone()
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
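# Hedged usage sketch (illustrative, not part of the original module): Attend consumes
# already-projected q, k, v of shape (batch, heads, seq, dim_head) and returns the attended
# values together with an Intermediates record. Sizes below are arbitrary.
def _attend_usage_sketch():
    attend = Attend(causal = True, dropout = 0.1)
    q, k, v = (torch.randn(2, 8, 128, 64) for _ in range(3))
    out, intermediates = attend(q, k, v)
    return out.shape, intermediates.post_softmax_attn.shape  # (2, 8, 128, 64), (2, 8, 128, 128)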
# cascading heads logic
def to_single_heads(t, dim = 1):
heads = t.unbind(dim = dim)
return tuple(head.unsqueeze(dim) for head in heads)
class CascadingHeads(nn.Module):
def __init__(self, attend: Attend):
super().__init__()
self.attend = attend
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same'
# split inputs into per-head inputs
heads = q.shape[1]
queries = to_single_heads(q)
keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads)
values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads)
mask = (mask,) * heads
attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads)
prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads)
# now loop through each head, without output of previous head summed with the next head
# thus cascading
all_outs = []
all_intermediates = []
prev_head_out = None
for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn):
if exists(prev_head_out):
h_q = h_q + prev_head_out
out, intermediates = self.attend(
h_q, h_k, h_v,
mask = h_mask,
attn_bias = h_attn_bias,
prev_attn = h_prev_attn
)
prev_head_out = out
all_outs.append(out)
all_intermediates.append(intermediates)
# cat all output heads
all_outs = torch.cat(all_outs, dim = 1)
# cat all intermediates, if they exist
qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates))
qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn))
aggregated_intermediates = Intermediates(
qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None,
pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None,
post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None
)
return all_outs, aggregated_intermediates
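# Hedged usage sketch (illustrative only): CascadingHeads wraps an Attend instance and feeds
# each head the residually-summed output of the previous head before attending.
def _cascading_heads_sketch():
    cascading = CascadingHeads(Attend(causal = True))
    q, k, v = (torch.randn(2, 4, 64, 32) for _ in range(3))
    out, _ = cascading(q, k, v)
    return out.shape  # (2, 4, 64, 32)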
| x-transformers-main | x_transformers/attend.py |
import math
from random import random
from contextlib import nullcontext
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange, repeat, pack, unpack
from x_transformers.x_transformers import TransformerWrapper
from typing import Optional
# constants
Losses = namedtuple('Losses', ['loss', 'generator_loss', 'critic_loss'])
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# sampling helpers
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = logits.topk(k, dim = -1)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(2, ind, val)
return probs
def log(t, eps = 1e-10):
return torch.log(t + eps)
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
# prob helpers
def sample_prob(prob):
return random() < prob
def coin_flip():
return sample_prob(0.5)
# tensor helpers
def get_mask_subset_prob(mask, prob, min_mask = 0):
    # uniformly samples a `prob` fraction of the True positions of `mask` per row
    # (at least `min_mask` of them), never selecting positions that are already False
batch, seq, device = *mask.shape, mask.device
num_to_mask = (mask.sum(dim = -1, keepdim = True) * prob).clamp(min = min_mask)
logits = torch.rand((batch, seq), device = device)
logits = logits.masked_fill(~mask, -1)
randperm = logits.argsort(dim = -1).float()
num_padding = (~mask).sum(dim = -1, keepdim = True)
randperm -= num_padding
subset_mask = randperm < num_to_mask
subset_mask.masked_fill_(~mask, False)
return subset_mask
# schedules
def linear_schedule(t):
return 1 - t
def cosine_schedule(t):
""" https://arxiv.org/abs/2202.04200 """
return torch.cos(t * math.pi / 2)
# self token critic
# inspired by Nijkamp et al. - https://aclanthology.org/2021.naacl-main.409/
class SelfCritic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
dim = net.attn_layers.dim
self.to_logits = nn.Linear(dim, 1)
def forward(self, x):
embed = self.net(x, return_embeddings = True)
return self.to_logits(embed)
class NonAutoregressiveWrapper(nn.Module):
"""
https://arxiv.org/abs/1904.09324
https://arxiv.org/abs/2202.04200
"""
def __init__(
self,
net,
*,
mask_id,
steps = 18,
self_cond = False,
self_cond_train_prob = 0.75,
no_replace_prob = 0.15, # which percentage of the tokens masked will stay the same, done in original MLM paper
random_token_prob = 0.1, # which percentage of tokens to be replaced with random token, done in original MLM paper
schedule = 'linear',
can_mask_prev_unmasked = False, # when unmasking, whether it can remask previously unmasked
token_critic: Optional[TransformerWrapper] = None,
self_token_critic = False,
critic_loss_weight = 1.
):
super().__init__()
assert not (self_token_critic and exists(token_critic))
self.net = net
dim = net.emb_dim
self.dim = dim
self.num_tokens = net.num_tokens
self.mask_id = mask_id
# afaict, maskgit paper did not do this
# but may help for self conditioning, as used successfully in original BERT
self.no_replace_prob = no_replace_prob
self.random_token_prob = random_token_prob
self.max_seq_len = net.max_seq_len
self.steps = steps
if callable(schedule):
self.schedule_fn = schedule
        elif schedule == 'linear':
self.schedule_fn = linear_schedule
elif schedule == 'cosine':
self.schedule_fn = cosine_schedule
else:
raise ValueError(f'invalid schedule {schedule}')
self.can_mask_prev_unmasked = can_mask_prev_unmasked
# self conditioning
self.self_cond = self_cond
if self_cond:
self.null_embed = nn.Parameter(torch.randn(dim))
self.to_self_cond = nn.Linear(dim, dim, bias = False) if self_cond else None
self.self_cond_train_prob = self_cond_train_prob
# token critic
self.token_critic = token_critic
if self_token_critic:
self.token_critic = SelfCritic(net)
self.critic_loss_weight = critic_loss_weight
@torch.no_grad()
def generate(
self,
batch_size = None,
start_temperature = 1.,
filter_thres = 0.7,
noise_level_scale = 1.,
**kwargs
):
sample_one = not exists(batch_size)
batch_size = default(batch_size, 1)
device = next(self.net.parameters()).device
was_training = self.training
self.eval()
times = torch.linspace(0., 1., self.steps + 1)
# sequence starts off as all masked
shape = (batch_size, self.max_seq_len)
seq = torch.full(shape, self.mask_id, device = device)
mask = torch.full(shape, True, device = device)
# slowly demask
all_mask_num_tokens = (self.schedule_fn(times[1:]) * self.max_seq_len).long()
# self conditioning
has_self_cond = self.self_cond
last_embed = self.null_embed if has_self_cond else None
for mask_num_tokens, steps_until_x0 in zip(all_mask_num_tokens.tolist(), reversed(range(self.steps))):
self_cond = self.to_self_cond(last_embed) if has_self_cond else None
logits, embeds = self.net(
seq,
sum_embeds = self_cond,
return_logits_and_embeddings = True,
**kwargs
)
if has_self_cond:
last_embed = embeds
if exists(filter_thres):
logits = top_k(logits, filter_thres)
annealing_scale = steps_until_x0 / self.steps
temperature = start_temperature * annealing_scale
probs = (logits / max(temperature, 1e-3)).softmax(dim = -1)
sampled_ids = gumbel_sample(logits, temperature = max(temperature, 1e-3))
seq = torch.where(mask, sampled_ids, seq)
if exists(self.token_critic):
scores = self.token_critic(seq)
scores = rearrange(scores, 'b n 1 -> b n')
scores = scores + noise_level_scale * gumbel_noise(scores) * annealing_scale
else:
scores = 1 - logits.softmax(dim = -1)
scores = scores.gather(2, rearrange(sampled_ids, 'b n -> b n 1'))
scores = rearrange(scores, 'b n 1 -> b n')
if mask_num_tokens == 0:
pass
if not self.can_mask_prev_unmasked:
scores = scores.masked_fill(~mask, -torch.finfo(scores.dtype).max)
mask_indices = scores.topk(mask_num_tokens, dim = -1).indices
mask = torch.zeros_like(scores, dtype = torch.bool).scatter(1, mask_indices, True)
seq = seq.masked_fill(mask, self.mask_id)
self.train(was_training)
if sample_one:
seq = rearrange(seq, '1 n -> n')
return seq
def forward(
self,
x,
only_train_generator = False,
only_train_critic = False,
generator_sample_temperature = None,
**kwargs
):
b, n, device = *x.shape, x.device
assert n == self.max_seq_len
orig_seq = x.clone()
rand_times = torch.empty(b, device = device).uniform_(0, 1)
batched_randperm = torch.rand((b, n), device = device).argsort(dim = -1).float()
rand_probs = self.schedule_fn(rand_times)
num_tokens_mask = (rand_probs * n).clamp(min = 1.)
mask = batched_randperm < rearrange(num_tokens_mask, 'b -> b 1')
# to ensure all tokens produce embeddings, instead of just the ones with [mask] input, as done in seminal BERT MLM paper
# potentially needed for self-conditioning (on embedding) to work well
replace_mask_id_mask = mask.clone()
frac_seq_left = 1.
if self.no_replace_prob > 0. and coin_flip():
frac_seq_left -= self.no_replace_prob
no_replace_prob_mask = get_mask_subset_prob(mask, self.no_replace_prob)
replace_mask_id_mask &= ~no_replace_prob_mask
if self.random_token_prob > 0. and coin_flip():
random_token_prob_mask = get_mask_subset_prob(replace_mask_id_mask, self.random_token_prob * frac_seq_left)
random_tokens = torch.randint(0, self.num_tokens, (b, n), device = device)
x = torch.where(random_token_prob_mask, random_tokens, x)
replace_mask_id_mask &= ~random_token_prob_mask
masked = torch.where(replace_mask_id_mask, self.mask_id, x)
# self conditioning
if self.self_cond:
self_cond = self.null_embed
if sample_prob(self.self_cond_train_prob):
with torch.no_grad():
self_cond = self.net(masked, return_embeddings = True, **kwargs).detach()
kwargs.update(sum_embeds = self.to_self_cond(self_cond))
# logits
context = torch.no_grad if only_train_critic else nullcontext
with context():
logits = self.net(masked, **kwargs)
# cross entropy loss
loss = F.cross_entropy(
logits[mask],
orig_seq[mask]
)
if not exists(self.token_critic) or only_train_generator:
return Losses(loss, loss, None)
sampled_ids = gumbel_sample(logits, temperature = default(generator_sample_temperature, random()))
generated = torch.where(mask, sampled_ids, orig_seq)
critic_logits = self.token_critic(generated)
critic_labels = (sampled_ids != orig_seq).float()
critic_loss = F.binary_cross_entropy_with_logits(
rearrange(critic_logits, '... 1 -> ...'),
critic_labels
)
# determine losses to be returned based on what researcher wants to train
if only_train_critic:
total_loss = critic_loss
loss = None
else:
total_loss = loss + critic_loss * self.critic_loss_weight
return Losses(total_loss, loss, critic_loss)
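# Hedged usage sketch (illustrative; it mirrors the enwik8 training script that appears later
# in this collection). The local Encoder import is an assumption about the package layout.
def _nar_wrapper_sketch(seq_len = 32):
    from x_transformers.x_transformers import Encoder  # assumed available alongside TransformerWrapper
    base = TransformerWrapper(
        num_tokens = 257,           # 256 byte values + 1 reserved [mask] token
        logits_dim = 256,
        max_seq_len = seq_len,
        attn_layers = Encoder(dim = 64, depth = 2, heads = 4)
    )
    model = NonAutoregressiveWrapper(base, mask_id = 256, steps = 4, schedule = 'cosine')
    seq = torch.randint(0, 256, (2, seq_len))
    losses = model(seq)                       # namedtuple Losses(loss, generator_loss, critic_loss)
    sampled = model.generate(batch_size = 2)  # iterative unmasking over `steps` rounds
    return losses.loss, sampled.shape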
| x-transformers-main | x_transformers/nonautoregressive_wrapper.py |
import tqdm
import torch
import torch.optim as optim
from x_transformers import XTransformer
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 32
LEARNING_RATE = 3e-4
GENERATE_EVERY = 100
NUM_TOKENS = 16 + 2
ENC_SEQ_LEN = 32
DEC_SEQ_LEN = 64 + 1
# helpers
def cycle():
while True:
prefix = torch.ones((BATCH_SIZE, 1)).long().cuda()
src = torch.randint(2, NUM_TOKENS, (BATCH_SIZE, ENC_SEQ_LEN)).long().cuda()
tgt = torch.cat((prefix, src, src), 1)
src_mask = torch.ones(BATCH_SIZE, src.shape[1]).bool().cuda()
yield (src, tgt, src_mask)
# instantiate model
model = XTransformer(
dim = 512,
tie_token_emb = True,
return_tgt_loss = True,
enc_num_tokens=NUM_TOKENS,
enc_depth = 3,
enc_heads = 8,
enc_max_seq_len = ENC_SEQ_LEN,
dec_num_tokens = NUM_TOKENS,
dec_depth = 3,
dec_heads = 8,
dec_max_seq_len = DEC_SEQ_LEN
).cuda()
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
src, tgt, src_mask = next(cycle())
loss = model(src, tgt, mask=src_mask)
loss.backward()
print(f'{i}: {loss.item()}')
optim.step()
optim.zero_grad()
if i != 0 and i % GENERATE_EVERY == 0:
model.eval()
src, _, src_mask = next(cycle())
src, src_mask = src[:1], src_mask[:1]
start_tokens = (torch.ones((1, 1)) * 1).long().cuda()
sample = model.generate(src, start_tokens, ENC_SEQ_LEN, mask = src_mask)
        incorrects = (src != sample).sum()
        print("input: ", src)
        print("predicted output: ", sample)
print(f"incorrects: {incorrects}")
| x-transformers-main | examples/toy_tasks/enc_dec_copy.py |
from x_transformers import (
TransformerWrapper,
Encoder,
NonAutoregressiveWrapper
)
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e8)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 250
SEQ_LEN = 256
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
model = TransformerWrapper(
num_tokens = 256 + 1,
logits_dim = 256,
max_seq_len = SEQ_LEN,
attn_layers = Encoder(
dim = 512,
depth = 8,
heads = 8,
dynamic_pos_bias = True
)
)
model = NonAutoregressiveWrapper(
model,
steps = 18,
schedule = 'cosine',
mask_id = 256, # mask id is last token, which is why num_tokens above has a +1 (special token)
self_token_critic = True
)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
train_x, valid_x = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader)).loss
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
val_data = next(val_loader)
loss = model(val_data).loss
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
sample = model.generate()
output_str = decode_tokens(sample)
print(output_str)
| x-transformers-main | examples/enwik8_simple/train_nar.py |
from x_transformers import TransformerWrapper, Decoder
from x_transformers.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 1024
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = TransformerWrapper(
num_tokens = 256,
max_seq_len = SEQ_LEN,
attn_layers = Decoder(dim = 512, depth = 6, heads = 8)
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
train_x, valid_x = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE, drop_last = True))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE, drop_last = True))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| x-transformers-main | examples/enwik8_simple/train.py |
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline
model = AutoModelForCausalLM.from_pretrained("lightonai/RITA_s", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("lightonai/RITA_s")
rita_gen = pipeline('text-generation', model=model, tokenizer=tokenizer)
sequences = rita_gen("MAB", max_length=20, do_sample=True, top_k=950, repetition_penalty=1.2,
num_return_sequences=2, eos_token_id=2)
for seq in sequences:
print(f"seq: {seq['generated_text'].replace(' ', '')}")
| RITA-master | example.py |
import argparse
from itertools import product
import torch
from torch import einsum
assert torch.cuda.is_available(), 'cuda must be available to run benchmark'
from flash_cosine_sim_attention.benchmark import benchmark
from flash_cosine_sim_attention import flash_cosine_sim_attention, l2norm_tensors
# helper functions
def exists(t):
return t is not None
def cast_tuple(t):
return t if isinstance(t, tuple) else (t,)
# argparse
parser = argparse.ArgumentParser()
parser.add_argument('--causal', default = False, action = 'store_true')
parser.add_argument('--mask-prob', type = float, default = 0.)
parser.add_argument('--only-forwards', default = False, action = 'store_true')
parser.add_argument('--only-backwards', default = False, action = 'store_true')
parser.add_argument('--num-times', default = 20, type = int)
args = parser.parse_args()
# constants
BATCH_SIZES = 4
HEADS = 8
DIM = 64
CAUSAL = args.causal
SHOULD_MASK = args.mask_prob > 0.
assert args.mask_prob >= 0 and args.mask_prob < 1.
assert not (args.only_forwards and args.only_backwards)
assert not (CAUSAL and SHOULD_MASK)
TEST_SEQUENCE_LENGTHS = (128, 256, 512, 1024, 2048, 4096, 8192)
TEST_FORWARDS = not args.only_backwards
TEST_BACKWARDS = not args.only_forwards
# simplified cosine sim attention for benchmarking
def simplified_cosine_sim_attention(
q,
k,
v,
scale = 10,
l2norm_qk = True,
causal_mask = None,
mask = None
):
if l2norm_qk:
q, k = l2norm_tensors(q, k)
sim = einsum(f'b h i d, b h j d -> b h i j', q, k)
sim = sim * scale
if exists(mask):
sim = sim.masked_fill(~mask[:, None, None, :], -torch.finfo(sim.dtype).max)
if exists(causal_mask):
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
return einsum(f'b h i j, b h j d -> b h i d', attn, v)
# create benchmark function
fused_attention_fn = benchmark(
flash_cosine_sim_attention,
forwards = TEST_FORWARDS,
backwards = TEST_BACKWARDS,
num_times = args.num_times
)
attention_fn = benchmark(
simplified_cosine_sim_attention,
forwards = TEST_FORWARDS,
backwards = TEST_BACKWARDS,
num_times = args.num_times
)
# all permutations
params = dict((
('batch size', BATCH_SIZES),
('heads', HEADS),
('feature dimension', DIM)
))
permutations = list(product(*map(cast_tuple, params.values())))
for name, dtype in (('float32', torch.float32), ('float16', torch.float16)):
for batch, heads, dim in permutations:
print('-' * 60)
print(f'{name}\t\tbatch: {batch}\theads: {heads}\tdim {dim}')
print('-' * 60)
for seq in TEST_SEQUENCE_LENGTHS:
q = torch.randn(batch, heads, seq, dim, dtype = dtype).cuda().requires_grad_()
k = torch.randn(batch, heads, seq, dim, dtype = dtype).cuda().requires_grad_()
v = torch.randn(batch, heads, seq, dim, dtype = dtype).cuda().requires_grad_()
causal_mask = torch.ones((seq, seq), dtype = torch.bool).cuda().triu(1)
fused_args = dict(causal = CAUSAL)
baseline_args = dict()
if CAUSAL:
baseline_args = {**baseline_args, 'causal_mask': causal_mask}
if SHOULD_MASK:
mask = torch.zeros((batch, seq)).float().cuda().uniform_(0, 1) > args.mask_prob
fused_args = {**fused_args, 'mask': mask}
baseline_args = {**baseline_args, 'mask': mask}
# run benchmarks accounting for oom for baseline
fused_time = fused_attention_fn(q, k, v, **fused_args)
try:
baseline_time = attention_fn(q, k, v, **baseline_args)
except:
torch.cuda.empty_cache()
baseline_time = -1
times_slower = (fused_time / baseline_time) if baseline_time != -1 else 0.
baseline_time_str = 'oom' if baseline_time == -1 else f"{baseline_time:.2f}ms"
print(f'seq_len: {seq}\tslower: {times_slower:.2f}x\tkernel: {fused_time:.2f}ms\tbaseline: {baseline_time_str}')
| flash-cosine-sim-attention-main | benchmark.py |
import sys
from functools import lru_cache
from subprocess import DEVNULL, call
from setuptools import setup, find_packages
import torch
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
# the following code was taken from
# https://github.com/teddykoker/torchsort/blob/main/setup.py
# which in turn was taken from
# https://github.com/idiap/fast-transformers/blob/master/setup.py
exec(open('flash_cosine_sim_attention/version.py').read())
@lru_cache(None)
def cuda_toolkit_available():
try:
call(["nvcc"], stdout = DEVNULL, stderr = DEVNULL)
return True
except FileNotFoundError:
return False
def compile_args():
args = ["-fopenmp", "-ffast-math"]
if sys.platform == "darwin":
args = ["-Xpreprocessor", *args]
return args
def ext_modules():
if not cuda_toolkit_available():
return []
return [
CUDAExtension(
__cuda_pkg_name__,
sources = ["flash_cosine_sim_attention/flash_cosine_sim_attention_cuda.cu"]
)
]
# main setup code
setup(
name = 'flash-cosine-sim-attention',
packages = find_packages(exclude=[]),
version = __version__,
license='MIT',
description = 'Flash Cosine Similarity Attention',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/flash-cosine-sim-attention',
keywords = [
'artificial intelligence',
'deep learning',
'attention mechanism'
],
install_requires=[
'torch>=1.10'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest'
],
ext_modules = ext_modules(),
cmdclass = {"build_ext": BuildExtension},
include_package_data = True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| flash-cosine-sim-attention-main | setup.py |
from flash_cosine_sim_attention.transformer import CosineSimCausalTransformer
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from torch.cuda.amp import autocast, GradScaler
# arguments
parser = argparse.ArgumentParser()
parser.add_argument('--use-cuda-kernel', default = False, action = 'store_true')
parser.add_argument('--use-float32', default = False, action = 'store_true')
parser.add_argument('--seq-len', default = 1024, type = int)
args = parser.parse_args()
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = args.seq_len
USE_AMP = not args.use_float32
print(f'\ntraining at sequence length {args.seq_len} with {"float32" if args.use_float32 else "float16"}\n')
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = CosineSimCausalTransformer(
num_tokens = 256,
dim = 512,
depth = 8,
attn_scale = 1,
attn_l2norm_groups = 8,
dim_head = 64,
pre_norm = True,
non_cosine_sim_attn = False,
max_seq_len = SEQ_LEN,
use_cuda_kernel = args.use_cuda_kernel
)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
x = np.array(np.frombuffer(file.read(int(95e6)), dtype = np.uint8))
train_x, valid_x = np.split(x, [int(90e6)])
data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
        rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
scaler = GradScaler(enabled = USE_AMP)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
optim.zero_grad()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
with autocast(enabled = USE_AMP):
loss = model(next(train_loader), return_loss = True)
scaler.scale(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
scaler.unscale_(optim)
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
scaler.step(optim)
scaler.update()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f"\n\n {prime} \n\n {'-' * 80} \n")
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str + "\n\n")
| flash-cosine-sim-attention-main | train.py |
| flash-cosine-sim-attention-main | tests/__init__.py |
import torch
import pytest
from flash_cosine_sim_attention import plain_cosine_sim_attention, flash_cosine_sim_attention
assert torch.cuda.is_available(), 'cuda must be available'
# helper functions
def not_nan_or_infs(t):
return not (torch.any(torch.isnan(t)) or torch.any(torch.isinf(t)))
def allclose(a, b, atol = 1e-4):
diff = (a - b).abs().amax()
if torch.any(diff > atol):
print(f'diff: {diff}')
return diff <= atol
def exists(t):
return t is not None
def maybe_cpu(t):
if not exists(t):
return None
return t.cpu()
# tests
@pytest.mark.parametrize('causal,mask', [(True, False), (False, True), (False, False)])
@pytest.mark.parametrize('attn_bias', [True, False])
@pytest.mark.parametrize('seq_len', [63, 127])
@pytest.mark.parametrize('dim_head', [32, 64, 96, 128])
@pytest.mark.parametrize('float16', [False, True])
@pytest.mark.parametrize('attn_bias_batch_dim', [False, True])
@pytest.mark.parametrize('single_head_kv', [False, True])
def test_output_equal(
causal,
mask,
attn_bias,
seq_len,
dim_head,
float16,
attn_bias_batch_dim,
single_head_kv
):
batch, heads = 4, 8
dtype, atol = (torch.float16, 1e-1) if float16 else (torch.float32, 1e-4)
kv_shape = (batch, heads, seq_len, dim_head) if not single_head_kv else (batch, seq_len, dim_head)
q = torch.randn(batch, heads, seq_len, dim_head, dtype = dtype).cuda()
k = torch.randn(kv_shape, dtype = dtype).cuda()
v = torch.randn(kv_shape, dtype = dtype).cuda()
attn_mask = torch.randint(0, 2, (batch, seq_len), dtype = torch.bool).cuda() if mask else None
bias = torch.randn(batch if attn_bias_batch_dim else heads, seq_len, seq_len, dtype = dtype).cuda() if attn_bias else None
plain_output = plain_cosine_sim_attention(q, k, v, causal = causal, mask = attn_mask, attn_bias = bias, attn_bias_batch_dim = attn_bias_batch_dim)
flash_output = flash_cosine_sim_attention(q, k, v, causal = causal, mask = attn_mask, attn_bias = bias, attn_bias_batch_dim = attn_bias_batch_dim)
assert not_nan_or_infs(flash_output)
assert allclose(plain_output, flash_output, atol = atol)
@pytest.mark.parametrize('causal,mask', [(True, False), (False, True), (False, False)])
@pytest.mark.parametrize('attn_bias', [True, False])
@pytest.mark.parametrize('seq_len', [63, 127])
@pytest.mark.parametrize('dim_head', [32, 64, 96, 128])
@pytest.mark.parametrize('float16', [False, True])
@pytest.mark.parametrize('attn_bias_batch_dim', [False, True])
@pytest.mark.parametrize('single_head_kv', [False, True])
def test_grad_equal(
causal,
mask,
attn_bias,
seq_len,
dim_head,
float16,
attn_bias_batch_dim,
single_head_kv
):
batch, heads = 4, 8
dtype, atol = (torch.float16, 1e-1) if float16 else (torch.float32, 1e-4)
kv_shape = (batch, heads, seq_len, dim_head)
q = torch.randn(batch, heads, seq_len, dim_head, dtype = dtype).cuda().requires_grad_()
k = torch.randn(kv_shape, dtype = dtype).cuda().requires_grad_()
v = torch.randn(kv_shape, dtype = dtype).cuda().requires_grad_()
attn_mask = torch.randint(0, 2, (batch, seq_len), dtype = torch.bool).cuda() if mask else None
bias = torch.randn(batch if attn_bias_batch_dim else heads, seq_len, seq_len, dtype = dtype).cuda().requires_grad_() if attn_bias else None
plain_output = plain_cosine_sim_attention(q, k, v, causal = causal, mask = attn_mask, attn_bias = bias, attn_bias_batch_dim = attn_bias_batch_dim)
plain_output.sum().backward()
dq, dk, dv = q.grad, k.grad, v.grad
db = bias.grad if attn_bias else None
q.grad, k.grad, v.grad = None, None, None
if attn_bias:
bias.grad = None
flash_output = flash_cosine_sim_attention(q, k, v, causal = causal, mask = attn_mask, attn_bias = bias, attn_bias_batch_dim = attn_bias_batch_dim)
flash_output.sum().backward()
fdq, fdk, fdv = q.grad, k.grad, v.grad
fdb = bias.grad if attn_bias else None
assert not_nan_or_infs(fdv)
assert not_nan_or_infs(fdk)
assert not_nan_or_infs(fdq)
assert allclose(dv, fdv, atol = atol)
assert allclose(dk, fdk, atol = atol)
assert allclose(dq, fdq, atol = atol)
if attn_bias:
assert not_nan_or_infs(fdb)
assert allclose(db, fdb, atol = atol)
# test cpu
@pytest.mark.parametrize('causal,mask', [(True, False), (False, True), (False, False)])
@pytest.mark.parametrize('attn_bias', [True, False])
@pytest.mark.parametrize('seq_len', [63, 127])
@pytest.mark.parametrize('dim_head', [32, 64, 96, 128])
@pytest.mark.parametrize('float16', [False, True])
@pytest.mark.parametrize('attn_bias_batch_dim', [False, True])
@pytest.mark.parametrize('single_head_kv', [False, True])
def test_output_equal_cuda_and_cpu_forward(
causal,
mask,
attn_bias,
seq_len,
dim_head,
float16,
attn_bias_batch_dim,
single_head_kv
):
batch, heads = 4, 8
dtype, atol = (torch.float16, 1e-1) if float16 else (torch.float32, 1e-4)
kv_shape = (batch, heads, seq_len, dim_head) if not single_head_kv else (batch, seq_len, dim_head)
q = torch.randn(batch, heads, seq_len, dim_head, dtype = dtype).cuda()
k = torch.randn(kv_shape, dtype = dtype).cuda()
v = torch.randn(kv_shape, dtype = dtype).cuda()
attn_mask = torch.randint(0, 2, (batch, seq_len), dtype = torch.bool).cuda() if mask else None
bias = torch.randn(batch if attn_bias_batch_dim else heads, seq_len, seq_len, dtype = dtype).cuda() if attn_bias else None
flash_output = flash_cosine_sim_attention(q, k, v, causal = causal, mask = attn_mask, attn_bias = bias, attn_bias_batch_dim = attn_bias_batch_dim)
flash_output_cpu = flash_cosine_sim_attention(q.cpu(), k.cpu(), v.cpu(), causal = causal, mask = maybe_cpu(attn_mask), attn_bias = maybe_cpu(bias), attn_bias_batch_dim = attn_bias_batch_dim)
assert allclose(flash_output.cpu(), flash_output_cpu, atol = atol)
| flash-cosine-sim-attention-main | tests/test.py |
__version__ = '0.1.40'
__cuda_pkg_name__ = f'flash_cosine_sim_attention_cuda_{__version__.replace(".", "_")}'
| flash-cosine-sim-attention-main | flash_cosine_sim_attention/version.py |
import torch
from torch.cuda import synchronize, Event
from functools import wraps, partial
timer = partial(Event, enable_timing = True)
def benchmark(
fn,
*,
num_times = 10,
warmup_iters = 10,
forwards = True,
backwards = False
):
assert forwards or backwards
@wraps(fn)
def inner(*args, **kwargs):
# warmup
for _ in range(warmup_iters):
loss = fn(*args, **kwargs)
if backwards:
loss.sum().backward()
# average across number of function calls
all_measured_times_ms = 0.
for _ in range(num_times):
start_event = timer()
end_event = timer()
if forwards:
start_event.record()
o = fn(*args, **kwargs)
if not backwards:
end_event.record()
if not forwards:
start_event.record()
if backwards:
loss = o.sum()
loss.backward()
end_event.record()
synchronize()
elapsed_time_ms = start_event.elapsed_time(end_event)
all_measured_times_ms += elapsed_time_ms
return all_measured_times_ms / num_times
return inner
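# Hedged usage sketch (illustrative only): `benchmark` wraps a function and returns the
# average wall time in milliseconds over `num_times` calls, optionally timing the backward
# pass as well. CUDA is required, since the timer above is built on torch.cuda events.
def _benchmark_usage_sketch():
    def toy_fn(a, b):
        return a @ b
    timed = benchmark(toy_fn, forwards = True, backwards = False, num_times = 5)
    a = torch.randn(256, 256, device = 'cuda')
    b = torch.randn(256, 256, device = 'cuda')
    return timed(a, b)  # average milliseconds per call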
| flash-cosine-sim-attention-main | flash_cosine_sim_attention/benchmark.py |
from flash_cosine_sim_attention.flash_cosine_sim_attention import flash_cosine_sim_attention, plain_cosine_sim_attention, l2norm_tensors, debug
| flash-cosine-sim-attention-main | flash_cosine_sim_attention/__init__.py |
import torch
from functools import partial
from torch import nn, einsum
import torch.nn.functional as F
try:
from einops import rearrange
except ImportError:
print('pip install einops to use transformer')
from flash_cosine_sim_attention.flash_cosine_sim_attention import plain_cosine_sim_attention, flash_cosine_sim_attention
# helper function
def exists(val):
return val is not None
def init_weight_xavier_normal_(module, beta):
nn.init.xavier_normal_(module.weight.data, gain = beta)
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def non_cosine_sim_attn_fn(q, k, v, **kwargs):
q = q * (q.shape[-1] ** -0.5)
sim = einsum('b h i d, b h j d -> b h i j', q, k)
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = q.device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
return einsum('b h i j, b h j d -> b h i d', attn, v)
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# attention and feedforward
def FeedForward(dim, mult = 4, pre_norm = False):
dim_hidden = int(dim * mult)
return nn.Sequential(
nn.LayerNorm(dim) if pre_norm else nn.Identity(),
nn.Linear(dim, dim_hidden, bias = False),
nn.GELU(),
nn.Linear(dim_hidden, dim, bias = False)
)
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
scale = 8,
l2norm_groups = 1,
pre_norm = False,
use_cuda_kernel = False,
non_cosine_sim_attn = False,
**kwargs
):
super().__init__()
inner_dim = dim_head * heads
self.norm = nn.LayerNorm(dim) if pre_norm else nn.Identity()
self.scale = scale
self.heads = heads
self.l2norm_groups = l2norm_groups
if non_cosine_sim_attn:
self.attn_fn = non_cosine_sim_attn_fn
elif use_cuda_kernel:
self.attn_fn = partial(flash_cosine_sim_attention, **kwargs)
else:
self.attn_fn = plain_cosine_sim_attention
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_k = nn.Linear(dim, inner_dim, bias = False)
self.to_v = nn.Linear(dim, inner_dim, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x):
h, scale, l2norm_groups = self.heads, self.scale, self.l2norm_groups
x = self.norm(x)
q, k, v = self.to_q(x), self.to_k(x), self.to_v(x)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
o = self.attn_fn(q, k, v, causal = True, scale = scale, groups = l2norm_groups)
o = rearrange(o, 'b h n d -> b n (h d)')
return self.to_out(o)
# transformer for testing
class CosineSimCausalTransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
max_seq_len,
depth,
attn_scale = 8,
attn_l2norm_groups = 1,
heads = 8,
dim_head = 64,
use_cuda_kernel = False,
pre_norm = False,
non_cosine_sim_attn = False,
**kwargs
):
super().__init__()
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.residual_scale = 1 if pre_norm else ((2 * depth) ** 0.25)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim, dim_head = dim_head, heads = heads, use_cuda_kernel= use_cuda_kernel, scale = attn_scale, groups = attn_l2norm_groups, pre_norm = pre_norm, non_cosine_sim_attn = non_cosine_sim_attn, **kwargs),
nn.LayerNorm(dim) if not pre_norm else nn.Identity(),
FeedForward(dim, pre_norm = pre_norm),
nn.LayerNorm(dim) if not pre_norm else nn.Identity(),
]))
self.to_logits = nn.Sequential(
nn.LayerNorm(dim) if pre_norm else nn.Identity(),
nn.Linear(dim, num_tokens, bias = False)
)
if not pre_norm:
self.init_(depth)
def init_(self, depth):
nn.init.normal_(self.token_emb.weight, std = 1e-5)
nn.init.normal_(self.pos_emb.weight, std = 1e-5)
init_gain = (8 * depth) ** -0.25
for attn, _, ff, _ in self.layers:
init_weight_xavier_normal_(attn.to_q, 1.)
init_weight_xavier_normal_(attn.to_k, 1.)
init_weight_xavier_normal_(attn.to_v, init_gain)
init_weight_xavier_normal_(attn.to_out, init_gain)
init_weight_xavier_normal_(ff[1], init_gain)
init_weight_xavier_normal_(ff[3], init_gain)
init_weight_xavier_normal_(self.to_logits[-1], 1)
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, temperature = 1., filter_thres = 0.9, **kwargs):
b, n, device = *start_tokens.shape, start_tokens.device
out = start_tokens
for _ in range(seq_len):
logits = self.forward(out[:, -self.max_seq_len:], **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim = -1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim = -1)
return out[:, n:]
def forward(self, x, return_loss = False):
if return_loss:
x, labels = x[:, :-1], x[:, 1:]
x = self.token_emb(x)
x = x + self.pos_emb(torch.arange(x.shape[1], device = x.device))
for attn, attn_norm, ff, ff_norm in self.layers:
x = attn(x) + x * self.residual_scale
x = attn_norm(x)
x = ff(x) + x * self.residual_scale
x = ff_norm(x)
logits = self.to_logits(x)
if not return_loss:
return logits
        loss = F.cross_entropy(rearrange(logits, 'b n c -> b c n'), labels)
return loss
| flash-cosine-sim-attention-main | flash_cosine_sim_attention/transformer.py |
import os
import math
import importlib
from functools import partial, wraps
import torch
from torch import einsum
import torch.nn.functional as F
from torch.autograd import Function
exec(open(os.path.dirname(os.path.abspath(__file__)) + '/version.py').read())
# try to import cuda
try:
cuda_pkg = importlib.import_module(__cuda_pkg_name__)
forward = cuda_pkg.forward
backward = cuda_pkg.backward
debug = cuda_pkg.debug
except ImportError:
print('CUDA extension for flash-cosine-sim-attention was not compiled correctly - please run `pip install flash-cosine-sim-attention --force-reinstall --no-cache-dir`')
# helper functions
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
def l2norm_cpu(t):
eps = 1e-12 if t.dtype == torch.float32 else 1e-3
norm = t.norm(dim = -1)
norm_clamped = torch.where(norm > eps, norm, eps)
return t / norm_clamped[..., None]
def l2norm(t):
if t.data.is_cuda:
return F.normalize(t, dim = -1)
return l2norm_cpu(t)
def grouped_l2norm(t, groups = 1):
shape = t.shape
dim = shape[-1]
t = t.reshape(*shape[:-1], groups, dim // groups)
t = l2norm(t)
return t.reshape(shape)
def l2norm_tensors(*tensors, groups = 1):
assert len(tensors) > 0
dtype = tensors[0].dtype
fn = partial(grouped_l2norm, groups = groups)
tensors = tuple(map(fn, tensors))
tensors = tuple(map(lambda t: t.type(dtype), tensors))
return tensors
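# Hedged illustration (not part of the original module): with groups > 1 the feature dimension
# is split into equal groups and each group is l2-normalized independently, which is what the
# `groups` argument of the attention functions below relies on.
def _grouped_l2norm_sketch():
    q = torch.randn(2, 8, 16, 64)
    k = torch.randn(2, 8, 16, 64)
    q, k = l2norm_tensors(q, k, groups = 8)               # eight groups of 8 dims each
    per_group_norm = q.reshape(2, 8, 16, 8, 8).norm(dim = -1)
    return per_group_norm                                 # every entry is ~1.0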
# original cosine sim attention
# b - batch
# h - heads
# i - src sequence length
# j - target sequence length
# d - feature dimension
def plain_cosine_sim_attention(
q,
k,
v,
mask = None,
attn_bias = None,
scale = 8,
groups = 1,
causal = False,
l2norm_qk = True,
attn_bias_batch_dim = False
):
assert not (causal and exists(mask)), 'mask should not be supplied if causality is needed'
is_merged_batch_heads_query = q.ndim == 3
single_head_kv = k.ndim == 3
if is_merged_batch_heads_query:
        assert k.ndim == 3 and v.ndim == 3, 'if batch and heads are merged for queries, keys and values must also similarly have only 3 dimensions'
attn_bias_batch_dim = True
q = q[:, None, ...]
if l2norm_qk:
q, k = l2norm_tensors(q, k, groups = groups)
kv_einsum_eq = 'b j d' if single_head_kv else 'b h j d'
sim = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k)
sim = sim * scale
if exists(attn_bias):
attn_bias = attn_bias.unsqueeze(1 if attn_bias_batch_dim else 0)
sim = sim + attn_bias
mask_value = -torch.finfo(sim.dtype).max
if causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), device = q.device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, mask_value)
if exists(mask):
sim = sim.masked_fill(~mask[:, None, None, :], mask_value)
attn = sim.softmax(dim = -1)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
if is_merged_batch_heads_query:
out = out.squeeze(1)
return out
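# Hedged usage sketch (illustrative only): the plain einsum implementation above is the
# reference the fused CUDA kernel is checked against in the tests. Shapes are
# (batch, heads, seq, dim_head); `scale` is the fixed temperature applied after l2norm.
def _plain_cosine_sim_attention_sketch():
    q, k, v = (torch.randn(2, 4, 128, 64) for _ in range(3))
    out = plain_cosine_sim_attention(q, k, v, causal = True, scale = 8)
    return out.shape  # (2, 4, 128, 64)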
# cpu forwards
def flash_cosine_sim_attention_cpu(
q, k, v,
mask,
attn_bias,
scale,
causal,
attn_bias_batch_dim,
row_tile_size = 512,
col_tile_size = 512
):
needs_backwards = any([exists(t) and t.requires_grad for t in (q, k, v, attn_bias)])
assert not needs_backwards, 'cpu version does not support backwards'
assert not (causal and exists(mask)), 'mask should not be supplied if causality is needed'
dtype = q.dtype
q, k, v = q.float(), k.float(), v.float()
is_merged_batch_heads_query = q.ndim == 3
single_head_kv = k.ndim == 3
shape = q.shape
col_seq_len = k.shape[-2]
row_seq_len = q.shape[-2]
seq_len_diff = col_seq_len - row_seq_len
row_tiles = math.ceil(row_seq_len / row_tile_size)
col_tiles = math.ceil(col_seq_len / col_tile_size)
max_neg_value = -torch.finfo(q.dtype).max
if is_merged_batch_heads_query:
        assert k.ndim == 3 and v.ndim == 3, 'if batch and heads are merged for queries, keys and values must also similarly have only 3 dimensions'
attn_bias_batch_dim = True
q = q.unsqueeze(1)
if exists(attn_bias):
attn_bias = attn_bias.unsqueeze(1 if attn_bias_batch_dim else 0)
kv_einsum_eq = 'b j d' if single_head_kv else 'b h j d'
# loop over rows and columns
o = torch.zeros_like(q)
l = torch.zeros((*q.shape[:-1], 1))
# prepare mask
if not exists(mask):
mask = (None,) * col_tiles
else:
mask = mask[:, None, None, :]
mask = mask.split(col_tile_size, dim = -1)
if not exists(attn_bias):
attn_bias = (None,) * row_tiles
else:
attn_bias = attn_bias.split(row_tile_size, dim = -2)
row_splits = zip(
q.split(row_tile_size, dim = -2),
o.split(row_tile_size, dim = -2),
l.split(row_tile_size, dim = -2),
attn_bias
)
for ind, (qc, oc, lc, bc) in enumerate(row_splits):
row_chunk_size = qc.shape[-2]
q_start_index = ind * row_tile_size + seq_len_diff
if not exists(bc):
bc = (None,) * col_tiles
else:
bc = bc.split(col_tile_size, dim = -1)
col_splits = zip(
k.split(col_tile_size, dim = -2),
v.split(col_tile_size, dim = -2),
mask,
bc
)
for k_ind, (kc, vc, maskc, bias) in enumerate(col_splits):
col_chunk_size = kc.shape[-2]
k_start_index = k_ind * col_tile_size
if causal and q_start_index >= (k_start_index + col_tile_size - 1):
continue
attn_weights = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', qc, kc) * scale
if exists(bias):
attn_weights += bias
if exists(maskc):
attn_weights.masked_fill_(~maskc, max_neg_value)
if causal and q_start_index < (k_start_index + col_tile_size - 1):
causal_mask = torch.ones((row_chunk_size, col_chunk_size), dtype = torch.bool).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
exp_weights = torch.exp(attn_weights - scale)
if exists(maskc):
exp_weights.masked_fill_(~maskc, 0.)
exp_values = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', exp_weights, vc)
oc.add_(exp_values)
lc.add_(exp_weights.sum(dim = -1, keepdim = True))
o.div_(l.clamp(min = 1e-12))
return o.reshape(shape).type(dtype)
# main class
class FlashCosineSimAttention(Function):
@staticmethod
def forward(
ctx,
q, k, v,
mask,
attn_bias,
scale,
causal,
attn_bias_batch_dim
):
o, inv_l, should_backwards = forward(
q, k, v,
mask,
attn_bias,
attn_bias_batch_dim,
scale,
causal
)
if not should_backwards:
return o
ctx.should_backwards = should_backwards
ctx.save_for_backward(o, inv_l, q, k, v, mask, attn_bias)
ctx.params = (
scale,
causal,
attn_bias_batch_dim
)
return o
@staticmethod
def backward(ctx, do):
assert ctx.should_backwards
o, inv_l, q, k, v, mask, attn_bias = ctx.saved_tensors
(
scale,
causal,
attn_bias_batch_dim
) = ctx.params
dq, dk, dv, db = backward(
do, o, inv_l,
q, k, v,
mask,
attn_bias,
attn_bias_batch_dim,
scale,
causal
)
return dq, dk, dv, None, db, None, None, None, None, None, None, None, None, None, None
flash_cosine_sim_attention_cuda = FlashCosineSimAttention.apply
# wrapper function
def flash_cosine_sim_attention(
q,
k,
v,
mask = None,
attn_bias = None,
scale = 8,
groups = 1,
causal = False,
l2norm_qk = True,
attn_bias_batch_dim = False
):
if l2norm_qk:
q, k = l2norm_tensors(q, k, groups = groups)
fn = flash_cosine_sim_attention_cuda if q.data.is_cuda else flash_cosine_sim_attention_cpu
o = fn(
q, k, v,
mask,
attn_bias,
scale,
causal,
attn_bias_batch_dim
)
return o
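# Hedged usage note (illustrative only): on CUDA tensors this dispatches to the compiled
# kernel (forward and backward, assuming the extension built); on CPU tensors it falls back
# to the tiled pure-PyTorch forward above, which does not support gradients.
def _flash_cosine_sim_attention_sketch():
    q, k, v = (torch.randn(2, 4, 128, 64) for _ in range(3))
    if torch.cuda.is_available():
        q, k, v = (t.cuda().requires_grad_() for t in (q, k, v))
    out = flash_cosine_sim_attention(q, k, v, causal = True)
    return out.shape  # (2, 4, 128, 64)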
| flash-cosine-sim-attention-main | flash_cosine_sim_attention/flash_cosine_sim_attention.py |
from setuptools import setup, find_packages
setup(
name = 'marge-pytorch',
packages = find_packages(),
version = '0.2.9',
license='MIT',
description = 'Marge - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/marge-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers',
'pre-training'
],
install_requires=[
'einops>=0.3',
'faiss-gpu',
'numpy',
'torch>=1.6',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | marge-pytorch-master | setup.py |
from functools import partial
import torch
import random
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
def default(value, default):
return value if value is not None else default
def log(t, eps=1e-9):
return torch.log(t + eps)
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > 1.0 - thres
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = None, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = default(ignore_index, pad_value)
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
input_mask = kwargs.pop('src_mask', None)
if input_mask is None:
input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
input_mask = input_mask[:, -self.max_seq_len:]
logits, *_ = self.net(x, src_mask=input_mask, **kwargs)
logits = logits[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
gumbel_noise = -log(-log(torch.zeros_like(filtered_logits).uniform_(0, 1)))
sample = ((filtered_logits / temperature) + gumbel_noise).argmax(dim=-1)
out = torch.cat((out, sample[:, None]), dim=-1)
input_mask = F.pad(input_mask, (1, 0), value=True)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, *args, **kwargs):
pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
m = kwargs.pop('input_mask', None)
xi, xo = x[:, :-1], x[:, 1:]
if m is not None:
assert m.shape == x.shape[0:2], 'input mask must be the same shape as the input of the auto-regressive wrapper to automatically handle'
kwargs.update(input_mask = m[:, :-1])
out, *rest = self.net(xi, *args, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return (loss, *rest)
| marge-pytorch-master | marge_pytorch/autoregressive_wrapper.py |
from marge_pytorch.marge_pytorch import Marge, TrainingWrapper
from marge_pytorch.autoregressive_wrapper import AutoregressiveWrapper | marge-pytorch-master | marge_pytorch/__init__.py |
import faiss
import math
import numpy as np
from tqdm import tqdm
from einops import rearrange, repeat
from functools import partial
from contextlib import contextmanager
import torch
from torch.utils.data import Dataset, DataLoader
from torch import nn, einsum
import torch.nn.functional as F
from marge_pytorch.autoregressive_wrapper import AutoregressiveWrapper
# helpers
def identity(x, *args, **kwargs):
return x
def exists(x):
return x is not None
def default(x, d):
return x if exists(x) else d
def chunk(chunk_size, l):
for lo in range(0, l, chunk_size):
hi = min(l, lo + chunk_size)
yield slice(lo, hi)
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
@contextmanager
def memmap(*args, **kwargs):
pointer = np.memmap(*args, **kwargs)
yield pointer
del pointer
# attention distillation loss
def distill_attn_loss(evi_dots, doc_similarities, mask = None, eps = 1e-5):
evi_dots = rearrange(evi_dots, 'b l h i n j -> b (l h i) n j')
if exists(mask):
mask = rearrange(mask, 'b n j -> b () n j')
evi_dots.masked_fill_(~mask, 0.)
denom = mask.expand_as(evi_dots).sum(dim = (1, -1))
evi_dots_mean = evi_dots.sum(dim = (1, -1)) / (denom + eps)
else:
evi_dots_mean = evi_dots.mean(dim = (1, -1))
normed_evi_dots = evi_dots_mean.softmax(dim = -1)
normed_evi_dots.detach_()
doc_similarities = doc_similarities.softmax(dim = -1).log()
loss = F.kl_div(doc_similarities, normed_evi_dots, reduction = 'batchmean')
return loss
# helper classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, *args, **kwargs):
x = self.norm(x)
return self.fn(x, *args, **kwargs)
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return F.gelu(gates) * x
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
# to keep the number of parameters / computation constant with respect to non-GLU variant
mult = int(mult / 3 * 2)
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
class SelfAttention(nn.Module):
def __init__(self, dim, heads = 8, causal = True, dropout = 0.):
super().__init__()
self.scale = dim ** -0.5
self.heads = heads
self.causal = causal
self.to_qkv = nn.Linear(dim, dim * 3, bias = False)
self.to_out = nn.Linear(dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x, mask = None):
_, n, _, h, device = *x.shape, self.heads, x.device
qkv = self.to_qkv(x)
q, k, v = rearrange(qkv, 'b n (qkv h d) -> qkv b h n d', h = h, qkv = 3)
dots = einsum('bhid,bhjd->bhij', q, k) * self.scale
mask_value = max_neg_value(dots)
if exists(mask):
mask = mask[:, None, :, None] * mask[:, None, None, :]
dots.masked_fill_(~mask, mask_value)
del mask
if self.causal:
causal_mask = torch.ones(n, n, device=device).triu_(1).bool()
dots.masked_fill_(causal_mask, mask_value)
del causal_mask
attn = dots.softmax(dim=-1)
attn = self.dropout(attn)
out = einsum('bhij,bhjd->bhid', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out
class CrossAttention(nn.Module):
def __init__(self, dim, heads = 8, dropout = 0.):
super().__init__()
self.scale = dim ** -0.5
self.heads = heads
self.to_q = nn.Linear(dim, dim, bias = False)
self.to_kv = nn.Linear(dim, dim * 2, bias = False)
self.beta = nn.Parameter(torch.tensor(1.), requires_grad=True)
self.to_out = nn.Linear(dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x, context, doc_similarities, mask = None, context_mask = None):
b, n, _, h, device = *x.shape, self.heads, x.device
q = self.to_q(x)
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
context_len = context.shape[2]
context = rearrange(context, 'b m n d -> b (m n) d')
context_mask = rearrange(context_mask, 'b m n -> b (m n)') if exists(context_mask) else None
doc_similarities = repeat(doc_similarities, 'b m -> b m n', n=context_len)
doc_similarities = rearrange(doc_similarities, 'b m n -> b (m n)')
doc_similarities = doc_similarities[:, None, None, :] * self.beta
kv = self.to_kv(context)
k, v = rearrange(kv, 'b n (kv h d) -> kv b h n d', h = h, kv = 2)
dots = einsum('bhid,bhjd->bhij', q, k) * self.scale
pre_attn_dots = dots
dots = dots + doc_similarities
if any(map(exists, (mask, context_mask))):
if not exists(mask):
mask = torch.full((b, n), True, dtype=torch.bool, device=device)
if not exists(context_mask):
context_mask = torch.full(context.shape[:2], True, dtype=torch.bool, device=device)
cross_mask = mask[:, None, :, None] * context_mask[:, None, None, :]
mask_value = max_neg_value(dots)
dots.masked_fill_(~cross_mask, mask_value)
del cross_mask
attn = dots.softmax(dim=-1)
attn = self.dropout(attn)
out = einsum('bhij,bhjd->bhid', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out, pre_attn_dots
class Encoder(nn.Module):
def __init__(self, dim, depth, retrieval_depth = 4, heads = 8, ff_mult = 4, attn_dropout = 0., ff_dropout = 0.):
super().__init__()
assert depth > retrieval_depth, f'Depth must be at least the depth set for the retrieval encoder ({retrieval_depth})'
block = lambda: nn.ModuleList([
PreNorm(dim, SelfAttention(dim, causal=False, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim, mult = ff_mult))
])
self.cls = nn.Parameter(torch.zeros(1, dim), requires_grad=True)
self.encoder_head = nn.ModuleList([])
self.encoder_tail = nn.ModuleList([])
for _ in range(retrieval_depth):
self.encoder_head.append(block())
for _ in range(depth - retrieval_depth):
self.encoder_tail.append(block())
def forward(self, x, src_mask = None, return_embed_only = False):
b, _, _ = x.shape
# append cls token
cls_token = repeat(self.cls, 'n d -> b n d', b=b)
x = torch.cat((cls_token, x), dim=1)
src_mask = F.pad(src_mask, (1, 0), value=True) if exists(src_mask) else None
for attn, ff in self.encoder_head:
x = attn(x, mask = src_mask) + x
x = ff(x) + x
cls_tokens = x[:, 0]
if return_embed_only:
return cls_tokens, None
for attn, ff in self.encoder_tail:
x = attn(x, mask = src_mask) + x
x = ff(x) + x
return x[:, 1:], cls_tokens
class Decoder(nn.Module):
def __init__(self, dim, depth, head_depth = 4, heads = 8, ff_mult = 4, attn_dropout = 0., ff_dropout = 0.):
super().__init__()
self.decoder_head = nn.ModuleList([])
self.decoder_tail = nn.ModuleList([])
for _ in range(head_depth):
self.decoder_head.append(nn.ModuleList([
PreNorm(dim, SelfAttention(dim, causal = True, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim))
]))
for _ in range(depth - head_depth):
self.decoder_tail.append(nn.ModuleList([
PreNorm(dim, SelfAttention(dim, causal = True, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim)),
PreNorm(dim, CrossAttention(dim, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim, mult = ff_mult))
]))
def forward(self, x, *, context, similarities, src_mask = None, context_mask = None):
for self_attn, self_ff in self.decoder_head:
x = self_attn(x, mask = src_mask) + x
x = self_ff(x) + x
cross_pre_attns = []
for self_attn, self_ff, cross_attn, cross_ff in self.decoder_tail:
x = self_attn(x, mask = src_mask) + x
x = self_ff(x) + x
x_out, attn = cross_attn(x, context, similarities, mask = src_mask, context_mask = context_mask)
x = x_out + x
x = cross_ff(x) + x
cross_pre_attns.append(attn)
return x, cross_pre_attns
class TransformerWrapper(nn.Module):
def __init__(self, num_tokens, dim, max_seq_len, layers, return_logits = False):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.max_seq_len = max_seq_len
self.layers = layers
self.to_logits = nn.Linear(dim, num_tokens) if return_logits else identity
def forward(self, x, *args, **kwargs):
b, n, device = *x.shape, x.device
assert n <= self.max_seq_len, f'your sequence length {n} needs to be less than or equal to the max sequence length {self.max_seq_len}'
x = self.token_emb(x)
x += self.pos_emb(torch.arange(n, device=device))
x, *out = self.layers(x, *args, **kwargs)
return (self.to_logits(x), *out)
class Marge(nn.Module):
def __init__(
self,
dim,
num_tokens = 20000,
max_seq_len = 1024,
enc_depth = 12,
enc_retrieval_depth = 4,
enc_heads = 8,
enc_ff_mult = 4,
enc_attn_dropout = 0.,
enc_ff_dropout = 0.,
dec_depth = 12,
dec_heads = 8,
dec_ff_mult = 16,
dec_attn_dropout = 0.,
dec_ff_dropout = 0.,
distill_attn = False,
distill_loss_coef = 1.
):
super().__init__()
self.dim = dim
self.encoder = TransformerWrapper(num_tokens, dim, max_seq_len, Encoder(dim, depth = enc_depth, retrieval_depth = enc_retrieval_depth, heads = enc_heads, ff_mult = enc_ff_mult, attn_dropout = enc_attn_dropout, ff_dropout = enc_ff_dropout))
self.decoder = TransformerWrapper(num_tokens, dim, max_seq_len, Decoder(dim, depth = dec_depth, heads = dec_heads, ff_mult = dec_ff_mult, attn_dropout = dec_attn_dropout, ff_dropout = dec_ff_dropout), return_logits = True)
self.encoder.token_emb = self.decoder.token_emb
self.decoder = AutoregressiveWrapper(self.decoder)
# experimental attn distillation settings
self.distill_attn = distill_attn
self.distill_loss_coef = distill_loss_coef
def get_embeds(self, documents, batch_size = 16, masks = None):
embeds = []
batched_documents = documents.split(batch_size)
batched_masks = masks.split(batch_size) if exists(masks) else ([None] * len(batched_documents))
for docs, mask in zip(batched_documents, batched_masks):
embed, *_ = self.encoder(docs, src_mask = mask, return_embed_only = True)
embeds.append(embed)
embeds = torch.cat(embeds)
return F.normalize(embeds, dim=-1)
@torch.no_grad()
def generate(self, prime, seq_len, evidence, mask = None, similarities = None):
b, num_evidences, *_ = evidence.shape
evidence = rearrange(evidence, 'b m n -> (b m) n')
enc_src_mask = rearrange(mask, 'b m n -> (b m) n') if exists(mask) else None
encodings, evidence_embeds = self.encoder(evidence, src_mask = enc_src_mask)
encodings = rearrange(encodings, '(b m) n d -> b m n d', m = num_evidences)
similarities = similarities if exists(similarities) else torch.ones((b, num_evidences)).float().cuda()
context_mask = F.pad(mask, (1, 0), value = True) if exists(mask) else None
return self.decoder.generate(prime, seq_len, context = encodings, similarities = similarities, context_mask = context_mask)
def forward(self, evidence, target, target_embeds, src_mask = None, tgt_mask = None):
num_evidences = evidence.shape[1]
evidence = rearrange(evidence, 'b m n -> (b m) n')
enc_src_mask = rearrange(src_mask, 'b m n -> (b m) n') if exists(src_mask) else None
encodings, evidence_embeds = self.encoder(evidence, src_mask = enc_src_mask)
encodings = rearrange(encodings, '(b m) n d -> b m n d', m = num_evidences)
evidence_embeds = rearrange(evidence_embeds, '(b m) d -> b m d', m = num_evidences)
similarities = einsum('bmd,bd->bm', evidence_embeds, target_embeds)
dec_src_mask = tgt_mask[:, :-1] if exists(tgt_mask) else None
loss, cross_attns = self.decoder(target, context = encodings, similarities = similarities, src_mask = dec_src_mask, context_mask = src_mask)
if self.distill_attn:
cross_attns = torch.stack(cross_attns, dim = 1)
cross_attns = rearrange(cross_attns, 'b l h i (n j) -> b l h i n j', n = num_evidences)
distill_loss = distill_attn_loss(cross_attns, similarities, mask = src_mask)
aux_loss = self.distill_loss_coef * distill_loss
loss = loss + aux_loss
return loss
# training related classes
def remove_target_from_evidence(evidence_ids, target_ids):
b, n = evidence_ids.shape
match_mask = evidence_ids == target_ids[:, None]
rows_without_matches = (match_mask.sum(axis=-1) == 0)[:, None]
remove_mask = np.concatenate((np.full((b, n - 1), False), rows_without_matches), axis=1)
mask = match_mask + remove_mask
filtered_ids = evidence_ids[~mask]
return filtered_ids.reshape(b, n - 1)
class DocumentDataset(Dataset):
def __init__(self, num_docs, doc_seq_len, num_evidences, documents_path, masks_path, num_targets, target_seq_len, target_path, target_masks_path):
super().__init__()
self.shape = (num_docs, doc_seq_len)
self.target_shape = (num_targets, target_seq_len)
self.knn_shape = (num_targets, num_evidences)
self.documents = np.memmap(documents_path, dtype=np.int32, shape=self.shape)
self.targets = np.memmap(target_path, dtype=np.int32, shape=self.target_shape)
self.masks = np.memmap(masks_path, dtype=bool, shape=self.shape) if exists(masks_path) else None # builtin bool - np.bool was removed in newer numpy
self.target_masks = np.memmap(target_masks_path, dtype=bool, shape=self.target_shape) if exists(target_masks_path) else None
self.knn = None
def set_knn_path(self, path):
if exists(self.knn):
del self.knn
self.knn = np.memmap(path, dtype=np.int32, shape=self.knn_shape)
def __len__(self):
return self.target_shape[0]
def __getitem__(self, ind):
assert exists(self.knn), 'The memmap path to the generated k nearest neighbors for evidences must be set for the dataset'
target_data = torch.from_numpy(self.targets[ind, :]).long()
target_masks = torch.from_numpy(self.target_masks[ind, :]) if exists(self.target_masks) else torch.ones_like(target_data).bool()
evidence_ids = self.knn[ind, :]
evidence_data = torch.from_numpy(self.documents[evidence_ids, :]).long()
evidence_masks = torch.from_numpy(self.masks[evidence_ids, :]) if exists(self.masks) else torch.ones_like(evidence_data).bool()
return target_data.cuda(), target_masks.cuda(), evidence_data.cuda(), evidence_masks.cuda()
class FaissANN():
def __init__(
self,
dim,
num_documents,
num_subvectors = 16,
hnsw_m = 32,
nbits = 8
):
super().__init__()
nlist = math.floor(math.sqrt(num_documents))
quantizer = faiss.IndexHNSWFlat(dim, hnsw_m)
index = faiss.IndexIVFPQ(quantizer, dim, nlist, num_subvectors, nbits)
self.index = faiss.index_cpu_to_all_gpus(index)
self.num_training = max(nlist * 10, 256)
def reset(self):
return self.index.reset()
def train(self, x):
return self.index.train(x)
def add(self, x):
return self.index.add(x)
def search(self, x, topk, nprobe=8):
self.index.nprobe = nprobe
return self.index.search(x, k=topk)
class TrainingWrapper(nn.Module):
def __init__(
self,
model,
*,
num_documents,
doc_seq_len,
documents_memmap_path,
masks_memmap_path = None,
num_targets = None,
target_seq_len = None,
target_memmap_path = None,
target_masks_memmap_path = None,
num_evidence = 4,
reindex_batch_size = 4,
use_faiss_ann = False
):
super().__init__()
self.dim = model.dim
self.num_evidence = num_evidence
self.model = model.cuda()
self.num_docs = num_documents
num_targets = default(num_targets, num_documents)
self.num_targets = num_targets
self.doc_shape = (num_documents, doc_seq_len)
self.documents_path = documents_memmap_path
self.separate_target_and_evidence = exists(target_memmap_path)
if self.separate_target_and_evidence:
assert exists(num_targets), 'number of target documents must be defined if target document set is different than evidence document set'
assert exists(target_seq_len), 'target sequence length must be specified'
else:
target_memmap_path = default(target_memmap_path, documents_memmap_path)
target_masks_memmap_path = default(target_masks_memmap_path, masks_memmap_path)
target_seq_len = default(target_seq_len, doc_seq_len)
self.target_shape = (num_targets, target_seq_len)
self.target_path = target_memmap_path
self.knn_path = f'{self.documents_path}.knn'
self.use_faiss_ann = use_faiss_ann
if use_faiss_ann:
self.index = FaissANN(self.dim, self.num_docs)
else:
index = faiss.IndexFlatL2(self.dim)
self.index = faiss.index_cpu_to_all_gpus(index)
self.reindex_batch_size = reindex_batch_size
self.reindex()
self.dataset = DocumentDataset(
num_documents,
doc_seq_len,
num_evidence,
documents_memmap_path,
masks_memmap_path,
num_targets,
target_seq_len,
target_memmap_path,
target_masks_memmap_path
)
self.dataset.set_knn_path(self.knn_path)
def get_dataset(self):
return self.dataset
@torch.no_grad()
def reindex(self):
batch_size = self.reindex_batch_size
def get_embeds(data):
embeds = self.model.get_embeds(data, batch_size = batch_size)
return embeds.detach().cpu().numpy()
with memmap(self.documents_path, dtype=np.int32, shape=self.doc_shape) as (doc_pointer
), memmap(self.target_path, dtype=np.int32, shape=self.target_shape) as (target_pointer
), memmap(self.knn_path, dtype=np.int32, shape=(self.num_docs, self.num_evidence), mode='w+') as knn_writer:
if self.use_faiss_ann:
random_indices = np.random.permutation(self.num_docs)[:self.index.num_training]
np_data = torch.from_numpy(doc_pointer[random_indices]).cuda().long()
train_embeds = get_embeds(np_data)
self.index.train(train_embeds)
total_evidence_chunks = math.ceil(self.num_docs / batch_size)
for data_slice in tqdm(chunk(batch_size, self.num_docs), total=total_evidence_chunks, desc='Adding embedding to indexes'):
np_data = torch.from_numpy(doc_pointer[data_slice, :]).cuda().long()
embeds = get_embeds(np_data)
self.index.add(embeds)
total_target_chunks = math.ceil(self.num_targets / batch_size)
for data_slice in tqdm(chunk(batch_size, self.num_targets), total=total_target_chunks, desc='Fetching and storing nearest neighbors'):
np_data = torch.from_numpy(target_pointer[data_slice, :]).cuda().long()
embeds = get_embeds(np_data)
fetch_num_evidences = self.num_evidence + (0 if self.separate_target_and_evidence else 1)
_, evidence_ids = self.index.search(embeds, fetch_num_evidences)
target_ids = np.arange(data_slice.start, data_slice.stop)
if not self.separate_target_and_evidence:
evidence_ids = remove_target_from_evidence(evidence_ids, target_ids)
knn_writer[data_slice, :] = evidence_ids
self.index.reset()
print('reindexing complete')
def forward(self, data):
targets, target_masks, evidences, evidence_masks = data
target_embeds = self.model.get_embeds(targets, masks = target_masks)
loss = self.model(evidences, targets, target_embeds, src_mask = evidence_masks, tgt_mask = target_masks)
return loss
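# hedged usage sketch (added for illustration, not part of the original file) - the paths, document counts
# and hyperparameters below are hypothetical placeholders; the memmaps must contain pre-tokenized int32
# documents as described by DocumentDataset above, and a CUDA device is required by TrainingWrapper
#
# model = Marge(dim = 512, num_tokens = 20000, max_seq_len = 1024)
#
# trainer = TrainingWrapper(
#     model,
#     num_documents = 10000,
#     doc_seq_len = 1024,
#     documents_memmap_path = './documents.dat',  # hypothetical path
#     num_evidence = 4
# )
#
# dataloader = DataLoader(trainer.get_dataset(), batch_size = 4, shuffle = True)
# optimizer = torch.optim.Adam(trainer.model.parameters(), lr = 1e-4)
#
# for batch in dataloader:
#     loss = trainer(batch)
#     loss.backward()
#     optimizer.step()
#     optimizer.zero_grad()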
| marge-pytorch-master | marge_pytorch/marge_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'deformable-attention',
packages = find_packages(exclude=[]),
version = '0.0.18',
license='MIT',
description = 'Deformable Attention - from the paper "Vision Transformer with Deformable Attention"',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/deformable-attention',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism'
],
install_requires=[
'einops>=0.4',
'torch>=1.10'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| deformable-attention-main | setup.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
# tensor helpers
def create_grid_like(t, dim = 0):
h, w, device = *t.shape[-2:], t.device
grid = torch.stack(torch.meshgrid(
torch.arange(w, device = device),
torch.arange(h, device = device),
indexing = 'xy'), dim = dim)
grid.requires_grad = False
grid = grid.type_as(t)
return grid
def normalize_grid(grid, dim = 1, out_dim = -1):
# normalizes a grid to range from -1 to 1
h, w = grid.shape[-2:]
grid_h, grid_w = grid.unbind(dim = dim)
grid_h = 2.0 * grid_h / max(h - 1, 1) - 1.0
grid_w = 2.0 * grid_w / max(w - 1, 1) - 1.0
return torch.stack((grid_h, grid_w), dim = out_dim)
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
# continuous positional bias from SwinV2
class CPB(nn.Module):
""" https://arxiv.org/abs/2111.09883v1 """
def __init__(self, dim, *, heads, offset_groups, depth):
super().__init__()
self.heads = heads
self.offset_groups = offset_groups
self.mlp = nn.ModuleList([])
self.mlp.append(nn.Sequential(
nn.Linear(2, dim),
nn.ReLU()
))
for _ in range(depth - 1):
self.mlp.append(nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU()
))
self.mlp.append(nn.Linear(dim, heads // offset_groups))
def forward(self, grid_q, grid_kv):
device, dtype = grid_q.device, grid_kv.dtype
grid_q = rearrange(grid_q, 'h w c -> 1 (h w) c')
grid_kv = rearrange(grid_kv, 'b h w c -> b (h w) c')
pos = rearrange(grid_q, 'b i c -> b i 1 c') - rearrange(grid_kv, 'b j c -> b 1 j c')
bias = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
bias = layer(bias)
bias = rearrange(bias, '(b g) i j o -> b (g o) i j', g = self.offset_groups)
return bias
# main class
class DeformableAttention2D(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.,
downsample_factor = 4,
offset_scale = None,
offset_groups = None,
offset_kernel_size = 6,
group_queries = True,
group_key_values = True
):
super().__init__()
offset_scale = default(offset_scale, downsample_factor)
assert offset_kernel_size >= downsample_factor, 'offset kernel size must be greater than or equal to the downsample factor'
assert divisible_by(offset_kernel_size - downsample_factor, 2)
offset_groups = default(offset_groups, heads)
assert divisible_by(heads, offset_groups)
inner_dim = dim_head * heads
self.scale = dim_head ** -0.5
self.heads = heads
self.offset_groups = offset_groups
offset_dims = inner_dim // offset_groups
self.downsample_factor = downsample_factor
self.to_offsets = nn.Sequential(
nn.Conv2d(offset_dims, offset_dims, offset_kernel_size, groups = offset_dims, stride = downsample_factor, padding = (offset_kernel_size - downsample_factor) // 2),
nn.GELU(),
nn.Conv2d(offset_dims, 2, 1, bias = False),
nn.Tanh(),
Scale(offset_scale)
)
self.rel_pos_bias = CPB(dim // 4, offset_groups = offset_groups, heads = heads, depth = 2)
self.dropout = nn.Dropout(dropout)
self.to_q = nn.Conv2d(dim, inner_dim, 1, groups = offset_groups if group_queries else 1, bias = False)
self.to_k = nn.Conv2d(dim, inner_dim, 1, groups = offset_groups if group_key_values else 1, bias = False)
self.to_v = nn.Conv2d(dim, inner_dim, 1, groups = offset_groups if group_key_values else 1, bias = False)
self.to_out = nn.Conv2d(inner_dim, dim, 1)
def forward(self, x, return_vgrid = False):
"""
b - batch
h - heads
x - height
y - width
d - dimension
g - offset groups
"""
heads, b, h, w, downsample_factor, device = self.heads, x.shape[0], *x.shape[-2:], self.downsample_factor, x.device
# queries
q = self.to_q(x)
# calculate offsets - offset MLP shared across all groups
group = lambda t: rearrange(t, 'b (g d) ... -> (b g) d ...', g = self.offset_groups)
grouped_queries = group(q)
offsets = self.to_offsets(grouped_queries)
# calculate grid + offsets
grid = create_grid_like(offsets)
vgrid = grid + offsets
vgrid_scaled = normalize_grid(vgrid)
kv_feats = F.grid_sample(
group(x),
vgrid_scaled,
mode = 'bilinear', padding_mode = 'zeros', align_corners = False)
kv_feats = rearrange(kv_feats, '(b g) d ... -> b (g d) ...', b = b)
# derive key / values
k, v = self.to_k(kv_feats), self.to_v(kv_feats)
# scale queries
q = q * self.scale
# split out heads
q, k, v = map(lambda t: rearrange(t, 'b (h d) ... -> b h (...) d', h = heads), (q, k, v))
# query / key similarity
sim = einsum('b h i d, b h j d -> b h i j', q, k)
# relative positional bias
grid = create_grid_like(x)
grid_scaled = normalize_grid(grid, dim = 0)
rel_pos_bias = self.rel_pos_bias(grid_scaled, vgrid_scaled)
sim = sim + rel_pos_bias
# numerical stability
sim = sim - sim.amax(dim = -1, keepdim = True).detach()
# attention
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
# aggregate and combine heads
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
out = self.to_out(out)
if return_vgrid:
return out, vgrid
return out
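# hedged usage sketch (added for illustration, not part of the original file) - input is a
# (batch, dim, height, width) feature map and the output shape matches the input; the sizes below are arbitrary
if __name__ == '__main__':
    attn = DeformableAttention2D(
        dim = 64,
        dim_head = 32,
        heads = 8,
        downsample_factor = 4,
        offset_kernel_size = 6
    )
    x = torch.randn(1, 64, 64, 64)
    out = attn(x)
    print(out.shape)  # expected: torch.Size([1, 64, 64, 64])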
| deformable-attention-main | deformable_attention/deformable_attention_2d.py |
from deformable_attention.deformable_attention_1d import DeformableAttention1D
from deformable_attention.deformable_attention_2d import DeformableAttention2D
from deformable_attention.deformable_attention_3d import DeformableAttention3D
DeformableAttention = DeformableAttention2D
| deformable-attention-main | deformable_attention/__init__.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
def cast_tuple(x, length = 1):
return x if isinstance(x, tuple) else ((x,) * length)
# tensor helpers
def create_grid_like(t, dim = 0):
f, h, w, device = *t.shape[-3:], t.device
grid = torch.stack(torch.meshgrid(
torch.arange(f, device = device),
torch.arange(h, device = device),
torch.arange(w, device = device),
indexing = 'ij'), dim = dim)
grid.requires_grad = False
grid = grid.type_as(t)
return grid
def normalize_grid(grid, dim = 1, out_dim = -1):
# normalizes a grid to range from -1 to 1
f, h, w = grid.shape[-3:]
grid_f, grid_h, grid_w = grid.unbind(dim = dim)
grid_f = 2.0 * grid_f / max(f - 1, 1) - 1.0
grid_h = 2.0 * grid_h / max(h - 1, 1) - 1.0
grid_w = 2.0 * grid_w / max(w - 1, 1) - 1.0
return torch.stack((grid_f, grid_h, grid_w), dim = out_dim)
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.register_buffer('scale', torch.tensor(scale, dtype = torch.float32))
def forward(self, x):
return x * rearrange(self.scale, 'c -> 1 c 1 1 1')
# continuous positional bias from SwinV2
class CPB(nn.Module):
""" https://arxiv.org/abs/2111.09883v1 """
def __init__(self, dim, *, heads, offset_groups, depth):
super().__init__()
self.heads = heads
self.offset_groups = offset_groups
self.mlp = nn.ModuleList([])
self.mlp.append(nn.Sequential(
nn.Linear(3, dim),
nn.ReLU()
))
for _ in range(depth - 1):
self.mlp.append(nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU()
))
self.mlp.append(nn.Linear(dim, heads // offset_groups))
def forward(self, grid_q, grid_kv):
device, dtype = grid_q.device, grid_kv.dtype
grid_q = rearrange(grid_q, '... c -> 1 (...) c')
grid_kv = rearrange(grid_kv, 'b ... c -> b (...) c')
pos = rearrange(grid_q, 'b i c -> b i 1 c') - rearrange(grid_kv, 'b j c -> b 1 j c')
bias = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
bias = layer(bias)
bias = rearrange(bias, '(b g) i j o -> b (g o) i j', g = self.offset_groups)
return bias
# main class
class DeformableAttention3D(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.,
downsample_factor = 4,
offset_scale = None,
offset_groups = None,
offset_kernel_size = 6,
group_queries = True,
group_key_values = True
):
super().__init__()
downsample_factor = cast_tuple(downsample_factor, length = 3)
offset_kernel_size = cast_tuple(offset_kernel_size, length = 3) # cast to a per-axis tuple so it can be zipped against the downsample factors below
offset_scale = default(offset_scale, downsample_factor)
offset_conv_padding = tuple(map(lambda x: (x[0] - x[1]) / 2, zip(offset_kernel_size, downsample_factor)))
assert all([(padding > 0 and padding.is_integer()) for padding in offset_conv_padding])
offset_groups = default(offset_groups, heads)
assert divisible_by(heads, offset_groups)
inner_dim = dim_head * heads
self.scale = dim_head ** -0.5
self.heads = heads
self.offset_groups = offset_groups
offset_dims = inner_dim // offset_groups
self.downsample_factor = downsample_factor
self.to_offsets = nn.Sequential(
nn.Conv3d(offset_dims, offset_dims, offset_kernel_size, groups = offset_dims, stride = downsample_factor, padding = tuple(map(int, offset_conv_padding))),
nn.GELU(),
nn.Conv3d(offset_dims, 3, 1, bias = False),
nn.Tanh(),
Scale(offset_scale)
)
self.rel_pos_bias = CPB(dim // 4, offset_groups = offset_groups, heads = heads, depth = 2)
self.dropout = nn.Dropout(dropout)
self.to_q = nn.Conv3d(dim, inner_dim, 1, groups = offset_groups if group_queries else 1, bias = False)
self.to_k = nn.Conv3d(dim, inner_dim, 1, groups = offset_groups if group_key_values else 1, bias = False)
self.to_v = nn.Conv3d(dim, inner_dim, 1, groups = offset_groups if group_key_values else 1, bias = False)
self.to_out = nn.Conv3d(inner_dim, dim, 1)
def forward(self, x, return_vgrid = False):
"""
b - batch
h - heads
f - frames
x - height
y - width
d - dimension
g - offset groups
"""
heads, b, f, h, w, downsample_factor, device = self.heads, x.shape[0], *x.shape[-3:], self.downsample_factor, x.device
# queries
q = self.to_q(x)
# calculate offsets - offset MLP shared across all groups
group = lambda t: rearrange(t, 'b (g d) ... -> (b g) d ...', g = self.offset_groups)
grouped_queries = group(q)
offsets = self.to_offsets(grouped_queries)
# calculate grid + offsets
grid = create_grid_like(offsets)
vgrid = grid + offsets
vgrid_scaled = normalize_grid(vgrid)
kv_feats = F.grid_sample(
group(x),
vgrid_scaled,
mode = 'bilinear', padding_mode = 'zeros', align_corners = False)
kv_feats = rearrange(kv_feats, '(b g) d ... -> b (g d) ...', b = b)
# derive key / values
k, v = self.to_k(kv_feats), self.to_v(kv_feats)
# scale queries
q = q * self.scale
# split out heads
q, k, v = map(lambda t: rearrange(t, 'b (h d) ... -> b h (...) d', h = heads), (q, k, v))
# query / key similarity
sim = einsum('b h i d, b h j d -> b h i j', q, k)
# relative positional bias
grid = create_grid_like(x)
grid_scaled = normalize_grid(grid, dim = 0)
rel_pos_bias = self.rel_pos_bias(grid_scaled, vgrid_scaled)
sim = sim + rel_pos_bias
# numerical stability
sim = sim - sim.amax(dim = -1, keepdim = True).detach()
# attention
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
# aggregate and combine heads
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h (f x y) d -> b (h d) f x y', f = f, x = h, y = w)
out = self.to_out(out)
if return_vgrid:
return out, vgrid
return out
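# hedged usage sketch (added for illustration, not part of the original file) - it relies on the
# int -> tuple casting above; input is a (batch, dim, frames, height, width) feature map and the
# output shape matches the input
if __name__ == '__main__':
    attn = DeformableAttention3D(
        dim = 64,
        dim_head = 32,
        heads = 8,
        downsample_factor = 4,
        offset_kernel_size = 6
    )
    x = torch.randn(1, 64, 8, 32, 32)
    out = attn(x)
    print(out.shape)  # expected: torch.Size([1, 64, 8, 32, 32])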
| deformable-attention-main | deformable_attention/deformable_attention_3d.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops.layers.torch import Rearrange
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
# tensor helpers
def grid_sample_1d(feats, grid, *args, **kwargs):
# does 1d grid sample by reshaping it to 2d
grid = rearrange(grid, '... -> ... 1 1')
grid = F.pad(grid, (0, 1), value = 0.)
feats = rearrange(feats, '... -> ... 1')
out = F.grid_sample(feats, grid, **kwargs)
return rearrange(out, '... 1 -> ...')
def normalize_grid(arange, dim = 1, out_dim = -1):
# normalizes 1d sequence to range of -1 to 1
n = arange.shape[-1]
return 2.0 * arange / max(n - 1, 1) - 1.0
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
# continuous positional bias from SwinV2
class CPB(nn.Module):
""" https://arxiv.org/abs/2111.09883v1 """
def __init__(self, dim, *, heads, offset_groups, depth, log_distance = True):
super().__init__()
self.heads = heads
self.offset_groups = offset_groups
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(nn.Sequential(
nn.Linear(1, dim),
nn.ReLU()
))
for _ in range(depth - 1):
self.mlp.append(nn.Sequential(
nn.Linear(dim, dim),
nn.ReLU()
))
self.mlp.append(nn.Linear(dim, heads // offset_groups))
def forward(self, grid_q, grid_kv):
device, dtype = grid_q.device, grid_kv.dtype
grid_q = rearrange(grid_q, 'n -> 1 n')
grid_kv = rearrange(grid_kv, 'b n -> b n')
pos = rearrange(grid_q, 'b i -> b i 1 1') - rearrange(grid_kv, 'b j -> b 1 j 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
bias = pos
for layer in self.mlp:
bias = layer(bias)
bias = rearrange(bias, '(b g) i j o -> b (g o) i j', g = self.offset_groups)
return bias
# main class
class DeformableAttention1D(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.,
downsample_factor = 4,
offset_scale = None,
offset_groups = None,
offset_kernel_size = 6,
cpb_log_distance = True,
group_queries = True,
group_key_values = True
):
super().__init__()
offset_scale = default(offset_scale, downsample_factor)
assert offset_kernel_size >= downsample_factor, 'offset kernel size must be greater than or equal to the downsample factor'
assert divisible_by(offset_kernel_size - downsample_factor, 2)
offset_groups = default(offset_groups, heads)
assert divisible_by(heads, offset_groups)
inner_dim = dim_head * heads
self.scale = dim_head ** -0.5
self.heads = heads
self.offset_groups = offset_groups
offset_dims = inner_dim // offset_groups
self.downsample_factor = downsample_factor
self.to_offsets = nn.Sequential(
nn.Conv1d(offset_dims, offset_dims, offset_kernel_size, groups = offset_dims, stride = downsample_factor, padding = (offset_kernel_size - downsample_factor) // 2),
nn.GELU(),
nn.Conv1d(offset_dims, 1, 1, bias = False),
Rearrange('b 1 n -> b n'),
nn.Tanh(),
Scale(offset_scale)
)
self.rel_pos_bias = CPB(dim // 4, offset_groups = offset_groups, heads = heads, depth = 2, log_distance = cpb_log_distance)
self.dropout = nn.Dropout(dropout)
self.to_q = nn.Conv1d(dim, inner_dim, 1, groups = offset_groups if group_queries else 1, bias = False)
self.to_k = nn.Conv1d(dim, inner_dim, 1, groups = offset_groups if group_key_values else 1, bias = False)
self.to_v = nn.Conv1d(dim, inner_dim, 1, groups = offset_groups if group_key_values else 1, bias = False)
self.to_out = nn.Conv1d(inner_dim, dim, 1)
def forward(self, x, return_vgrid = False):
"""
b - batch
h - heads
n - sequence dimension
d - dimension
g - offset groups
"""
heads, b, n, downsample_factor, device = self.heads, x.shape[0], x.shape[-1], self.downsample_factor, x.device
# queries
q = self.to_q(x)
# calculate offsets - offset MLP shared across all groups
group = lambda t: rearrange(t, 'b (g d) n -> (b g) d n', g = self.offset_groups)
grouped_queries = group(q)
offsets = self.to_offsets(grouped_queries)
# calculate grid + offsets
grid = torch.arange(offsets.shape[-1], device = device)
vgrid = grid + offsets
vgrid_scaled = normalize_grid(vgrid)
kv_feats = grid_sample_1d(
group(x),
vgrid_scaled,
mode = 'bilinear', padding_mode = 'zeros', align_corners = False)
kv_feats = rearrange(kv_feats, '(b g) d n -> b (g d) n', b = b)
# derive key / values
k, v = self.to_k(kv_feats), self.to_v(kv_feats)
# scale queries
q = q * self.scale
# split out heads
q, k, v = map(lambda t: rearrange(t, 'b (h d) n -> b h n d', h = heads), (q, k, v))
# query / key similarity
sim = einsum('b h i d, b h j d -> b h i j', q, k)
# relative positional bias
seq_range = torch.arange(n, device = device)
seq_scaled = normalize_grid(seq_range, dim = 0)
rel_pos_bias = self.rel_pos_bias(seq_scaled, vgrid_scaled)
sim = sim + rel_pos_bias
# numerical stability
sim = sim - sim.amax(dim = -1, keepdim = True).detach()
# attention
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
# aggregate and combine heads
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b (h d) n')
out = self.to_out(out)
if return_vgrid:
return out, vgrid
return out
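# hedged usage sketch (added for illustration, not part of the original file) - input is a
# (batch, dim, seq_len) feature map and the output shape matches the input
if __name__ == '__main__':
    attn = DeformableAttention1D(
        dim = 128,
        downsample_factor = 4,
        offset_kernel_size = 6
    )
    x = torch.randn(1, 128, 512)
    out = attn(x)
    print(out.shape)  # expected: torch.Size([1, 128, 512])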
| deformable-attention-main | deformable_attention/deformable_attention_1d.py |
from setuptools import setup, find_packages
setup(
name = 'hourglass-transformer-pytorch',
packages = find_packages(),
version = '0.0.6',
license='MIT',
description = 'Hourglass Transformer',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/hourglass-transformer-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers'
],
install_requires=[
'einops',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| hourglass-transformer-pytorch-main | setup.py |
from hourglass_transformer_pytorch import HourglassTransformerLM
from hourglass_transformer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = HourglassTransformerLM(
num_tokens = 256,
dim = 512,
max_seq_len = SEQ_LEN,
depth = (4, 2, 4),
shorten_factor = 2,
heads = 8
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy() # frombuffer replaces the deprecated fromstring; copy makes the array writable for torch.from_numpy
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str)
| hourglass-transformer-pytorch-main | train.py |
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, pad_value = 0):
super().__init__()
assert hasattr(net, 'max_seq_len'), 'your transformer class must have max_seq_len set to the maximum sequence length'
self.pad_value = pad_value
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_thres = 0.9, **kwargs):
b, t, device = *start_tokens.shape, start_tokens.device
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
return out
def forward(self, x, **kwargs):
x_inp, x_labels = x[:, :-1], x[:, 1:]
logits = self.net(x_inp, **kwargs)
return F.cross_entropy(logits.transpose(1, 2), x_labels, ignore_index = self.pad_value)
| hourglass-transformer-pytorch-main | hourglass_transformer_pytorch/autoregressive_wrapper.py |
from hourglass_transformer_pytorch.hourglass_transformer_pytorch import HourglassTransformerLM, HourglassTransformer
| hourglass-transformer-pytorch-main | hourglass_transformer_pytorch/__init__.py |
import math
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pad_to_multiple(tensor, multiple, dim = -1, value = 0):
seq_len = tensor.shape[dim]
m = seq_len / multiple
if m.is_integer():
return tensor
remainder = math.ceil(m) * multiple - seq_len
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(tensor, (*pad_offset, 0, remainder), value = value)
def cast_tuple(val, depth = 1):
return val if isinstance(val, tuple) else ((val,) * depth)
# factory
def get_hourglass_transformer(
dim,
*,
depth,
shorten_factor,
attn_resampling,
updown_sample_type,
**kwargs
):
assert isinstance(depth, int) or (isinstance(depth, tuple) and len(depth) == 3), 'depth must be either an integer or a tuple of 3, indicating (pre_transformer_depth, <nested-hour-glass-config>, post_transformer_depth)'
assert not (isinstance(depth, int) and shorten_factor), 'there does not need to be a shortening factor when only a single transformer block is indicated (depth of one integer value)'
if isinstance(depth, int):
return Transformer(dim = dim, depth = depth, **kwargs)
return HourglassTransformer(dim = dim, depth = depth, shorten_factor = shorten_factor, attn_resampling = attn_resampling, updown_sample_type = updown_sample_type, **kwargs)
# up and down sample classes
class NaiveDownsample(nn.Module):
def __init__(self, shorten_factor):
super().__init__()
self.shorten_factor = shorten_factor
def forward(self, x):
return reduce(x, 'b (n s) d -> b n d', 'mean', s = self.shorten_factor)
class NaiveUpsample(nn.Module):
def __init__(self, shorten_factor):
super().__init__()
self.shorten_factor = shorten_factor
def forward(self, x):
return repeat(x, 'b n d -> b (n s) d', s = self.shorten_factor)
class LinearDownsample(nn.Module):
def __init__(self, dim, shorten_factor):
super().__init__()
self.proj = nn.Linear(dim * shorten_factor, dim)
self.shorten_factor = shorten_factor
def forward(self, x):
x = rearrange(x, 'b (n s) d -> b n (s d)', s = self.shorten_factor)
return self.proj(x)
class LinearUpsample(nn.Module):
def __init__(self, dim, shorten_factor):
super().__init__()
self.proj = nn.Linear(dim, dim * shorten_factor)
self.shorten_factor = shorten_factor
def forward(self, x):
x = self.proj(x)
return rearrange(x, 'b n (s d) -> b (n s) d', s = self.shorten_factor)
# classes
class PreNormResidual(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs) + x
class Attention(nn.Module):
def __init__(
self,
dim,
heads = 8,
dim_head = 64,
dropout = 0.,
causal = False
):
super().__init__()
self.heads = heads
self.causal = causal
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x, context = None, mask = None):
h, device = self.heads, x.device
kv_input = default(context, x)
q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
mask_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = rearrange(mask, 'b j -> b () () j')
sim = sim.masked_fill(~mask, mask_value)
if self.causal:
i, j = sim.shape[-2:]
mask = torch.ones(i, j, device = device, dtype = torch.bool).triu_(j - i + 1)
mask = rearrange(mask, 'i j -> () () i j')
sim = sim.masked_fill(mask, mask_value)
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)', h = h)
return self.to_out(out)
def FeedForward(dim, mult = 4, dropout = 0.):
return nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
# transformer classes
class Transformer(nn.Module):
def __init__(
self,
dim,
*,
depth,
causal = False,
heads = 8,
dim_head = 64,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
norm_out = False
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNormResidual(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = attn_dropout, causal = causal)),
PreNormResidual(dim, FeedForward(dim, mult = ff_mult, dropout = ff_dropout))
]))
self.norm = nn.LayerNorm(dim) if norm_out else nn.Identity()
def forward(self, x, context = None, mask = None):
for attn, ff in self.layers:
x = attn(x, context = context, mask = mask)
x = ff(x)
return self.norm(x)
class HourglassTransformer(nn.Module):
def __init__(
self,
dim,
*,
depth,
shorten_factor = 2,
attn_resampling = True,
updown_sample_type = 'naive',
heads = 8,
dim_head = 64,
causal = False,
norm_out = False
):
super().__init__()
assert len(depth) == 3, 'depth should be a tuple of length 3'
assert updown_sample_type in {'naive', 'linear'}, 'downsample / upsample type must be either naive (average pool and repeat) or linear (linear projection and reshape)'
pre_layers_depth, valley_depth, post_layers_depth = depth
if isinstance(shorten_factor, (tuple, list)):
shorten_factor, *rest_shorten_factor = shorten_factor
elif isinstance(valley_depth, int):
shorten_factor, rest_shorten_factor = shorten_factor, None
else:
shorten_factor, rest_shorten_factor = shorten_factor, shorten_factor
transformer_kwargs = dict(
dim = dim,
heads = heads,
dim_head = dim_head
)
self.causal = causal
self.shorten_factor = shorten_factor
if updown_sample_type == 'naive':
self.downsample = NaiveDownsample(shorten_factor)
self.upsample = NaiveUpsample(shorten_factor)
elif updown_sample_type == 'linear':
self.downsample = LinearDownsample(dim, shorten_factor)
self.upsample = LinearUpsample(dim, shorten_factor)
else:
raise ValueError(f'unknown updown_sample_type keyword value - must be either naive or linear for now')
self.valley_transformer = get_hourglass_transformer(
shorten_factor = rest_shorten_factor,
depth = valley_depth,
attn_resampling = attn_resampling,
updown_sample_type = updown_sample_type,
causal = causal,
**transformer_kwargs
)
self.attn_resampling_pre_valley = Transformer(depth = 1, **transformer_kwargs) if attn_resampling else None
self.attn_resampling_post_valley = Transformer(depth = 1, **transformer_kwargs) if attn_resampling else None
self.pre_transformer = Transformer(depth = pre_layers_depth, causal = causal, **transformer_kwargs)
self.post_transformer = Transformer(depth = post_layers_depth, causal = causal, **transformer_kwargs)
self.norm_out = nn.LayerNorm(dim) if norm_out else nn.Identity()
def forward(self, x, mask = None):
# b : batch, n : sequence length, d : feature dimension, s : shortening factor
s, b, n = self.shorten_factor, *x.shape[:2]
# top half of hourglass, pre-transformer layers
x = self.pre_transformer(x, mask = mask)
# pad to multiple of shortening factor, in preparation for pooling
x = pad_to_multiple(x, s, dim = -2)
if exists(mask):
padded_mask = pad_to_multiple(mask, s, dim = -1, value = False)
# save the residual, and for "attention resampling" at downsample and upsample
x_residual = x.clone()
# if autoregressive, do the shift by shortening factor minus one
if self.causal:
shift = s - 1
x = F.pad(x, (0, 0, shift, -shift), value = 0.)
if exists(mask):
padded_mask = F.pad(padded_mask, (shift, -shift), value = False)
# naive average pool
downsampled = self.downsample(x)
if exists(mask):
downsampled_mask = reduce(padded_mask, 'b (n s) -> b n', 'sum', s = s) > 0
else:
downsampled_mask = None
# pre-valley "attention resampling" - they have the pooled token in each bucket attend to the tokens pre-pooled
if exists(self.attn_resampling_pre_valley):
if exists(mask):
attn_resampling_mask = rearrange(padded_mask, 'b (n s) -> (b n) s', s = s)
else:
attn_resampling_mask = None
downsampled = self.attn_resampling_pre_valley(
rearrange(downsampled, 'b n d -> (b n) () d'),
rearrange(x, 'b (n s) d -> (b n) s d', s = s),
mask = attn_resampling_mask
)
downsampled = rearrange(downsampled, '(b n) () d -> b n d', b = b)
# the "valley" - either a regular transformer or another hourglass
x = self.valley_transformer(downsampled, mask = downsampled_mask)
valley_out = x.clone()
# naive repeat upsample
x = self.upsample(x)
# add the residual
x = x + x_residual
# post-valley "attention resampling"
if exists(self.attn_resampling_post_valley):
x = self.attn_resampling_post_valley(
rearrange(x, 'b (n s) d -> (b n) s d', s = s),
rearrange(valley_out, 'b n d -> (b n) () d')
)
x = rearrange(x, '(b n) s d -> b (n s) d', b = b)
# bring sequence back to original length, if it were padded for pooling
x = x[:, :n]
# post-valley transformers
x = self.post_transformer(x, mask = mask)
return self.norm_out(x)
# main class
class HourglassTransformerLM(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
max_seq_len,
depth,
shorten_factor = None,
heads = 8,
dim_head = 64,
attn_resampling = True,
updown_sample_type = 'naive',
causal = True
):
super().__init__()
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.transformer = get_hourglass_transformer(
dim = dim,
depth = depth,
shorten_factor = shorten_factor,
attn_resampling = attn_resampling,
updown_sample_type = updown_sample_type,
dim_head = dim_head,
heads = heads,
causal = causal,
norm_out = True
)
self.to_logits = nn.Linear(dim, num_tokens)
def forward(self, x, mask = None):
device = x.device
x = self.token_emb(x)
pos_emb = self.pos_emb(torch.arange(x.shape[-2], device = device))
x = x + rearrange(pos_emb, 'n d -> () n d')
x = self.transformer(x, mask = mask)
return self.to_logits(x)
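# hedged usage sketch (added for illustration, not part of the original file) - mirrors the
# configuration used in the repository's training script
if __name__ == '__main__':
    lm = HourglassTransformerLM(
        num_tokens = 256,
        dim = 512,
        max_seq_len = 1024,
        depth = (4, 2, 4),
        shorten_factor = 2,
        heads = 8
    )
    tokens = torch.randint(0, 256, (1, 1024))
    logits = lm(tokens)
    print(logits.shape)  # expected: torch.Size([1, 1024, 256])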
| hourglass-transformer-pytorch-main | hourglass_transformer_pytorch/hourglass_transformer_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'htm-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'Hierarchical Transformer Memory - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/htm-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'attention-mechanism',
'memory'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| HTM-pytorch-main | setup.py |
from math import ceil
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pad_to_multiple(t, multiple, dim = -2, value = 0.):
seq_len = t.shape[dim]
pad_to_len = ceil(seq_len / multiple) * multiple
remainder = pad_to_len - seq_len
if remainder == 0:
return t
zeroes = (0, 0) * (-dim - 1)
padded_t = F.pad(t, (*zeroes, remainder, 0), value = value)
return padded_t
# positional encoding
class SinusoidalPosition(nn.Module):
def __init__(
self,
dim,
min_timescale = 2.,
max_timescale = 1e4
):
super().__init__()
freqs = torch.arange(0, dim, min_timescale)
inv_freqs = max_timescale ** (-freqs / dim)
self.register_buffer('inv_freqs', inv_freqs)
def forward(self, x):
seq_len, device = x.shape[-2], x.device
seq = torch.arange(seq_len - 1, -1, -1., device = device) # create on the same device as the input so the positional encoding can be added to CUDA memories
sinusoidal_inp = rearrange(seq, 'n -> n ()') * rearrange(self.inv_freqs, 'd -> () d')
pos_emb = torch.cat((sinusoidal_inp.sin(), sinusoidal_inp.cos()), dim = -1)
return pos_emb
# multi-head attention
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
def forward(
self,
x,
mems,
mask = None
):
h = self.heads
q, k, v = self.to_q(x), *self.to_kv(mems).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b ... (h d) -> (b h) ... d', h = h), (q, k, v))
q = q * self.scale
sim = einsum('b m i d, b m i j d -> b m i j', q, k)
if exists(mask):
mask = repeat(mask, 'b ... -> (b h) ...', h = h)
mask_value = -torch.finfo(sim.dtype).max
sim = sim.masked_fill(~mask, mask_value)
attn = sim.softmax(dim = -1)
out = einsum('... i j, ... i j d -> ... i d', attn, v)
out = rearrange(out, '(b h) ... d -> b ... (h d)', h = h)
return self.to_out(out)
# main class
class HTMAttention(nn.Module):
def __init__(
self,
dim,
heads,
topk_mems = 2,
mem_chunk_size = 32,
dim_head = 64,
add_pos_enc = True,
eps = 1e-5
):
super().__init__()
self.dim = dim
self.eps = eps
self.scale = dim ** -0.5
self.to_summary_queries = nn.Linear(dim, dim)
self.to_summary_keys = nn.Linear(dim, dim)
self.attn = Attention(dim = dim, heads = heads, dim_head = dim_head)
self.topk_mems = topk_mems
self.mem_chunk_size = mem_chunk_size
self.pos_emb = SinusoidalPosition(dim = dim) if add_pos_enc else None
def forward(
self,
queries,
memories,
mask = None,
chunk_attn_mask = None
):
dim, query_len, mem_chunk_size, topk_mems, scale, eps = self.dim, queries.shape[1], self.mem_chunk_size, self.topk_mems, self.scale, self.eps
# pad memories, and the memory mask, if needed
# and then divide into chunks
memories = pad_to_multiple(memories, mem_chunk_size, dim = -2, value = 0.)
memories = rearrange(memories, 'b (n c) d -> b n c d', c = mem_chunk_size)
if exists(mask):
mask = pad_to_multiple(mask, mem_chunk_size, dim = -1, value = False)
mask = rearrange(mask, 'b (n c) -> b n c', c = mem_chunk_size)
# summarize memories through mean-pool, accounting for mask
if exists(mask):
mean_mask = rearrange(mask, '... -> ... ()')
memories = memories.masked_fill(~mean_mask, 0.)
numer = memories.sum(dim = 2)
denom = mean_mask.sum(dim = 2)
summarized_memories = numer / (denom + eps)
else:
summarized_memories = memories.mean(dim = 2)
# derive queries and summarized memory keys
summary_queries = self.to_summary_queries(queries)
summary_keys = self.to_summary_keys(summarized_memories.detach())
# do a single head attention over summary keys
sim = einsum('b i d, b j d -> b i j', summary_queries, summary_keys) * scale
mask_value = -torch.finfo(sim.dtype).max
if exists(mask):
chunk_mask = mask.any(dim = 2)
chunk_mask = rearrange(chunk_mask, 'b j -> b () j')
sim = sim.masked_fill(~chunk_mask, mask_value)
if exists(chunk_attn_mask):
sim = sim.masked_fill(~chunk_attn_mask, mask_value)
topk_logits, topk_indices = sim.topk(k = topk_mems, dim = -1)
weights = topk_logits.softmax(dim = -1)
# ready queries for in-memory attention
queries = repeat(queries, 'b n d -> b k n d', k = topk_mems)
# select the topk memories
memories = repeat(memories, 'b m j d -> b m i j d', i = query_len)
mem_topk_indices = repeat(topk_indices, 'b i m -> b m i j d', j = mem_chunk_size, d = dim)
selected_memories = memories.gather(1, mem_topk_indices)
# positional encoding
if exists(self.pos_emb):
pos_emb = self.pos_emb(memories)
selected_memories = selected_memories + rearrange(pos_emb, 'n d -> () () () n d')
# select the mask
selected_mask = None
if exists(mask):
mask = repeat(mask, 'b m j -> b m i j', i = query_len)
mask_topk_indices = repeat(topk_indices, 'b i m -> b m i j', j = mem_chunk_size)
selected_mask = mask.gather(1, mask_topk_indices)
# now do in-memory attention
within_mem_output = self.attn(
queries,
selected_memories.detach(),
mask = selected_mask
)
# weight the in-memory attention outputs
weighted_output = within_mem_output * rearrange(weights, 'b i m -> b m i ()')
output = weighted_output.sum(dim = 1)
return output
# HTM Block
class HTMBlock(nn.Module):
def __init__(self, dim, **kwargs):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.attn = HTMAttention(dim = dim, **kwargs)
def forward(
self,
queries,
memories,
**kwargs
):
queries = self.norm(queries)
out = self.attn(queries, memories, **kwargs) + queries
return out
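# Minimal usage sketch; the shapes and hyper-parameters below are arbitrary and
# only meant to illustrate the expected inputs.
if __name__ == '__main__':
    block = HTMBlock(dim = 256, heads = 8, topk_mems = 4, mem_chunk_size = 32)
    queries = torch.randn(1, 64, 256)      # (batch, num_queries, dim)
    memories = torch.randn(1, 2048, 256)   # (batch, num_memories, dim)
    mask = torch.ones(1, 2048).bool()      # marks which memories are valid
    out = block(queries, memories, mask = mask)
    print(out.shape)                       # torch.Size([1, 64, 256])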
| HTM-pytorch-main | htm_pytorch/htm_pytorch.py |
from htm_pytorch.htm_pytorch import HTMAttention, HTMBlock
| HTM-pytorch-main | htm_pytorch/__init__.py |
from setuptools import find_packages
import subprocess
from glob import glob
from distutils.core import setup, Extension
# read the contents of your README file
from pathlib import Path
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
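# Query pkg-config for a package and fold its -I/-L/-l flags into the extension
# keyword arguments (include_dirs / library_dirs / libraries).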
def pkgconfig(package, kw):
flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
output = subprocess.getoutput(
'pkg-config --cflags --libs {}'.format(package))
if 'not found' in output:
raise Exception(f"Could not find required package: {package}.")
for token in output.strip().split():
kw.setdefault(flag_map.get(token[:2]), []).append(token[2:])
return kw
sources = ['./libffcv/libffcv.cpp']
extension_kwargs = {
'sources': sources,
'include_dirs': []
}
extension_kwargs = pkgconfig('opencv4', extension_kwargs)
extension_kwargs = pkgconfig('libturbojpeg', extension_kwargs)
extension_kwargs['libraries'].append('pthread')
libffcv = Extension('ffcv._libffcv',
**extension_kwargs)
setup(name='ffcv',
version='0.0.3rc1',
description=' FFCV: Fast Forward Computer Vision ',
author='MadryLab',
author_email='[email protected]',
url='https://github.com/libffcv/ffcv',
license_files = ('LICENSE.txt',),
packages=find_packages(),
long_description=long_description,
long_description_content_type='text/markdown',
ext_modules=[libffcv],
install_requires=[
'terminaltables',
'pytorch_pfn_extras',
'fastargs',
'matplotlib',
'sklearn',
'imgcat',
'pandas',
'assertpy',
'tqdm',
'psutil',
'webdataset',
]
)
| ffcv-main | setup.py |
from tempfile import NamedTemporaryFile
import torch as ch
from tqdm import tqdm
import time
from assertpy import assert_that
import numpy as np
from torch.utils.data import Dataset
from ffcv import DatasetWriter
from ffcv.fields import IntField, NDArrayField
from ffcv import Loader
from ffcv.fields.basics import IntDecoder
from ffcv.fields.ndarray import NDArrayDecoder
from ffcv.loader.loader import OrderOption
from ffcv.transforms import ToDevice, ToTensor, Squeeze
import time
class DummyArrayDataset(Dataset):
def __init__(self, n_samples, shape):
self.n_samples = n_samples
self.shape = shape
def __len__(self):
return self.n_samples
def __getitem__(self, index):
if index >= self.n_samples:
raise IndexError()
np.random.seed(index)
return (np.random.rand(50000) > 0.5).astype('bool'), np.random.rand(50000).astype('float32'), index
def run_experiment_cuda(weight, loader, sync=False):
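    # Presumably the repeated matmuls keep the GPU busy so that, with
    # non_blocking transfers and no explicit synchronize, any incorrect overlap
    # between copies and compute would show up as a mismatch with the synced runs.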
total = 0.
X = ch.empty(2048, 50_000, device=weight.device)
for X_bool, _, __ in tqdm(loader):
if sync: ch.cuda.synchronize()
X.copy_(X_bool)
total += X @ weight
total += X @ weight
total += X @ weight
return total.sum(0)
def run_cuda(weight, sync):
n_samples, shape = (2048 * 10, (50000,))
with NamedTemporaryFile() as handle:
name = handle.name
dataset = DummyArrayDataset(n_samples, shape)
writer = DatasetWriter(name, {
'mask': NDArrayField(dtype=np.dtype('bool'), shape=(50_000,)),
'targets': NDArrayField(dtype=np.dtype('float32'), shape=(50_000,)),
'idx': IntField()
})
writer.from_indexed_dataset(dataset)
loader = Loader(
name,
batch_size=2048,
num_workers=10,
order=OrderOption.QUASI_RANDOM,
indices=np.arange(n_samples),
drop_last=False,
os_cache=True,
pipelines={
'mask': [NDArrayDecoder(), ToTensor(), ToDevice(ch.device('cuda:0'), non_blocking=False)],
'targets': [NDArrayDecoder(), ToTensor(), ToDevice(ch.device('cuda:0'), non_blocking=False)],
'idx': [IntDecoder(), ToTensor(), Squeeze(), ToDevice(ch.device('cuda:0'), non_blocking=False)]
})
return run_experiment_cuda(weight, loader, sync)
def test_cuda():
weight = ch.randn(50_000, 50_000).cuda()
async_1 = run_cuda(weight, False)
sync_1 = run_cuda(weight, True)
sync_2 = run_cuda(weight, True)
print(async_1)
print(sync_1)
print(sync_2)
print(ch.abs(sync_1 - sync_2).max())
print(ch.abs(sync_1 - async_1).max())
assert ch.abs(sync_1 - sync_2).max().cpu().item() < 10., 'Sync-sync mismatch'
assert ch.abs(async_1 - sync_1).max().cpu().item() < 10., 'Async-sync mismatch'
# test_cuda() | ffcv-main | tests/test_cuda_nonblocking.py |
from collections import defaultdict
from tempfile import TemporaryDirectory
from os import path
from typing import Counter
import pytest
from assertpy import assert_that
import numpy as np
from torch.utils.data import Dataset, distributed
from torch.multiprocessing import spawn, Queue
from torch.distributed import init_process_group
from ffcv.loader.loader import ORDER_TYPE, OrderOption
from ffcv.writer import DatasetWriter
from ffcv.fields import IntField, BytesField
from ffcv import Loader
class DummyDataset(Dataset):
def __init__(self, l):
self.l = l
def __len__(self):
return self.l
def __getitem__(self, index):
if index > self.l:
raise IndexError()
return (index, np.sin(np.array([index])).view('<u1'))
def process_work(rank, world_size, fname, order, sync_fname, out_folder, indices):
sync_url = f'file://{sync_fname}'
if world_size > 1:
init_process_group('nccl', sync_url, rank=rank, world_size=world_size)
loader = Loader(fname, 8, num_workers=2, order=order, drop_last=False,
distributed=world_size > 1, indices=indices)
result = []
for _ in range(3):
content = np.concatenate([x[0].numpy().reshape(-1).copy() for x in loader])
result.append(content)
result = np.stack(result)
np.save(path.join(out_folder, f"result-{rank}.npy"), result)
def prep_and_run_test(num_workers, order, with_indices=False):
length = 600
indices = None
if with_indices:
indices = np.random.choice(length, length//2, replace=False)
with TemporaryDirectory() as folder:
name = path.join(folder, 'dataset.beton')
sync_file = path.join(folder, 'share')
dataset = DummyDataset(length)
writer = DatasetWriter(name, {
'index': IntField(),
'value': BytesField()
})
writer.from_indexed_dataset(dataset)
args = (num_workers, name, order, sync_file, folder, indices)
if num_workers > 1:
spawn(process_work, nprocs=num_workers, args=args)
else:
process_work(*((0, ) + args))
results = []
for r in range(num_workers):
array = np.load(path.join(folder,f"result-{r}.npy"))
results.append(array)
results = np.concatenate(results, 1)
# For each epoch
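        # Sequential order must yield identical traversals across epochs while
        # shuffled orders must differ; every sample id must appear at least once
        # and at most twice (distributed sharding may pad with duplicates).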
for i in range(results.shape[0]):
if not with_indices:
if order == OrderOption.SEQUENTIAL and i < results.shape[0] - 1:
assert_that((results[i] == results[i + 1]).all()).is_true()
if order != OrderOption.SEQUENTIAL and i < results.shape[0] - 1:
assert_that((results[i] == results[i + 1]).all()).is_false()
epoch_content = Counter(results[i])
indices_gotten = np.array(sorted(list(epoch_content.keys())))
assert_that(np.all(np.arange(length) == indices_gotten)).is_true()
assert_that(min(epoch_content.values())).is_equal_to(1)
assert_that(max(epoch_content.values())).is_less_than_or_equal_to(2)
else:
assert_that(set(results[i])).is_equal_to(set(indices))
def test_traversal_sequential_1():
prep_and_run_test(1, OrderOption.SEQUENTIAL)
def test_traversal_sequential_2():
prep_and_run_test(2, OrderOption.SEQUENTIAL)
def test_traversal_sequential_3():
prep_and_run_test(3, OrderOption.SEQUENTIAL)
def test_traversal_sequential_4():
prep_and_run_test(4, OrderOption.SEQUENTIAL)
def test_traversal_random_1():
prep_and_run_test(1, OrderOption.RANDOM)
def test_traversal_random_2():
prep_and_run_test(2, OrderOption.RANDOM)
def test_traversal_random_3():
prep_and_run_test(3, OrderOption.RANDOM)
def test_traversal_random_4():
prep_and_run_test(4, OrderOption.RANDOM)
def test_traversal_quasirandom_1():
prep_and_run_test(1, OrderOption.QUASI_RANDOM)
@pytest.mark.skip()
def test_traversal_quasirandom_2():
prep_and_run_test(2, OrderOption.QUASI_RANDOM)
@pytest.mark.skip()
def test_traversal_quasirandom_3():
prep_and_run_test(3, OrderOption.QUASI_RANDOM)
@pytest.mark.skip()
def test_traversal_quasirandom_4():
prep_and_run_test(4, OrderOption.QUASI_RANDOM)
def test_traversal_sequential_distributed_with_indices():
prep_and_run_test(2, OrderOption.SEQUENTIAL, True)
def test_traversal_random_distributed_with_indices():
prep_and_run_test(2, OrderOption.RANDOM, True)
@pytest.mark.skip()
def test_traversal_quasi_random_distributed_with_indices():
prep_and_run_test(2, OrderOption.QUASI_RANDOM, True) | ffcv-main | tests/test_traversal_orders.py |
from dataclasses import replace
import torch as ch
from ffcv.pipeline.allocation_query import AllocationQuery
from ffcv.pipeline.compiler import Compiler
import numpy as np
from typing import Callable
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.pipeline.operation import Operation
from ffcv.transforms.ops import ToTensor
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.loader import Loader
from ffcv.fields import IntField, FloatField, BytesField
from ffcv.fields.basics import FloatDecoder
from ffcv.pipeline.state import State
from test_writer import DummyDataset
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)
class Doubler(Operation):
def generate_code(self) -> Callable:
def code(x, dst):
dst[:x.shape[0]] = x * 2
return dst
return code
def declare_state_and_memory(self, previous_state: State):
return (previous_state, AllocationQuery(previous_state.shape, previous_state.dtype, previous_state.device))
def run_test(bs, exp_length, drop_last=True):
length = 600
batch_size = bs
with NamedTemporaryFile() as handle:
file_name = handle.name
dataset = DummyDataset(length)
writer = DatasetWriter(file_name, {
'index': IntField(),
'value': FloatField()
})
writer.from_indexed_dataset(dataset)
Compiler.set_enabled(True)
loader = Loader(file_name, batch_size, num_workers=5, seed=17,
drop_last=drop_last,
pipelines={
'value': [FloatDecoder(), Doubler(), ToTensor()]
})
assert_that(loader).is_length(exp_length)
another_partial = drop_last
for (batch, _) in loader:
if batch.shape[0] != bs:
assert_that(another_partial).is_false()
another_partial = True
def test_partial():
run_test(7, 85, True)
def test_not_partial():
run_test(7, 86, False)
def test_not_partial_multiple():
run_test(60, 10, False)
def test_partial_multiple():
run_test(60, 10, True)
| ffcv-main | tests/test_partial_batches.py |
import pytest
import numpy as np
from uuid import uuid4
from ffcv.fields.ndarray import NDArrayField, NDArrayDecoder
from ffcv.writer import DatasetWriter
from ffcv.loader import Loader, OrderOption
from tempfile import NamedTemporaryFile
class StringDecoder(NDArrayDecoder):
pass
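# A custom field that stores fixed-length ASCII strings as uint8 arrays: each
# string is padded with pad_char (and truncated) to exactly max_len bytes
# before being handed to the underlying NDArrayField encoder.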
class StringField(NDArrayField):
def __init__(self, max_len: int, pad_char='\0'):
self.max_len = max_len
self.pad_char = pad_char
super().__init__(np.dtype('uint8'), (max_len,))
def encode(self, destination, field, malloc):
padded_field = (field + self.pad_char * self.max_len)[:self.max_len]
field = np.frombuffer(padded_field.encode('ascii'), dtype='uint8')
return super().encode(destination, field, malloc)
MAX_STRING_SIZE = 100
class CaptionDataset:
def __init__(self, N):
self.captions = [str(uuid4())[:np.random.randint(50)] for _ in range(N)]
def __getitem__(self, idx):
return (self.captions[idx],)
def __len__(self):
return len(self.captions)
def test_string_field():
dataset = CaptionDataset(100)
with NamedTemporaryFile() as handle:
writer = DatasetWriter(handle.name, {
'label': StringField(MAX_STRING_SIZE)
})
writer.from_indexed_dataset(dataset)
loader = Loader(handle.name,
batch_size=10,
num_workers=2,
order=OrderOption.RANDOM,
pipelines={
'label': [StringDecoder()]
},
custom_fields={
'label': StringField
})
all_caps = []
for x, in loader:
for cap in x:
all_caps.append(cap.tobytes().decode('ascii').replace('\0', ''))
assert set(all_caps) == set(dataset.captions)
def test_no_custom_field():
dataset = CaptionDataset(100)
with NamedTemporaryFile() as handle:
writer = DatasetWriter(handle.name, {
'label': StringField(MAX_STRING_SIZE)
})
writer.from_indexed_dataset(dataset)
with pytest.raises(ValueError):
Loader(handle.name,
batch_size=10,
num_workers=2,
order=OrderOption.RANDOM,
pipelines={
'label': [StringDecoder()]
})
| ffcv-main | tests/test_custom_field.py |
import numpy as np
import torch as ch
from torch.utils.data import Dataset
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from torchvision.datasets import CIFAR10
from tqdm import tqdm
from ffcv.writer import DatasetWriter
from ffcv.fields import IntField, RGBImageField
from ffcv.fields.decoders import SimpleRGBImageDecoder
from ffcv.loader import Loader
from ffcv.pipeline.compiler import Compiler
from ffcv.transforms import ToTorchImage, ToTensor, NormalizeImage, View, ToDevice
class DummyDataset(Dataset):
def __init__(self, length, height, width):
self.length = length
self.height = height
self.width = width
def __len__(self):
return self.length
def __getitem__(self, index):
if index > self.length:
raise IndexError
dims = (self.height, self.width, 3)
image_data = ((np.ones(dims) * index) % 255).astype('uint8')
result = index,image_data
return result
def test_cpu_normalization():
dataset = DummyDataset(500, 25, 30)
with NamedTemporaryFile() as handle:
name = handle.name
fields = {
'index': IntField(),
'value': RGBImageField(write_mode='raw', jpeg_quality=95)
}
writer = DatasetWriter(name, fields, num_workers=2)
mean = np.array([0, 1, 2])
std = np.array([1, 10, 20])
writer.from_indexed_dataset(dataset, chunksize=5)
loader = Loader(name, batch_size=5, num_workers=2,
pipelines={
'value': [
SimpleRGBImageDecoder(),
NormalizeImage(mean, std, np.float16),
View(np.float16),
ToTensor(),
ToTorchImage(),
]
})
ix = 0
for res in tqdm(loader):
index, images = res
for image in images:
image = image.numpy()
ref_image = dataset[ix][1]
ref_image = ref_image.transpose(2, 0, 1)
ref_image = ref_image.astype(np.float16)
ref_image -= mean[:, None, None]
ref_image /= std[:, None, None]
assert_that(np.allclose(ref_image, image)).is_true()
ix += 1
def test_gpu_normalization():
dataset = DummyDataset(500, 25, 30)
with NamedTemporaryFile() as handle:
name = handle.name
fields = {
'index': IntField(),
'value': RGBImageField(write_mode='raw', jpeg_quality=95)
}
writer = DatasetWriter(name, fields, num_workers=2)
mean = np.array([0, 1, 2])
std = np.array([1, 10, 20])
writer.from_indexed_dataset(dataset, chunksize=5)
loader = Loader(name, batch_size=5, num_workers=2,
pipelines={
'value': [
SimpleRGBImageDecoder(),
ToTensor(),
ToDevice(ch.device('cuda:0')),
ToTorchImage(),
NormalizeImage(mean, std, np.float16),
View(ch.float16),
]
})
ix = 0
for res in tqdm(loader):
_, images = res
for image in images:
image = image.cpu().numpy()
ref_image = dataset[ix][1]
ref_image = ref_image.transpose(2, 0, 1)
ref_image = ref_image.astype(np.float16)
ref_image -= mean[:, None, None]
ref_image /= std[:, None, None]
assert_that(np.allclose(ref_image, image)).is_true()
ix += 1
| ffcv-main | tests/test_image_normalization.py |
from dataclasses import replace
import torch as ch
from ffcv.pipeline.allocation_query import AllocationQuery
from ffcv.pipeline.compiler import Compiler
import numpy as np
from typing import Callable
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.pipeline.operation import Operation
from ffcv.transforms.ops import ToTensor
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.loader import Loader
from ffcv.fields import IntField, FloatField, BytesField
from ffcv.fields.basics import FloatDecoder
from ffcv.pipeline.state import State
from test_writer import DummyDataset
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)
class Doubler(Operation):
def generate_code(self) -> Callable:
def code(x, dst):
dst[:] = x * 2
return dst
return code
def declare_state_and_memory(self, previous_state: State):
return (previous_state, AllocationQuery(previous_state.shape, previous_state.dtype, previous_state.device))
def test_basic_simple():
length = 600
batch_size = 8
with NamedTemporaryFile() as handle:
file_name = handle.name
dataset = DummyDataset(length)
writer = DatasetWriter(file_name, {
'index': IntField(),
'value': FloatField()
})
writer.from_indexed_dataset(dataset)
Compiler.set_enabled(True)
loader = Loader(file_name, batch_size, num_workers=5, seed=17,
pipelines={
'value': [FloatDecoder(), Doubler(), ToTensor()]
})
it = iter(loader)
indices, values = next(it)
assert_that(np.allclose(indices.squeeze().numpy(),
np.arange(batch_size))).is_true()
assert_that(np.allclose(2 * np.sin(np.arange(batch_size)),
values.squeeze().numpy())).is_true()
def test_multiple_iterators_success():
length = 60
batch_size = 8
with NamedTemporaryFile() as handle:
file_name = handle.name
dataset = DummyDataset(length)
writer = DatasetWriter(file_name, {
'index': IntField(),
'value': FloatField()
})
writer.from_indexed_dataset(dataset)
Compiler.set_enabled(True)
loader = Loader(file_name, batch_size, num_workers=5, seed=17,
pipelines={
'value': [FloatDecoder(), Doubler(), ToTensor()]
})
it = iter(loader)
it = iter(loader)
def test_multiple_epoch_doesnt_recompile():
length = 60
batch_size = 8
with NamedTemporaryFile() as handle:
file_name = handle.name
dataset = DummyDataset(length)
writer = DatasetWriter(file_name, {
'index': IntField(),
'value': FloatField()
})
writer.from_indexed_dataset(dataset)
Compiler.set_enabled(True)
loader = Loader(file_name, batch_size, num_workers=5, seed=17,
pipelines={
'value': [FloatDecoder(), Doubler(), ToTensor()]
})
it = iter(loader)
code = loader.code_per_stage
it = iter(loader)
new_code = loader.code_per_stage
assert_that(code).is_equal_to(new_code)
def test_multiple_epoch_does_recompile():
length = 60
batch_size = 8
with NamedTemporaryFile() as handle:
file_name = handle.name
dataset = DummyDataset(length)
writer = DatasetWriter(file_name, {
'index': IntField(),
'value': FloatField()
})
writer.from_indexed_dataset(dataset)
Compiler.set_enabled(True)
loader = Loader(file_name, batch_size, num_workers=5, seed=17,
recompile=True,
pipelines={
'value': [FloatDecoder(), Doubler(), ToTensor()]
})
it = iter(loader)
code = loader.code_per_stage
it = iter(loader)
new_code = loader.code_per_stage
assert_that(code).is_not_equal_to(new_code) | ffcv-main | tests/test_basic_pipeline.py |
import numpy as np
import torch as ch
from torch.utils.data import Dataset
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from torchvision.datasets import CIFAR10
from torch.utils.data import Subset
from ffcv.writer import DatasetWriter
from ffcv.fields import IntField, RGBImageField
from ffcv.loader import Loader
from ffcv.pipeline.compiler import Compiler
class DummyDataset(Dataset):
def __init__(self, length, height, width, reversed=False):
self.length = length
self.height = height
self.width = width
self.reversed = reversed
def __len__(self):
return self.length
def __getitem__(self, index):
if index > self.length:
raise IndexError
dims = (self.height, self.width, 3)
image_data = ((np.ones(dims) * index) % 255).astype('uint8')
result = index,image_data
if self.reversed:
result = tuple(reversed(result))
return result
def create_and_validate(length, mode='raw', reversed=False):
dataset = DummyDataset(length, 500, 300, reversed=reversed)
with NamedTemporaryFile() as handle:
name = handle.name
fields = {
'index': IntField(),
'value': RGBImageField(write_mode=mode, jpeg_quality=95)
}
if reversed:
fields = {
'value': RGBImageField(write_mode=mode, jpeg_quality=95),
'index': IntField()
}
writer = DatasetWriter(name, fields, num_workers=2)
writer.from_indexed_dataset(dataset, chunksize=5)
Compiler.set_enabled(False)
loader = Loader(name, batch_size=5, num_workers=2)
for res in loader:
if not reversed:
index, images = res
else:
images , index = res
for i, image in zip(index, images):
                # the decoded image must exactly match the constant source image
                # in both 'raw' and 'jpg' modes
                assert_that(ch.all((image == (i % 255)).reshape(-1))).is_true()
def make_and_read_cifar_subset(length):
my_dataset = Subset(CIFAR10(root='/tmp', train=True, download=True), range(length))
with NamedTemporaryFile() as handle:
name = handle.name
writer = DatasetWriter(name, {
'image': RGBImageField(write_mode='smart',
max_resolution=32),
'label': IntField(),
}, num_workers=2)
writer.from_indexed_dataset(my_dataset, chunksize=10)
Compiler.set_enabled(False)
loader = Loader(name, batch_size=5, num_workers=2)
for index, images in loader:
pass
def test_cifar_subset():
make_and_read_cifar_subset(200)
def test_simple_raw_image_pipeline():
create_and_validate(500, 'raw', False)
def test_simple_raw_image_pipeline_rev():
create_and_validate(500, 'raw', True)
def test_simple_jpg_image_pipeline():
create_and_validate(500, 'jpg', False)
def test_simple_jpg_image_pipeline_rev():
create_and_validate(500, 'jpg', True)
| ffcv-main | tests/test_image_pipeline.py |
import os
from tempfile import NamedTemporaryFile
from time import sleep, time
import os, psutil
import numpy as np
import pytest
from tqdm import tqdm
from assertpy import assert_that
from torch.utils.data import Dataset
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import BytesField, IntField
from ffcv.pipeline.compiler import Compiler
from ffcv import Loader
class DummyDataset(Dataset):
def __init__(self, l, size):
self.l = l
self.size = size
def __len__(self):
return self.l
def __getitem__(self, index):
if index > self.l:
raise IndexError
np.random.seed(index)
return index, np.random.randint(0, 255, size=self.size, dtype='u1')
def create_and_run(num_samples, size_bytes):
handle = NamedTemporaryFile()
with handle:
name = handle.name
dataset = DummyDataset(num_samples, size_bytes)
        writer = DatasetWriter(name, {
            'index': IntField(),
            'value': BytesField()
        }, num_workers=-1)
        Compiler.set_enabled(True)
        writer.from_indexed_dataset(dataset, chunksize=100)
total_dataset_size = num_samples * size_bytes
# Dataset should not be in RAM
process = psutil.Process(os.getpid())
assert_that(process.memory_info().rss).is_less_than(total_dataset_size)
loader = Loader(name, 128, 10)
for _ in tqdm(loader):
assert_that(process.memory_info().rss).is_less_than(total_dataset_size)
@pytest.mark.skipif(bool(os.environ.get('FFCV_RUN_MEMORY_LEAK_TEST', "0")),
reason="set FFCV_RUN_MEMORY_LEAK_TEST to enable it")
def test_memory_leak_write():
create_and_run(128100, 500*300*3) | ffcv-main | tests/test_memory_leak.py |
import numpy as np
from assertpy import assert_that
from numpy.random import shuffle
from torch.utils.data import Dataset
import logging
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import IntField, FloatField, BytesField
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)
class DummyDataset(Dataset):
def __init__(self, l):
self.l = l
def __len__(self):
return self.l
def __getitem__(self, index):
if index > self.l:
raise IndexError()
return (index, np.sin(index))
class DummyDatasetWithData(Dataset):
def __init__(self, l):
self.l = l
def __len__(self):
return self.l
def __getitem__(self, index):
if index > self.l:
raise IndexError()
return (index, np.zeros(2))
def validate_simple_dataset(name, length, shuffled=False):
reader = Reader(name)
assert_that(reader.handlers).is_length(2)
assert_that(reader.handlers['index']).is_instance_of(IntField)
assert_that(reader.handlers['value']).is_instance_of(FloatField)
assert_that(reader.alloc_table).is_length(0)
assert_that(reader.metadata).is_length(length)
if shuffled:
assert_that((reader.metadata['f0'] == np.arange(length).astype('int')).all()).is_false()
assert_that(set(reader.metadata['f0'])).is_equal_to(set(np.arange(length).astype('int')))
else:
assert_that((reader.metadata['f0'] == np.arange(length).astype('int')).all()).is_true()
assert_that((np.sin(reader.metadata['f0']) == reader.metadata['f1']).all()).is_true()
def test_write_shuffle():
length = 600
with NamedTemporaryFile() as handle:
name = handle.name
dataset = DummyDataset(length)
writer = DatasetWriter(name, {
'index': IntField(),
'value': FloatField()
})
writer.from_indexed_dataset(dataset, shuffle_indices=True)
validate_simple_dataset(name, length, shuffled=True)
def test_write_simple():
length = 600
with NamedTemporaryFile() as handle:
name = handle.name
dataset = DummyDataset(length)
writer = DatasetWriter(name, {
'index': IntField(),
'value': FloatField()
})
writer.from_indexed_dataset(dataset)
validate_simple_dataset(name, length)
def test_multiple_workers():
length = 600
with NamedTemporaryFile() as handle:
name = handle.name
dataset = DummyDataset(length)
writer = DatasetWriter(name, {
'index': IntField(),
'value': FloatField()
}, num_workers=30)
writer.from_indexed_dataset(dataset, chunksize=10000)
validate_simple_dataset(name, length)
def test_super_long():
length = 600000
with NamedTemporaryFile() as handle:
name = handle.name
dataset = DummyDataset(length)
writer = DatasetWriter(name, {
'index': IntField(),
'value': FloatField()
}, num_workers=30)
writer.from_indexed_dataset(dataset, chunksize=10000)
validate_simple_dataset(name, length)
def test_small_chunks_multiple_workers():
length = 600
with NamedTemporaryFile() as handle:
name = handle.name
dataset = DummyDatasetWithData(length)
writer = DatasetWriter(name, {
'index': IntField(),
'value': BytesField()
}, num_workers=30)
writer.from_indexed_dataset(dataset, chunksize=1) | ffcv-main | tests/test_writer.py |
import numpy as np
from tqdm import tqdm
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
from time import time
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import BytesField, IntField
from ffcv.pipeline.compiler import Compiler
from ffcv.memory_managers import OSCacheManager
from test_memory_allocation import DummyDataset
def create_and_validate(length, size, do_compile):
dataset = DummyDataset(length, size)
with NamedTemporaryFile() as handle:
name = handle.name
writer = DatasetWriter(name, {
'index': IntField(),
'value': BytesField()
}, num_workers=2)
writer.from_indexed_dataset(dataset, chunksize=5)
reader = Reader(name)
manager = OSCacheManager(reader)
context = manager.schedule_epoch(np.array([0, 1]))
indices = np.random.choice(length, 500)
addresses = reader.alloc_table['ptr'][indices]
sample_ids = reader.alloc_table['sample_id'][indices]
Compiler.set_enabled(do_compile)
read_fn = manager.compile_reader()
with context:
for addr, sample_id in zip(tqdm(addresses), sample_ids):
read_buffer = read_fn(addr, context.state)
np.random.seed(sample_id)
expected_buff = np.random.randint(0, 255, size=size, dtype='u1')
assert_that(read_buffer).is_length(len(expected_buff))
assert_that(np.all(read_buffer == expected_buff)).is_true()
# We skip the first which is compilation
def test_simple():
create_and_validate(600, 76, False)
def test_large():
create_and_validate(600, 1024, False)
def test_many():
create_and_validate(60000, 81, False)
def test_many_compiled():
create_and_validate(1000000, 1, True)
| ffcv-main | tests/test_memory_reader.py |
from dataclasses import replace
import torch as ch
from ffcv.pipeline.allocation_query import AllocationQuery
from ffcv.pipeline.compiler import Compiler
import numpy as np
from typing import Callable
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.pipeline.operation import Operation
from ffcv.transforms.ops import ToTensor
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.loader import Loader
from ffcv.fields import IntField, FloatField, BytesField
from ffcv.fields.basics import FloatDecoder
from ffcv.pipeline.state import State
from test_writer import DummyDataset
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)
class Doubler(Operation):
def generate_code(self) -> Callable:
def code(x, dst):
dst[:] = x * 2
return dst
return code
def declare_state_and_memory(self, previous_state: State):
return (previous_state, AllocationQuery(previous_state.shape, previous_state.dtype, previous_state.device))
def test_basic_simple():
length = 600
batch_size = 8
with NamedTemporaryFile() as handle:
file_name = handle.name
dataset = DummyDataset(length)
writer = DatasetWriter(file_name, {
'index': IntField(),
'value': FloatField()
})
writer.from_indexed_dataset(dataset)
Compiler.set_enabled(True)
loader = Loader(file_name, batch_size, num_workers=5, seed=17,
pipelines={
'value': [FloatDecoder(), Doubler(), ToTensor()],
'index': None
})
it = iter(loader)
result = next(it)
# We should only have one element in the tuple
assert_that(result).is_length(1)
values = result[0]
assert_that(np.allclose(2 * np.sin(np.arange(batch_size)),
values.squeeze().numpy())).is_true()
| ffcv-main | tests/test_partial_pipeline.py |
import string
from ctypes import pointer
from tempfile import NamedTemporaryFile
from collections import defaultdict
from assertpy.assertpy import assert_that
from assertpy import assert_that
import numpy as np
from torch.utils.data import Dataset
from ffcv import DatasetWriter
from ffcv.fields import IntField, JSONField
from ffcv.fields.bytes import BytesDecoder
from ffcv.fields.basics import IntDecoder
from ffcv import Loader
options = list(string.ascii_uppercase + string.digits)
def generate_random_string(low, high):
length = np.random.randint(low, high)
content = ''.join(np.random.choice(options, size=length))
return content
class DummyDictDataset(Dataset):
def __init__(self, n_samples):
self.n_samples = n_samples
def __len__(self):
return self.n_samples
def __getitem__(self, index):
if index >= self.n_samples:
raise IndexError()
np.random.seed(index)
length = np.random.randint(5, 250)
content = np.random.randint(0, 256, size=(length,))
json_content = {}
for i in range(3):
json_content[generate_random_string(5, 10)] = generate_random_string(50, 250)
return index, json_content
def run_test(n_samples):
with NamedTemporaryFile() as handle:
name = handle.name
dataset = DummyDictDataset(n_samples)
writer = DatasetWriter(name, {
'index': IntField(),
'activations': JSONField()
}, num_workers=3)
writer.from_indexed_dataset(dataset)
loader = Loader(name, batch_size=3, num_workers=5,
pipelines={
'activation': [BytesDecoder()],
'index': [IntDecoder()]
}
)
ix = 0
for _, json_encoded in loader:
json_docs = JSONField.unpack(json_encoded)
for doc in json_docs:
ref_doc = dataset[ix][1]
assert_that(sorted(doc.items())).is_equal_to(sorted(ref_doc.items()))
ix += 1
def test_simple_dict():
run_test(32)
| ffcv-main | tests/test_json_field.py |
import numpy as np
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import BytesField, IntField
class DummyDataset(Dataset):
def __init__(self, l, size):
self.l = l
self.size = size
def __len__(self):
return self.l
def __getitem__(self, index):
if index > self.l:
raise IndexError
np.random.seed(index)
return index, np.random.randint(0, 255, size=self.size, dtype='u1')
def create_and_validate(length, size):
dataset = DummyDataset(length, size)
with NamedTemporaryFile() as handle:
name = handle.name
writer = DatasetWriter(name, {
'index': IntField(),
'value': BytesField()
}, num_workers=2)
writer.from_indexed_dataset(dataset, chunksize=5)
reader = Reader(name)
assert_that(reader.handlers).is_length(2)
assert_that(reader.handlers['index']).is_instance_of(IntField)
assert_that(reader.handlers['value']).is_instance_of(BytesField)
assert_that(reader.alloc_table).is_length(length)
assert_that(reader.metadata).is_length(length)
assert_that((reader.metadata['f0'] == np.arange(length).astype('int')).all()).is_true()
assert_that(np.all(reader.alloc_table['size'] == size)).is_true()
def test_simple():
create_and_validate(600, 76)
def test_large():
create_and_validate(600, 1024)
def test_many():
create_and_validate(60000, 81)
| ffcv-main | tests/test_memory_allocation.py |
import os
import uuid
import numpy as np
import torch as ch
from torch.utils.data import Dataset
from torchvision import transforms as tvt
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from torchvision.datasets import CIFAR10
from torchvision.utils import save_image, make_grid
from torch.utils.data import Subset
from ffcv.fields.basics import IntDecoder
from ffcv.fields.rgb_image import SimpleRGBImageDecoder
from ffcv.writer import DatasetWriter
from ffcv.fields import IntField, RGBImageField
from ffcv.loader import Loader
from ffcv.pipeline.compiler import Compiler
from ffcv.transforms import *
SAVE_IMAGES = True
IMAGES_TMP_PATH = '/tmp/ffcv_augtest_output'
if SAVE_IMAGES:
os.makedirs(IMAGES_TMP_PATH, exist_ok=True)
UNAUGMENTED_PIPELINE=[
SimpleRGBImageDecoder(),
ToTensor(),
ToTorchImage()
]
def run_test(length, pipeline, compile=False):
my_dataset = Subset(CIFAR10(root='/tmp', train=True, download=True), range(length))
with NamedTemporaryFile() as handle:
name = handle.name
writer = DatasetWriter(name, {
'image': RGBImageField(write_mode='smart',
max_resolution=32),
'label': IntField(),
}, num_workers=2)
writer.from_indexed_dataset(my_dataset, chunksize=10)
Compiler.set_enabled(compile)
loader = Loader(name, batch_size=7, num_workers=2, pipelines={
'image': pipeline,
'label': [IntDecoder(), ToTensor(), Squeeze()]
},
drop_last=False)
unaugmented_loader = Loader(name, batch_size=7, num_workers=2, pipelines={
'image': UNAUGMENTED_PIPELINE,
'label': [IntDecoder(), ToTensor(), Squeeze()]
}, drop_last=False)
tot_indices = 0
tot_images = 0
for (images, labels), (original_images, original_labels) in zip(loader, unaugmented_loader):
print(images.shape, original_images.shape)
tot_indices += labels.shape[0]
tot_images += images.shape[0]
for label, original_label in zip(labels, original_labels):
assert_that(label).is_equal_to(original_label)
if SAVE_IMAGES:
save_image(make_grid(ch.concat([images, original_images])/255., images.shape[0]),
os.path.join(IMAGES_TMP_PATH, str(uuid.uuid4()) + '.jpeg')
)
assert_that(tot_indices).is_equal_to(len(my_dataset))
assert_that(tot_images).is_equal_to(len(my_dataset))
def test_cutout():
for comp in [True, False]:
run_test(100, [
SimpleRGBImageDecoder(),
Cutout(8),
ToTensor(),
ToTorchImage()
], comp)
def test_flip():
for comp in [True, False]:
run_test(100, [
SimpleRGBImageDecoder(),
RandomHorizontalFlip(1.0),
ToTensor(),
ToTorchImage()
], comp)
def test_module_wrapper():
for comp in [True, False]:
run_test(100, [
SimpleRGBImageDecoder(),
ToTensor(),
ToTorchImage(),
ModuleWrapper(tvt.Grayscale(3)),
], comp)
def test_mixup():
for comp in [True, False]:
run_test(100, [
SimpleRGBImageDecoder(),
ImageMixup(.5, False),
ToTensor(),
ToTorchImage()
], comp)
def test_poison():
mask = np.zeros((32, 32, 3))
    # Red square
mask[:5, :5, 0] = 1
alpha = np.ones((32, 32))
for comp in [True, False]:
run_test(100, [
SimpleRGBImageDecoder(),
Poison(mask, alpha, list(range(100))),
ToTensor(),
ToTorchImage()
], comp)
def test_random_resized_crop():
for comp in [True, False]:
run_test(100, [
SimpleRGBImageDecoder(),
RandomResizedCrop(scale=(0.08, 1.0),
ratio=(0.75, 4/3),
size=32),
ToTensor(),
ToTorchImage()
], comp)
def test_translate():
for comp in [True, False]:
run_test(100, [
SimpleRGBImageDecoder(),
RandomTranslate(padding=10),
ToTensor(),
ToTorchImage()
], comp)
## Torchvision Transforms
def test_torchvision_greyscale():
run_test(100, [
SimpleRGBImageDecoder(),
ToTensor(),
ToTorchImage(),
tvt.Grayscale(3),
])
def test_torchvision_centercrop_pad():
run_test(100, [
SimpleRGBImageDecoder(),
ToTensor(),
ToTorchImage(),
tvt.CenterCrop(10),
tvt.Pad(11)
])
def test_torchvision_random_affine():
run_test(100, [
SimpleRGBImageDecoder(),
ToTensor(),
ToTorchImage(),
tvt.RandomAffine(25),
])
def test_torchvision_random_crop():
run_test(100, [
SimpleRGBImageDecoder(),
ToTensor(),
ToTorchImage(),
tvt.Pad(10),
tvt.RandomCrop(size=32),
])
def test_torchvision_color_jitter():
run_test(100, [
SimpleRGBImageDecoder(),
ToTensor(),
ToTorchImage(),
tvt.ColorJitter(.5, .5, .5, .5),
])
if __name__ == '__main__':
# test_cutout()
test_flip()
# test_module_wrapper()
# test_mixup()
# test_poison()
# test_random_resized_crop()
# test_translate()
## Torchvision Transforms
# test_torchvision_greyscale()
# test_torchvision_centercrop_pad()
# test_torchvision_random_affine()
# test_torchvision_random_crop()
# test_torchvision_color_jitter()
| ffcv-main | tests/test_augmentations.py |
from dataclasses import replace
import torch as ch
from ffcv.pipeline.allocation_query import AllocationQuery
from ffcv.pipeline.compiler import Compiler
import numpy as np
from typing import Callable
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.pipeline.operation import Operation
from ffcv.transforms.ops import ToTensor
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.loader import Loader
from ffcv.fields import IntField, FloatField, BytesField
from ffcv.fields.basics import FloatDecoder
from ffcv.pipeline.state import State
from test_writer import DummyDataset
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)
class Doubler(Operation):
def generate_code(self) -> Callable:
def code(x, dst):
dst[:] = x * 2
return dst
return code
def declare_state_and_memory(self, previous_state: State):
return (previous_state, AllocationQuery(previous_state.shape, previous_state.dtype, previous_state.device))
def test_basic_simple():
length = 600
batch_size = 8
with NamedTemporaryFile() as handle:
file_name = handle.name
dataset = DummyDataset(length)
writer = DatasetWriter(file_name, {
'index': IntField(),
'value': FloatField()
})
writer.from_indexed_dataset(dataset)
Compiler.set_enabled(True)
loader = Loader(file_name, batch_size, num_workers=5, seed=17,
pipelines={
'value': [FloatDecoder(), Doubler(), ToTensor()],
})
def cond(value):
value = value[0]
result = value < 1 and value >= 0.5
return result
filtered = loader.filter('value', cond)
assert_that(len(filtered)).is_greater_than(0)
for index, values in filtered:
assert_that(values.shape[0]).is_equal_to(batch_size)
assert_that(((values < 1) & (values >= 0.5)).all()).is_true() | ffcv-main | tests/test_loader_filter.py |
from ffcv.transforms.ops import ToTensor
from ffcv.fields.rgb_image import RandomResizedCropRGBImageDecoder, SimpleRGBImageDecoder, CenterCropRGBImageDecoder
import numpy as np
import torch as ch
from torch.utils.data import Dataset
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from torchvision.datasets import CIFAR10
from torch.utils.data import Subset
from ffcv.writer import DatasetWriter
from ffcv.fields import IntField, RGBImageField
from ffcv.loader import Loader
from ffcv.pipeline.compiler import Compiler
class DummyDataset(Dataset):
def __init__(self, length, size_range):
self.length = length
self.size_range = size_range
def __len__(self):
return self.length
def __getitem__(self, index):
if index > self.length:
raise IndexError
dims = (
np.random.randint(self.size_range[0], self.size_range[1] + 1),
np.random.randint(self.size_range[0], self.size_range[1] + 1),
3
)
image_data = ((np.ones(dims) * index) % 255).astype('uint8')
return index, image_data
def create_and_validate(length, decoder, size, mode='raw', compile=False):
dataset = DummyDataset(length, (300, 500))
with NamedTemporaryFile() as handle:
name = handle.name
fields = {
'index': IntField(),
'value': RGBImageField(write_mode=mode)
}
writer = DatasetWriter(name, fields, num_workers=2)
writer.from_indexed_dataset(dataset, chunksize=5)
Compiler.set_enabled(compile)
loader = Loader(name, batch_size=5, num_workers=2,
pipelines={
'value': [decoder, ToTensor()]
})
for index, images in loader:
for i, image in zip(index, images):
assert_that(image.shape).is_equal_to((size[0], size[1], 3))
                # decoded values must match the constant source image exactly
                # in both 'raw' and 'jpg' modes
                assert_that(ch.all((image == (i % 255)).reshape(-1))).is_true()
def test_simple_image_decoder_fails_with_variable_images():
decoder = SimpleRGBImageDecoder()
assert_that(create_and_validate).raises(TypeError).when_called_with(500, decoder, 32, 'raw')
# def test_rrc_decoder_raw():
# size = (160, 160)
# decoder = RandomResizedCropRGBImageDecoder(size)
# create_and_validate(500, decoder, size, 'raw')
#
# def test_rrc_decoder_jpg():
# size = (160, 160)
# decoder = RandomResizedCropRGBImageDecoder(size)
# create_and_validate(500, decoder, size, 'jpg')
#
# def test_rrc_decoder_raw_compiled():
# size = (160, 160)
# decoder = RandomResizedCropRGBImageDecoder(size)
# create_and_validate(500, decoder, size, 'raw', True)
#
# def test_rrc_decoder_jpg_compiled():
# size = (160, 160)
# decoder = RandomResizedCropRGBImageDecoder(size)
# create_and_validate(500, decoder, size, 'jpg', True)
#
# def test_cc_decoder_raw_nc():
# size = (160, 160)
# decoder = CenterCropRGBImageDecoder(size)
# create_and_validate(500, decoder, size, 'raw')
#
# def test_cc_decoder_jpg_nc():
# size = (160, 160)
# decoder = CenterCropRGBImageDecoder(size)
# create_and_validate(500, decoder, size, 'jpg')
#
# def test_cc_decoder_raw_compiled():
# size = (160, 160)
# decoder = CenterCropRGBImageDecoder(size)
# create_and_validate(500, decoder, size, 'raw', True)
#
# def test_cc_decoder_jpg_compiled():
# size = (160, 160)
# decoder = CenterCropRGBImageDecoder(size)
# create_and_validate(500, decoder, size, 'jpg', True) | ffcv-main | tests/test_rrc.py |
from os import path
from glob import glob
import tempfile
import numpy as np
from tempfile import TemporaryDirectory, NamedTemporaryFile
import torch as ch
from torch.utils.data import Dataset
import webdataset as wds
from ffcv import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import IntField, FloatField
from test_writer import validate_simple_dataset
field_names = [
'index',
'value.pyd'
]
class DummyDataset(Dataset):
def __init__(self, l):
self.l = l
def __len__(self):
return self.l
def __getitem__(self, index):
if index >= self.l:
raise IndexError()
return (index, np.sin(index))
def write_webdataset(folder, dataset, field_names):
pattern = path.join(folder, "dataset-%06d.tar")
writer = wds.ShardWriter(pattern, maxcount=20)
with writer as sink:
for i, sample in enumerate(dataset):
data = {
'__key__': f'sample_{i}'
}
for field_name, value in zip(field_names, sample):
data[field_name] = value
sink.write(data)
def pipeline(dataset):
return (dataset
.decode()
.to_tuple(*field_names)
)
if __name__ == '__main__':
N = 1007
dataset = DummyDataset(N)
with TemporaryDirectory() as temp_directory:
with NamedTemporaryFile() as handle:
fname = handle.name
write_webdataset(temp_directory, dataset, field_names)
files = glob(path.join(temp_directory, '*'))
files = list(sorted(files))
print(fname)
writer = DatasetWriter(fname, {
'index': IntField(),
'value': FloatField()
})
writer.from_webdataset(files, pipeline)
validate_simple_dataset(fname, N, shuffled=False) | ffcv-main | tests/test_webdataset.py |
import numpy as np
from tqdm import tqdm
from assertpy import assert_that
from torch.utils.data import Dataset
import logging
from time import time
import os
from assertpy import assert_that
from tempfile import NamedTemporaryFile
from ffcv.writer import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import IntField, RGBImageField
from ffcv.pipeline.compiler import Compiler
from ffcv.memory_managers import OSCacheManager
class DummyDataset(Dataset):
def __init__(self, length):
self.length = length
def __len__(self):
return self.length
def __getitem__(self, index):
if index >= self.length:
raise IndexError
np.random.seed(37 + index)
dims = tuple([128, 128, 3])
image_data = np.random.randint(low=0, high=255, size=dims, dtype='uint8')
return index, image_data
def create_and_validate(length, mode='raw', compile=False):
dataset = DummyDataset(length)
with NamedTemporaryFile() as handle:
name = handle.name
writer = DatasetWriter(name, {
'index': IntField(),
'value': RGBImageField(write_mode=mode)
}, num_workers=2)
writer.from_indexed_dataset(dataset, chunksize=5)
reader = Reader(name)
manager = OSCacheManager(reader)
context = manager.schedule_epoch(np.array([0, 1]))
Compiler.set_enabled(compile)
with context:
Decoder = RGBImageField().get_decoder_class()
decoder = Decoder()
decoder.accept_globals(reader.metadata['f1'], manager.compile_reader())
decode = Compiler.compile(decoder.generate_code())
assert_that(reader.metadata).is_length(length)
buff = np.zeros((1, 128, 128, 3), dtype='uint8')
for i in range(length):
result = decode(np.array([i]), buff, reader.metadata['f1'], context.state)[0]
_, ref_image = dataset[i]
assert_that(result.shape).is_equal_to(ref_image.shape)
if mode == 'jpg':
dist = np.abs(ref_image.astype('float') - result.astype('float'))
assert_that(dist.mean()).is_less_than(80)
else:
assert_that(np.all(ref_image == result)).is_true()
def test_simple_image_dataset_raw():
create_and_validate(500, 'raw')
def test_simple_image_dataset_jpg():
create_and_validate(100, 'jpg')
def test_simple_image_dataset_raw_compile():
create_and_validate(500, 'raw', True)
def test_simple_image_dataset_jpg_compile():
create_and_validate(100, 'jpg', True)
| ffcv-main | tests/test_image_read.py |
from ctypes import pointer
from tempfile import NamedTemporaryFile
from collections import defaultdict
from assertpy.assertpy import assert_that
from assertpy import assert_that
import numpy as np
from torch.utils.data import Dataset
from ffcv import DatasetWriter
from ffcv.fields import IntField, NDArrayField
from ffcv import Loader
class DummyActivationsDataset(Dataset):
def __init__(self, n_samples, shape):
self.n_samples = n_samples
self.shape = shape
def __len__(self):
return self.n_samples
def __getitem__(self, index):
if index >= self.n_samples:
raise IndexError()
np.random.seed(index)
return index, np.random.randn(*self.shape).astype('<f4')
class TripleDummyActivationsDataset(Dataset):
def __init__(self, n_samples, shape):
self.n_samples = n_samples
self.shape = shape
def __len__(self):
return self.n_samples
def __getitem__(self, index):
if index >= self.n_samples:
raise IndexError()
np.random.seed(index)
d1 = np.random.randn(*self.shape).astype('<f4')
d2 = np.random.randn(*self.shape).astype('<f4')
d3 = np.random.randn(*self.shape).astype('<f4')
return index, d1, d2, d3
def run_test(n_samples, shape):
with NamedTemporaryFile() as handle:
name = handle.name
dataset = DummyActivationsDataset(n_samples, shape)
writer = DatasetWriter(name, {
'index': IntField(),
'activations': NDArrayField(np.dtype('<f4'), shape)
}, num_workers=3)
writer.from_indexed_dataset(dataset)
loader = Loader(name, batch_size=3, num_workers=5)
for ixes, activations in loader:
for ix, activation in zip(ixes, activations):
assert_that(np.all(dataset[ix][1] == activation.numpy())).is_true()
def test_simple_activations():
run_test(4096, (2048,))
def test_multi_fields():
n_samples = 4096
shape = (2048,)
with NamedTemporaryFile() as handle:
name = handle.name
dataset = TripleDummyActivationsDataset(n_samples, shape)
writer = DatasetWriter(name, {
'index': IntField(),
'activations': NDArrayField(np.dtype('<f4'), shape),
'activations2': NDArrayField(np.dtype('<f4'), shape),
'activations3': NDArrayField(np.dtype('<f4'), shape)
}, num_workers=1)
writer.from_indexed_dataset(dataset)
loader = Loader(name, batch_size=3, num_workers=5)
page_size_l2 = int(np.log2(loader.reader.page_size))
sample_ids = loader.reader.alloc_table['sample_id']
pointers = loader.reader.alloc_table['ptr']
pages = pointers >> page_size_l2
sample_to_pages = defaultdict(set)
for sample_id, page in zip(sample_ids, pages):
sample_to_pages[sample_id].add(page)
assert_that(sample_to_pages[sample_id]).is_length(1)
for ixes, activations, d2, d3 in loader:
for ix, activation in zip(ixes, activations):
assert_that(np.all(dataset[ix][1] == activation.numpy())).is_true() | ffcv-main | tests/test_array_field.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'FFCV'
copyright = '2022, ffcv'
author = 'ffcv'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
autodoc_mock_imports = ['torch', 'torchvision', 'cv2', 'PIL', 'ffcv.libffcv']
autodoc_member_order = 'bysource'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'karma_sphinx_theme'
autodoc_default_options = {
'undoc-members': False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'style.css',
] | ffcv-main | docs/conf.py |
from .loader import Loader
from .writer import DatasetWriter
__version__ = '0.0.3rc1'
__all__ = ['Loader']
| ffcv-main | ffcv/__init__.py |
from typing import List
import numpy as np
from .fields.base import Field
from .fields import (
FloatField, IntField, RGBImageField,
BytesField, NDArrayField, JSONField
)
CURRENT_VERSION = 2
# Note that in this file we use dtypes in the format <u4 instead of uint32. This
# forces the endianness of the data, making datasets compatible between different
# CPU architectures: some are big-endian while x86 is little-endian. We fix the
# endianness to little-endian as we suspect that this library will mostly be
# used on x86 architectures (at least in the near future).
# Type describing the fixed-size header at the start of the file
HeaderType = np.dtype([
('version', '<u2'),
('num_fields', '<u2'),
('page_size', '<u4'),
('num_samples', '<u8'),
('alloc_table_ptr', '<u8')
], align=True)
ALLOC_TABLE_TYPE = np.dtype([
('sample_id', '<u8'),
('ptr', '<u8'),
('size', '<u8'),
])
FieldDescType = np.dtype([
# This identifier will inform us on how to decode that field
('type_id', '<u1'),
('name', ('<u1', 16)),
    # Data that depends on the type of the field (some, like images, need
    # extra arguments, while others, like integers and floats, do not)
('arguments', ('<u1', (1024, )))
], align=True)
# Map from type_id to the handler for that kind of data
TYPE_ID_HANDLER = {
255 : None,
0 : FloatField,
1 : IntField,
2 : RGBImageField,
3 : BytesField,
4 : NDArrayField,
5 : JSONField
}
# Parse the field descriptors from the header of the dataset
# and return the corresponding handlers
def get_handlers(field_descriptors):
handlers = []
for field_descriptor in field_descriptors:
type_id = field_descriptor['type_id']
Handler = TYPE_ID_HANDLER[type_id]
if Handler is None:
handlers.append(None)
else:
handlers.append(Handler.from_binary(field_descriptor['arguments']))
return handlers
# From a list of handlers return the combined data type that will
# describe a complete sample
def get_metadata_type(handlers: List[Field]) -> np.dtype:
return np.dtype([('', handler.metadata_type) for handler in handlers],
align=True)
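# For example, get_metadata_type([IntField(), FloatField()]) produces a
# structured dtype whose auto-named fields 'f0' and 'f1' carry the per-sample
# int and float metadata, which is why readers index their metadata table with
# keys such as metadata['f0'].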
| ffcv-main | ffcv/types.py |
import numpy as np
from time import sleep
from os import SEEK_END
from multiprocessing import Value
from .utils import align_to_page
import ctypes
class MemoryAllocator():
def __init__(self, fname, offset_start, page_size):
self.fname = fname
self.offset = align_to_page(offset_start, page_size)
self.next_page_allocated = Value(ctypes.c_uint64, 0)
self.next_page_written = Value(ctypes.c_uint64, 0)
self.page_size = page_size
self.page_offset = 0
self.my_page = -1
self.page_data = np.zeros(self.page_size, '<u1')
self.allocations = []
self.current_sample_id = None
def __enter__(self):
self.fp = open(self.fname, 'ab', buffering=0)
def set_current_sample(self, current_sample_id):
self.current_sample_id = current_sample_id
@property
def space_left_in_page(self):
# We don't have a page allocated yet
if self.my_page < 0:
return 0
return self.page_size - self.page_offset
def malloc(self, size):
# print(f"Allocating {size} bytes")
if size > self.page_size:
            raise ValueError(f"Tried allocating {size} but "
                             f"page size is {self.page_size}")
if size > self.space_left_in_page:
self.flush_page()
# We book the next available page in the file
with self.next_page_allocated.get_lock():
self.my_page = self.next_page_allocated.value
self.next_page_allocated.value = self.my_page + 1
self.page_offset = 0
            # This is a new page so we erase the content of the buffer
self.page_data.fill(0)
# We check if we already allocated space for this sample on
# the page that is now full
region_in_previous_page = False
while self.allocations and self.allocations[-1][0] == self.current_sample_id:
# We have to revert the allocations we did and we are giving
# up on this sample.
self.allocations.pop()
                # We found at least one memory region on the previous page
region_in_previous_page = True
# The writer will restart from this freshly allocated page
if region_in_previous_page:
raise MemoryError("Not enough memory to fit the whole sample")
previous_offset = self.page_offset
self.page_offset += size
buffer = self.page_data[previous_offset:self.page_offset]
ptr = self.offset + self.my_page * self.page_size + previous_offset
# We return the pointer to the location in file and where to write
# the data
self.allocations.append((self.current_sample_id, ptr, size))
return ptr, buffer
def flush_page(self):
# If we haven't allocated any page we end there
if self.my_page < 0:
return
# We shouldn't have allocated a page and have nothing to write on it
assert self.page_offset != 0
# Wait until it's my turn to write
while self.next_page_written.value != self.my_page:
# Essentially a spin lock
            # TODO: we could replace it with something like exponential backoff
sleep(0.001)
pass
# Now it's my turn to write
expected_file_offset = self.offset + self.my_page * self.page_size
        # If this is the first page we have to pad with zeros
        # in order to be aligned with the page size
if self.my_page == 0:
# print("Padding headers to align with page size")
current_location = self.fp.seek(0, SEEK_END)
null_bytes_to_write = expected_file_offset - current_location
self.fp.write(np.zeros(null_bytes_to_write, dtype='<u1').tobytes())
# print(f"current file pointer is no {self.fp.tell()} and should be {expected_file_offset}")
self.fp.seek(expected_file_offset)
# print(f"Writing page {self.my_page} at offset {self.fp.tell()}")
self.fp.write(self.page_data.tobytes())
# print(f"Done writing {self.my_page} at offset {self.fp.tell()}")
# We warn other processes that they are free to write the next page
with self.next_page_written.get_lock():
self.next_page_written.value += 1
    # Flush the last page and close the file when the allocator context exits
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush_page()
self.fp.close()
| ffcv-main | ffcv/memory_allocator.py |
import numpy as np
from .utils import decode_null_terminated_string
from .types import (ALLOC_TABLE_TYPE, HeaderType, CURRENT_VERSION,
FieldDescType, get_handlers, get_metadata_type)
class Reader:
def __init__(self, fname, custom_handlers={}):
self._fname = fname
self._custom_handlers = custom_handlers
self.read_header()
self.read_field_descriptors()
self.read_metadata()
self.read_allocation_table()
@property
def file_name(self):
return self._fname
def read_header(self):
header = np.fromfile(self._fname, dtype=HeaderType, count=1)[0]
header.setflags(write=False)
version = header['version']
if version != CURRENT_VERSION:
msg = f"file format mismatch: code={CURRENT_VERSION},file={version}"
raise AssertionError(msg)
self.num_samples = header['num_samples']
self.page_size = header['page_size']
self.num_fields = header['num_fields']
self.header = header
def read_field_descriptors(self):
offset = HeaderType.itemsize
field_descriptors = np.fromfile(self._fname, dtype=FieldDescType,
count=self.num_fields, offset=offset)
field_descriptors.setflags(write=False)
handlers = get_handlers(field_descriptors)
self.field_descriptors = field_descriptors
self.field_names = list(map(decode_null_terminated_string,
self.field_descriptors['name']))
self.handlers = dict(zip(self.field_names, handlers))
for field_name, field_desc in zip(self.field_names, self.field_descriptors):
if field_name in self._custom_handlers:
CustomHandler = self._custom_handlers[field_name]
self.handlers[field_name] = CustomHandler.from_binary(field_desc['arguments'])
for field_name, handler in self.handlers.items():
if handler is None:
raise ValueError(f"Must specify a custom_field entry " \
f"for custom field {field_name}")
self.metadata_type = get_metadata_type(list(self.handlers.values()))
def read_metadata(self):
offset = HeaderType.itemsize + self.field_descriptors.nbytes
self.metadata = np.fromfile(self._fname, dtype=self.metadata_type,
count=self.num_samples, offset=offset)
self.metadata.setflags(write=False)
def read_allocation_table(self):
offset = self.header['alloc_table_ptr']
alloc_table = np.fromfile(self._fname, dtype=ALLOC_TABLE_TYPE,
offset=offset)
alloc_table.setflags(write=False)
self.alloc_table = alloc_table
| ffcv-main | ffcv/reader.py |
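# --- Illustrative sketch (not part of the original repository) ---
# In practice the Reader above is the intended entry point for inspecting a
# dataset; it wraps the low-level parsing shown in ffcv/types.py. The path
# 'my_dataset.beton' is a hypothetical placeholder.
from ffcv.reader import Reader

reader = Reader('my_dataset.beton')
print('samples  :', reader.num_samples)
print('fields   :', reader.field_names)
print('page size:', reader.page_size)
# reader.metadata is a read-only structured array with one row per sample
print(reader.metadata.dtype)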
import numpy as np
from numba import types
from numba.extending import intrinsic
def chunks(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i + n]
def is_power_of_2(n):
return (n & (n-1) == 0) and n != 0
def align_to_page(ptr, page_size):
# If we are not aligned with the start of a page:
if ptr % page_size != 0:
ptr = ptr + page_size - ptr % page_size
return ptr
def decode_null_terminated_string(bytes: np.ndarray):
return bytes.tobytes().decode('ascii').split('\x00')[0]
@intrinsic
def cast_int_to_byte_ptr(typingctx, src):
# check for accepted types
if isinstance(src, types.Integer):
# create the expected type signature
result_type = types.CPointer(types.uint8)
sig = result_type(types.uintp)
# defines the custom code generation
def codegen(context, builder, signature, args):
# llvm IRBuilder code here
[src] = args
rtype = signature.return_type
llrtype = context.get_value_type(rtype)
return builder.inttoptr(src, llrtype)
return sig, codegen
from threading import Lock
s_print_lock = Lock()
def s_print(*a, **b):
"""Thread safe print function"""
with s_print_lock:
print(*a, **b)
| ffcv-main | ffcv/utils.py |
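# --- Illustrative sketch (not part of the original repository) ---
# Small sanity checks for the helpers above: align_to_page rounds a pointer up
# to the next page boundary and chunks splits a list into fixed-size pieces.
from ffcv.utils import align_to_page, chunks

assert align_to_page(0, 4096) == 0                               # already aligned
assert align_to_page(1, 4096) == 4096                            # rounded up
assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]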
import ctypes
from numba import njit
import numpy as np
from ctypes import CDLL, c_int64, c_uint8, c_uint64, POINTER, c_void_p, c_uint32, c_bool, cdll
import ffcv._libffcv
lib = CDLL(ffcv._libffcv.__file__)
libc = cdll.LoadLibrary('libc.so.6')
read_c = libc.pread
read_c.argtypes = [c_uint32, c_void_p, c_uint64, c_uint64]
def read(fileno:int, destination:np.ndarray, offset:int):
return read_c(fileno, destination.ctypes.data, destination.size, offset)
ctypes_resize = lib.resize
ctypes_resize.argtypes = 11 * [c_int64]
def resize_crop(source, start_row, end_row, start_col, end_col, destination):
ctypes_resize(0,
source.ctypes.data,
source.shape[0], source.shape[1],
start_row, end_row, start_col, end_col,
destination.ctypes.data,
destination.shape[0], destination.shape[1])
# Extract and define the interface of imdecode
ctypes_imdecode = lib.imdecode
ctypes_imdecode.argtypes = [
c_void_p, c_uint64, c_uint32, c_uint32, c_void_p, c_uint32, c_uint32,
c_uint32, c_uint32, c_uint32, c_uint32, c_bool, c_bool
]
def imdecode(source: np.ndarray, dst: np.ndarray,
source_height: int, source_width: int,
crop_height=None, crop_width=None,
offset_x=0, offset_y=0, scale_factor_num=1, scale_factor_denom=1,
enable_crop=False, do_flip=False):
return ctypes_imdecode(source.ctypes.data, source.size,
source_height, source_width, dst.ctypes.data,
crop_height, crop_width, offset_x, offset_y, scale_factor_num, scale_factor_denom,
enable_crop, do_flip)
ctypes_memcopy = lib.my_memcpy
ctypes_memcopy.argtypes = [c_void_p, c_void_p, c_uint64]
def memcpy(source: np.ndarray, dest: np.ndarray):
return ctypes_memcopy(source.ctypes.data, dest.ctypes.data, source.size)
| ffcv-main | ffcv/libffcv.py |
from functools import partial
from typing import Callable, List, Mapping
from os import SEEK_END, path
import numpy as np
from time import sleep
import ctypes
from multiprocessing import (shared_memory, cpu_count, Queue, Process, Value)
from tqdm import tqdm
from tqdm.contrib.concurrent import thread_map
from .utils import chunks, is_power_of_2
from .fields.base import Field
from .memory_allocator import MemoryAllocator
from .types import (TYPE_ID_HANDLER, get_metadata_type, HeaderType,
FieldDescType, CURRENT_VERSION, ALLOC_TABLE_TYPE)
MIN_PAGE_SIZE = 1 << 21 # 2MiB, which is the most common HugePage size
def from_shard(shard, pipeline):
    # We import webdataset here so that it doesn't crash if it's not required
    # (webdataset is an optional dependency)
from webdataset import WebDataset
dataset = WebDataset(shard)
dataset = pipeline(dataset)
return dataset
def count_samples_in_shard(shard, pipeline):
    # We count the length of the dataset
# We are not using __len__ since it might not be implemented
count = 0
for _ in from_shard(shard, pipeline):
count += 1
return count
def handle_sample(sample, dest_ix, field_names, metadata, allocator, fields):
    # We should only have to retry at most once
for i in range(2):
try:
allocator.set_current_sample(dest_ix)
# We extract the sample in question from the dataset
# We write each field individually to the metadata region
for field_name, field, field_value in zip(field_names, fields.values(), sample):
destination = metadata[field_name][dest_ix: dest_ix + 1]
field.encode(destination, field_value, allocator.malloc)
# We managed to write all the data without reaching
# the end of the page so we stop retrying
break
# If we couldn't fit this sample in the previous page we retry once from a fresh page
except MemoryError:
# We raise the error if it happens on the second try
if i == 1:
raise
def worker_job_webdataset(input_queue, metadata_sm, metadata_type, fields,
allocator, done_number, allocations_queue, pipeline):
metadata = np.frombuffer(metadata_sm.buf, dtype=metadata_type)
field_names = metadata_type.names
# This `with` block ensures that all the pages allocated have been written
# onto the file
with allocator:
while True:
todo = input_queue.get()
if todo is None:
# No more work left to do
break
shard, offset = todo
# For each sample in the chunk
done = 0
for i, sample in enumerate(from_shard(shard, pipeline)):
done += 1
dest_ix = offset + i
handle_sample(sample, dest_ix, field_names, metadata, allocator, fields)
# We warn the main thread of our progress
with done_number.get_lock():
done_number.value += done
allocations_queue.put(allocator.allocations)
def worker_job_indexed_dataset(input_queue, metadata_sm, metadata_type, fields,
allocator, done_number, allocations_queue, dataset):
metadata = np.frombuffer(metadata_sm.buf, dtype=metadata_type)
field_names = metadata_type.names
# This `with` block ensures that all the pages allocated have been written
# onto the file
with allocator:
while True:
chunk = input_queue.get()
if chunk is None:
# No more work left to do
break
# For each sample in the chunk
for dest_ix, source_ix in chunk:
sample = dataset[source_ix]
handle_sample(sample, dest_ix, field_names, metadata, allocator, fields)
# We warn the main thread of our progress
with done_number.get_lock():
done_number.value += len(chunk)
allocations_queue.put(allocator.allocations)
class DatasetWriter():
"""Writes given dataset into FFCV format (.beton).
Supports indexable objects (e.g., PyTorch Datasets) and webdataset.
Parameters
----------
fname: str
File name to store dataset in FFCV format (.beton)
fields : Mapping[str, Field]
Map from keys to Field's (order matters!)
page_size : int
Page size used internally
num_workers : int
Number of processes to use
"""
def __init__(self, fname: str, fields: Mapping[str, Field],
page_size: int = 4 * MIN_PAGE_SIZE, num_workers: int = -1):
self.fields = fields
self.fname = fname
self.metadata_type = get_metadata_type(list(self.fields.values()))
self.num_workers = num_workers
# We use all cores by default
if self.num_workers < 1:
self.num_workers = cpu_count()
if not is_power_of_2(page_size):
            raise ValueError("page_size isn't a power of 2")
if page_size < MIN_PAGE_SIZE:
            raise ValueError(f"page_size can't be lower than {MIN_PAGE_SIZE}")
self.page_size = page_size
def prepare(self):
with open(self.fname, 'wb') as fp:
# Prepare the header data
header = np.zeros(1, dtype=HeaderType)[0]
header['version'] = CURRENT_VERSION
header['num_samples'] = self.num_samples
header['num_fields'] = len(self.fields)
header['page_size'] = self.page_size
self.header = header
# We will write the header at the end because we need to know where
# The memory allocation table is in the file
# We still write it here to make space for it later
fp.write(self.header.tobytes())
# Writes the information about the fields
fields_descriptor = np.zeros(len(self.fields),
dtype=FieldDescType)
field_type_to_type_id = {v: k for (k, v) in TYPE_ID_HANDLER.items()}
fieldname_max_len = fields_descriptor[0]['name'].shape[0]
for i, (name, field) in enumerate(self.fields.items()):
type_id = field_type_to_type_id.get(type(field), 255)
encoded_name = name.encode('ascii')
encoded_name = np.frombuffer(encoded_name, dtype='<u1')
actual_length = min(fieldname_max_len, len(encoded_name))
fields_descriptor[i]['type_id'] = type_id
fields_descriptor[i]['name'][:actual_length] = (
encoded_name[:actual_length])
fields_descriptor[i]['arguments'][:] = field.to_binary()[0]
fp.write(fields_descriptor.tobytes())
total_metadata_size = self.num_samples * self.metadata_type.itemsize
# Shared memory for all the writers to fill the information
self.metadata_start = (HeaderType.itemsize + fields_descriptor.nbytes)
self.metadata_sm = shared_memory.SharedMemory(create=True,
size=total_metadata_size)
self.data_region_start = self.metadata_start + total_metadata_size
def _write_common(self, num_samples, queue_content, work_fn, extra_worker_args):
self.num_samples = num_samples
self.prepare()
allocation_list = []
        # The samples' metadata is written into the shared memory allocated in prepare()
# We publish all the work that has to be done into a queue
workqueue: Queue = Queue()
for todo in queue_content:
workqueue.put(todo)
# This will contain all the memory allocations each worker
# produced. This will go at the end of the file
allocations_queue: Queue = Queue()
# We add a token for each worker to warn them that there
# is no more work to be done
for _ in range(self.num_workers):
workqueue.put(None)
# Define counters we need to orchestrate the workers
done_number = Value(ctypes.c_uint64, 0)
allocator = MemoryAllocator(self.fname,
self.data_region_start,
self.page_size)
# Arguments that have to be passed to the workers
worker_args = (workqueue, self.metadata_sm,
self.metadata_type, self.fields,
allocator, done_number,
allocations_queue, *extra_worker_args)
# Create the workers
processes = [Process(target=work_fn, args=worker_args)
for _ in range(self.num_workers)]
# start the workers
for p in processes: p.start()
# Wait for all the workers to be done
# Display progress
progress = tqdm(total=self.num_samples)
previous = 0
while previous != self.num_samples:
val = done_number.value
diff = val - previous
if diff > 0:
progress.update(diff)
previous = val
sleep(0.1)
progress.close()
# Wait for all the workers to be done and get their allocations
for p in processes:
content = allocations_queue.get()
allocation_list.extend(content)
self.finalize(allocation_list)
self.metadata_sm.close()
self.metadata_sm.unlink()
def from_indexed_dataset(self, dataset,
indices: List[int]=None, chunksize=100,
shuffle_indices: bool = False):
"""Read dataset from an indexable dataset.
See https://docs.ffcv.io/writing_datasets.html#indexable-dataset for sample usage.
Parameters
----------
dataset: Indexable
An indexable object that implements `__getitem__` and `__len__`.
indices : List[int]
Use a subset of the dataset specified by indices.
chunksize : int
Size of chunks processed by each worker during conversion.
shuffle_indices : bool
Shuffle order of the dataset.
"""
# If the user didn't specify an order we just add samples
# sequentially
if indices is None:
indices = np.arange(len(dataset))
if shuffle_indices:
np.random.shuffle(indices)
        # We pair each index with its destination position so that workers
        # know where to write in the metadata array
indices: List[int] = list(enumerate(indices))
self._write_common(len(indices), chunks(indices, chunksize),
worker_job_indexed_dataset, (dataset, ))
def from_webdataset(self, shards: List[str], pipeline: Callable):
"""Read from webdataset-like format.
See https://docs.ffcv.io/writing_datasets.html#webdataset for sample usage.
Parameters
----------
shards: List[str]
List of shards that comprise the dataset folder.
pipeline: Callable
Called by each worker to decode. Similar to pipelines used to load webdataset.
"""
counter = partial(count_samples_in_shard, pipeline=pipeline)
lengths = thread_map(counter, shards, max_workers=self.num_workers)
total_len = sum(lengths)
offsets = np.cumsum([0] + lengths)[:-1]
todos = zip(shards, offsets)
self._write_common(total_len, todos, worker_job_webdataset, (pipeline, ))
def finalize(self, allocations) :
# Writing metadata
with open(self.fname, 'r+b') as fp:
fp.seek(self.metadata_start)
fp.write(self.metadata_sm.buf)
# We go at the end of the file
fp.seek(0, SEEK_END)
# Look at the current address
allocation_table_location = fp.tell()
# Retrieve all the allocations from the workers
# Turn them into a numpy array
try:
allocation_table = np.concatenate([
np.array(x).view(ALLOC_TABLE_TYPE) for x in allocations if len(x)
])
except:
allocation_table = np.array([]).view(ALLOC_TABLE_TYPE)
fp.write(allocation_table.tobytes())
self.header['alloc_table_ptr'] = allocation_table_location
# We go at the start of the file
fp.seek(0)
# And write the header
fp.write(self.header.tobytes())
| ffcv-main | ffcv/writer.py |
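# --- Illustrative sketch (not part of the original repository) ---
# A minimal end-to-end use of DatasetWriter.from_indexed_dataset, following
# the docstrings above. The toy dataset and the output path are made up for
# the example; RGBImageField and IntField come from ffcv.fields.
import numpy as np
from ffcv.writer import DatasetWriter
from ffcv.fields import RGBImageField, IntField

class ToyDataset:
    def __len__(self):
        return 16

    def __getitem__(self, ix):
        image = np.full((32, 32, 3), ix, dtype=np.uint8)  # dummy RGB image
        label = ix % 2
        return image, label

writer = DatasetWriter('toy.beton', {
    'image': RGBImageField(),   # field order must match the sample tuples
    'label': IntField(),
}, num_workers=2)
writer.from_indexed_dataset(ToyDataset())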
import pdb
from numba import njit, set_num_threads, prange, warnings as nwarnings, get_num_threads
from numba.core.errors import NumbaPerformanceWarning
from multiprocessing import cpu_count
import torch as ch
import warnings
class Compiler:
@classmethod
def set_enabled(cls, b):
cls.is_enabled = b
@classmethod
def set_num_threads(cls, n):
if n < 1 :
n = cpu_count()
cls.num_threads = n
set_num_threads(n)
ch.set_num_threads(n)
@classmethod
def compile(cls, code, signature=None):
parallel = False
if hasattr(code, 'is_parallel'):
parallel = code.is_parallel and cls.num_threads > 1
if cls.is_enabled:
return njit(signature, fastmath=True, nogil=True, error_model='numpy',
parallel=parallel)(code)
return code
@classmethod
def get_iterator(cls):
if cls.num_threads > 1:
return prange
else:
return range
Compiler.set_enabled(True)
Compiler.set_num_threads(1)
| ffcv-main | ffcv/pipeline/compiler.py |
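# --- Illustrative sketch (not part of the original repository) ---
# Compiler is a thin switch around numba: when enabled, compile() JIT-compiles
# the given function; when disabled, it returns the function untouched. A toy
# use could look like this.
import numpy as np
from ffcv.pipeline.compiler import Compiler

def scale(x):
    return x * 2.0

Compiler.set_enabled(True)     # use numba's njit
Compiler.set_num_threads(1)
fast_scale = Compiler.compile(scale)
print(fast_scale(np.ones(4)))  # -> [2. 2. 2. 2.]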
from .pipeline import Pipeline
__all__ = ['Pipeline'] | ffcv-main | ffcv/pipeline/__init__.py |
from typing import Optional, Sequence, Tuple, Union
from dataclasses import dataclass
import numpy as np
import torch as ch
@dataclass(frozen=True)
class AllocationQuery:
shape: Tuple[int, ...]
dtype: Union[np.dtype, ch.dtype]
device: Optional[ch.device] = None
Allocation = Union[AllocationQuery, Sequence[AllocationQuery]] | ffcv-main | ffcv/pipeline/allocation_query.py |
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
from typing import Callable, Optional, Tuple
import numpy as np
from .state import State
from .allocation_query import AllocationQuery
if TYPE_CHECKING:
from ..fields.base import Field
class Operation(ABC):
def __init__(self):
self.metadata: np.ndarray = None
self.memory_read: Callable[[np.uint64], np.ndarray] = None
pass
def accept_field(self, field:'Field'):
self.field: 'Field' = field
def accept_globals(self, metadata, memory_read):
self.metadata = metadata
self.memory_read = memory_read
# Return the code to run this operation
@abstractmethod
def generate_code(self) -> Callable:
raise NotImplementedError
@abstractmethod
def declare_state_and_memory(self, previous_state: State) -> Tuple[State, Optional[AllocationQuery]]:
raise NotImplementedError
| ffcv-main | ffcv/pipeline/operation.py |
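# --- Illustrative sketch (not part of the original repository) ---
# A minimal custom Operation showing the two abstract methods in action: it
# keeps the incoming state unchanged, requests no scratch memory, and its
# generated code simply passes the batch through. The (batch, destination)
# signature mirrors the built-in transforms later in this package.
from typing import Callable, Optional, Tuple

from ffcv.pipeline.operation import Operation
from ffcv.pipeline.state import State
from ffcv.pipeline.allocation_query import AllocationQuery

class Identity(Operation):
    def generate_code(self) -> Callable:
        def identity(batch, destination):
            # destination would hold the buffer requested in
            # declare_state_and_memory (none is requested here)
            return batch
        return identity

    def declare_state_and_memory(self, previous_state: State) -> Tuple[State, Optional[AllocationQuery]]:
        # Same state as before and no memory needed
        return previous_state, None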
from typing import Any, Optional, Sequence, Mapping
import torch as ch
import numpy as np
from .state import State
from .operation import Operation
from .allocation_query import Allocation, AllocationQuery
BAD_COLLATION_MESSAGE: str = "Each pipeline needs one and only one Collate operation"
class Pipeline:
def __init__(self, operations: Sequence[Operation]):
# This is the starting state of the pipeline
self.original_state = State(jit_mode=True,
device=ch.device('cpu'),
dtype=np.dtype('u1'),
shape=None)
self.operations = operations
self.operation_blocks, _ = self.parse_pipeline()
self.compiled_ops = self.compile_ops()
# Compile the pipeline
self.compiled_code = None
def parse_pipeline(self, batch_size=16):
memory_allocations: Mapping[int, Optional[Allocation]] = {}
operation_blocs = []
current_state: State = self.original_state
current_block = []
# We read the content of the pipeline, validate and collect
# Memory allocations
for op_id, operation in enumerate(self.operations):
previous_state = current_state
current_state, memory_allocation = operation.declare_state_and_memory(
current_state)
if current_state.jit_mode != previous_state.jit_mode:
if current_block:
operation_blocs.append((previous_state.jit_mode, current_block))
current_block = [op_id]
else:
current_block.append(op_id)
memory_allocations[op_id] = memory_allocation
if current_block:
operation_blocs.append((current_state.jit_mode, current_block))
return operation_blocs, memory_allocations
def compile_ops(self):
compiled_ops = {}
for op_id, operation in enumerate(self.operations):
compiled_ops[op_id] = operation.generate_code()
return compiled_ops
def allocate_query(self, memory_allocation: AllocationQuery, batch_size: int, batches_ahead: int):
# We compute the total amount of memory needed for this
# operation
final_shape = [batches_ahead,
batch_size, *memory_allocation.shape]
if isinstance(memory_allocation.dtype, ch.dtype):
result = []
for _ in range(final_shape[0]):
partial = ch.empty(*final_shape[1:],
dtype=memory_allocation.dtype,
device=memory_allocation.device)
try:
partial = partial.pin_memory()
except:
pass
result.append(partial)
else:
ch_dtype = ch.from_numpy(np.empty(0, dtype=memory_allocation.dtype)).dtype
result = ch.empty(*final_shape,
dtype=ch_dtype)
try:
result = result.pin_memory()
except:
pass
result = result.numpy()
return result
def allocate_memory(self, batch_size: int, batches_ahead: int):
_, memory_allocations = self.parse_pipeline()
# Contains the actual allocated memory
memory_buffers: Mapping[int, Any] = {}
# For each allocation made by the operations in the pipeline
for op_id, memory_allocation in memory_allocations.items():
# If the operation didn't make a query we stop here
allocated_buffer = None
if isinstance(memory_allocation, AllocationQuery):
allocated_buffer = self.allocate_query(memory_allocation,
batch_size,
batches_ahead)
elif isinstance(memory_allocation, Sequence):
allocated_buffer = tuple(
self.allocate_query(q, batch_size, batches_ahead) for q in memory_allocation
)
memory_buffers[op_id] = allocated_buffer
return memory_buffers
| ffcv-main | ffcv/pipeline/pipeline.py |
from dataclasses import dataclass
from typing import Literal, Tuple
import torch as ch
import numpy as np
@dataclass
class State:
jit_mode: bool
device: ch.device
shape: Tuple[int, ...]
dtype: np.dtype
# Assess the validity of a pipeline stage
def __post_init__(self):
if self.jit_mode and self.device != ch.device('cpu'):
raise AssertionError("Can't be in JIT mode and on the GPU")
if self.jit_mode and isinstance(self.dtype, ch.dtype):
raise AssertionError("Can't allocate a torch tensor in JIT mode") | ffcv-main | ffcv/pipeline/state.py |
from .base import MemoryManager, MemoryContext
from .process_cache import ProcessCacheManager
from .os_cache import OSCacheManager
__all__ = ['OSCacheManager', 'ProcessCacheManager',
'MemoryManager', 'MemoryContext']
| ffcv-main | ffcv/memory_managers/__init__.py |
from typing import Sequence, TYPE_CHECKING
BATCHES_TYPE = Sequence[Sequence[int]] | ffcv-main | ffcv/memory_managers/common.py |
from abc import abstractmethod, ABCMeta, ABC
from contextlib import AbstractContextManager
from collections import defaultdict
from typing import Callable, Mapping, Sequence, Set
import numpy as np
from numba.typed import Dict
from numba import types
from ..reader import Reader
from ..pipeline.compiler import Compiler
class MemoryContext(AbstractContextManager, metaclass=ABCMeta):
@property
@abstractmethod
def state(self):
raise NotImplementedError()
@abstractmethod
def __enter__(self):
return super().__enter__()
def start_batch(self, batch:int):
pass
@abstractmethod
def __exit__(self, __exc_type, __exc_value, __traceback):
return super().__exit__(__exc_type, __exc_value, __traceback)
class MemoryManager(ABC):
def __init__(self, reader:Reader):
self.reader = reader
alloc_table = self.reader.alloc_table
# Table mapping any address in the file to the size of the data region
# That was allocated there
self.ptrs = alloc_table['ptr']
self.sizes = alloc_table['size']
order = np.argsort(self.ptrs)
# Order them so that we can use search sorted
self.ptrs = self.ptrs[order]
self.sizes = self.sizes[order]
self.ptr_to_size = dict(zip(self.ptrs, self.sizes))
        # We extract the page number by shifting the address by the
        # page width (log2 of the page size)
page_size_bit_location = int(np.log2(reader.page_size))
page_locations = alloc_table['ptr'] >> page_size_bit_location
sample_to_pages: Mapping[int, Set[int]] = defaultdict(set)
page_to_samples: Mapping[int, Set[int]] = defaultdict(set)
        # We create a mapping from sample id to the pages its data is
        # stored on (and the reverse mapping as well)
for sid, pid in zip(alloc_table['sample_id'], page_locations):
sample_to_pages[sid].add(pid)
page_to_samples[pid].add(sid)
self.sample_to_pages = sample_to_pages
self.page_to_samples = page_to_samples
super().__init__()
@abstractmethod
def schedule_epoch(self, batches: Sequence[Sequence[int]]) -> MemoryContext:
raise NotImplementedError()
@abstractmethod
def compile_reader(self, address, size) -> Callable:
        raise NotImplementedError()
@property
@abstractmethod
def state_type(self):
raise NotImplementedError()
| ffcv-main | ffcv/memory_managers/base.py |
from typing import TYPE_CHECKING
import numpy as np
import numba as nb
from .base import MemoryManager, MemoryContext
from ..pipeline.compiler import Compiler
if TYPE_CHECKING:
from ..reader import Reader
class OSCacheContext(MemoryContext):
def __init__(self, manager:MemoryManager):
self.manager = manager
self.mmap = None
@property
def state(self):
return (self.mmap, self.manager.ptrs, self.manager.sizes)
def __enter__(self):
res = super().__enter__()
if self.mmap is None:
self.mmap = np.memmap(self.manager.reader.file_name,
'uint8', mode='r')
return res
def __exit__(self, __exc_type, __exc_value, __traceback):
# Numpy doesn't have an API to close memory maps yet
        # The only thing one can do is flush it, but since we are not
        # writing to it that would be pointless.
# Moreover we want to avoid opening the memmap over and over
# anyway.
return super().__exit__(__exc_type, __exc_value, __traceback)
class OSCacheManager(MemoryManager):
def __init__(self, reader: 'Reader'):
super().__init__(reader)
self.context = OSCacheContext(self)
def schedule_epoch(self, schedule):
return self.context
@property
def state_type(self):
t1 = nb.uint8[::1]
        t1.mutable = False
        t2 = nb.uint64[::1]
        t2.mutable = False
return nb.types.Tuple([t1, t2, t2])
def compile_reader(self):
def read(address, mem_state):
size = mem_state[2][np.searchsorted(mem_state[1], address)]
return mem_state[0][address:address + size]
return Compiler.compile(read, nb.uint8[::1](nb.uint64, self.state_type))
| ffcv-main | ffcv/memory_managers/os_cache.py |
from .context import ProcessCacheContext
from .manager import ProcessCacheManager
__all__ = ['ProcessCacheContext', 'ProcessCacheManager'] | ffcv-main | ffcv/memory_managers/process_cache/__init__.py |
from threading import Thread
from queue import Queue
import numpy as np
from ...libffcv import read
class PageReader(Thread):
def __init__(self, fname:str, queries: Queue, loaded: Queue,
memory: np.ndarray):
self.fname: str = fname
self.queries: Queue = queries
self.memory: np.ndarray = memory
self.page_size = memory.shape[1]
self.loaded: Queue = loaded
super().__init__(daemon=True)
def run(self):
import hashlib
with open(self.fname, 'rb') as handle:
fileno = handle.fileno()
while True:
query = self.queries.get()
# No more work
if query is None:
break
page_number, slot = query
offset = np.uint64(page_number * self.page_size)
length = read(fileno, self.memory[slot], offset)
# print("L", page_number, slot, hashlib.md5(self.memory[slot]).hexdigest(), self.memory[slot].ctypes.data, length)
self.loaded.put(page_number)
| ffcv-main | ffcv/memory_managers/process_cache/page_reader.py |
from collections import defaultdict
import numpy as np
from ..base import MemoryManager, MemoryContext
from ..common import BATCHES_TYPE
from .schedule import Schedule, ScheduleExecutor, compute_schedule
class ProcessCacheContext(MemoryContext):
def __init__(self, manager: MemoryManager, batches: BATCHES_TYPE):
self.manager = manager
self.fname = manager.reader.file_name
self.batches = batches
self.page_size = manager.reader.page_size
@property
def state(self):
return (self.memory, self.manager.ptrs,
self.manager.sizes, self.page_to_slot)
def __enter__(self):
pages_at_batch = []
for batch in self.batches:
pages_needed = set()
for sample_id in batch:
pages_needed.update(self.manager.sample_to_pages[sample_id])
pages_at_batch.append(pages_needed)
self.schedule = compute_schedule(pages_at_batch)
self.memory = np.zeros((self.schedule.num_slots, self.page_size),
dtype='<u1')
self.executor = ScheduleExecutor(self.fname,
self.schedule,
self.memory)
try:
max_page = max(self.schedule.page_to_slot.keys())
except ValueError:
max_page = -1
# We need max + 1 slots
# We use a table as it's O(1) indexing. Pages for the header will
# be unused however so we are losing some space
self.page_to_slot = np.zeros(max_page + np.uint32(1), dtype=np.uint32)
for p, s in self.schedule.page_to_slot.items():
self.page_to_slot[p] = s
self.executor.__enter__()
def start_batch(self, batch: int):
self.executor.load_batch(batch)
return super().start_batch(batch)
def __exit__(self, *args):
self.executor.__exit__(*args)
| ffcv-main | ffcv/memory_managers/process_cache/context.py |
from collections import defaultdict
from dataclasses import dataclass
from typing import Mapping
from queue import Queue
import numpy as np
from .page_reader import PageReader
@dataclass
class Schedule:
# Number of slots needed
num_slots: int
# Which slot to use for each page
page_to_slot: Mapping[int, int]
# First iteration a page can be loaded
can_prefetch_at: Mapping[int, int]
# Iteration at which a page *has* to be loaded
entering_at: Mapping[int, int]
# Iteration at which we can discard a page
leaving_at: Mapping[int, int]
def compute_schedule(pages_in_batch, prefetch_ahead = 3):
    # We determine the earliest and latest times at which we will need each page
page_end = {}
page_start = {}
for b_id, pages in enumerate(pages_in_batch):
for page in pages:
page_end[page] = b_id
if page not in page_start:
page_start[page] = b_id
    # At a given batch we determine which pages
    # - can be preloaded
    # - are needed
    # - can be disposed of
entering_at = defaultdict(set)
can_prefetch_at = defaultdict(set)
leaving_at = defaultdict(set)
for page in page_start.keys():
prefetch_start = max(0, page_start[page] - prefetch_ahead)
can_prefetch_at[prefetch_start].add(page)
entering_at[page_start[page]].add(page)
leaving_at[page_end[page] + 1].add(page)
# We now find how many pages we need to keep in our buffer
    # We also determine where each page is going to reside
next_slot = 0
page_to_slot = {}
free_slots = set()
# For each batch
for b_id in range(len(pages_in_batch)):
# First we free the pages that are leaving
for page in leaving_at[b_id]:
free_slots.add(page_to_slot[page])
# We use the prefetch timing here because we want to be able
# To start prefetching ahead of time and not overwrite a slot
# That is currently used
for page in can_prefetch_at[b_id]:
# Then we find a slot for the incoming pages
if free_slots:
# There is a slot available for this page
slot = free_slots.pop()
else:
# We have to allocate a new slot because we ran out
slot = next_slot
next_slot += 1
page_to_slot[page] = slot
return Schedule(next_slot, page_to_slot,
can_prefetch_at, entering_at, leaving_at)
class ScheduleExecutor():
def __init__(self, fname: str, schedule: Schedule,
memory: np.ndarray, num_workers: int=12):
self.fname = fname
self.schedule = schedule
self.memory = memory
self.queries = Queue()
self.loaded_queue = Queue()
self.num_workers = num_workers
self.entered = False
self.next_batch = 0
self.loaded = set()
def __enter__(self):
msg = "You can only enter a ScheduleExecutor once"
if self.entered:
raise Exception(msg)
self.entered = True
# Create the number of threads we were asked to
threads = []
for _ in range(self.num_workers):
thread = PageReader(self.fname, self.queries,
self.loaded_queue, self.memory)
thread.start()
threads.append(thread)
self.threads = threads
def __exit__(self, *_):
# Terminating the child threads
for _ in range(self.num_workers):
self.queries.put(None)
def load_batch(self, current_batch):
assert current_batch == self.next_batch
# Start prefetching everything we are allowed to
to_prefetch = self.schedule.can_prefetch_at[current_batch]
for page_to_fetch in to_prefetch:
q = (page_to_fetch, self.schedule.page_to_slot[page_to_fetch])
self.queries.put(q)
# Wait until we have all the pages we need
to_wait_for = self.schedule.entering_at[current_batch]
for page in to_wait_for:
while page not in self.loaded:
next_loaded = self.loaded_queue.get()
self.loaded.add(next_loaded)
# We enforce that we read in order otherwise our
# assumptions are broken
self.next_batch = current_batch + 1
| ffcv-main | ffcv/memory_managers/process_cache/schedule.py |
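# --- Illustrative sketch (not part of the original repository) ---
# compute_schedule can be exercised on its own: given, for each batch, the set
# of pages that batch touches, it returns how many page-sized slots are needed
# and when each page can be prefetched, must be resident, and can be dropped.
from ffcv.memory_managers.process_cache.schedule import compute_schedule

pages_in_batch = [{0, 1}, {1, 2}, {2, 3}]    # pages needed by batches 0, 1, 2
schedule = compute_schedule(pages_in_batch, prefetch_ahead=1)
print(schedule.num_slots)      # number of page slots to allocate
print(schedule.page_to_slot)   # slot assigned to each page
print(schedule.entering_at)    # batch at which each page must be loaded
print(schedule.leaving_at)     # batch at which each page can be discarded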
import numba as nb
import numpy as np
from .context import ProcessCacheContext
from ...pipeline.compiler import Compiler
from ..base import MemoryManager, MemoryContext
from ..common import BATCHES_TYPE
class ProcessCacheManager(MemoryManager):
def schedule_epoch(self, batches: BATCHES_TYPE) -> MemoryContext:
return ProcessCacheContext(self, batches)
@property
def state_type(self):
# The data
t1 = nb.uint8[:, ::1]
t1.mutable = False
# The pointers
t2 = nb.uint64[::1]
t2.mutable = False
        # Their size
t3 = nb.uint64[::1]
t3.mutable = False
# Page to slot
t4 = nb.uint32[::1]
t4.mutable = False
return nb.types.Tuple([t1, t2, t3, t4])
def compile_reader(self):
page_size = self.reader.page_size
page_size_log2 = np.uint32(np.log2(page_size))
def read(address, mem_state):
size = mem_state[2][np.searchsorted(mem_state[1], address)]
page = address >> page_size_log2
offset = address - (page << page_size_log2)
page_slot = mem_state[3][page]
return mem_state[0][page_slot, offset:offset + size]
return Compiler.compile(read)
| ffcv-main | ffcv/memory_managers/process_cache/manager.py |
import random
from typing import Sequence, TYPE_CHECKING
from numba import njit
import numpy as np
from torch.utils.data import DistributedSampler
from .base import TraversalOrder
if TYPE_CHECKING:
from ..loader.loader import Loader
@njit(parallel=False)
def generate_order_inner(seed, page_to_samples_array, page_sizes,
result, buffer_size=6):
num_pages = len(page_sizes)
random.seed(seed)
np.random.seed(seed)
current_pages = [0]
current_pages.remove(0) # Force the type
for page_ix in range(num_pages):
page_size = page_sizes[page_ix]
random.shuffle(page_to_samples_array[page_ix, :page_size])
next_page = 0
page_order = np.random.permutation(num_pages)
samples_consumed = np.zeros_like(page_sizes)
for s_ix in range(result.shape[0]):
while next_page < num_pages and len(current_pages) < buffer_size:
page_to_add = page_order[next_page]
if page_sizes[page_to_add] > 0:
current_pages.append(page_order[next_page])
next_page += 1
selected_page_ix = np.random.randint(0, len(current_pages))
page = current_pages[selected_page_ix]
result[s_ix] = page_to_samples_array[page, samples_consumed[page]]
samples_consumed[page] += 1
if samples_consumed[page] >= page_sizes[page]:
current_pages.remove(page)
class QuasiRandom(TraversalOrder):
def __init__(self, loader: 'Loader'):
super().__init__(loader)
# TODO filter only the samples we care about!!
self.page_to_samples = loader.memory_manager.page_to_samples
if not self.page_to_samples:
raise ValueError(
"Dataset won't benefit from QuasiRandom order, use regular Random")
if self.distributed:
raise NotImplementedError(
"distributed Not implemented yet for QuasiRandom")
self.prepare_data_structures()
def prepare_data_structures(self):
index_set = set(self.indices)
max_size = max(len(y) for y in self.page_to_samples.values())
num_pages = max(k for k in self.page_to_samples.keys()) + np.uint64(1)
self.page_to_samples_array = np.empty((num_pages, max_size),
dtype=np.int64)
self.page_sizes = np.zeros(num_pages, dtype=np.int64)
for page, content in self.page_to_samples.items():
for c in content:
if c in index_set:
self.page_to_samples_array[page][self.page_sizes[page]] = c
self.page_sizes[page] += 1
def sample_order(self, epoch: int) -> Sequence[int]:
seed = self.seed * 912300 + epoch
result_order = np.zeros(len(self.indices), dtype=np.int64)
generate_order_inner(seed, self.page_to_samples_array,
self.page_sizes,
result_order,
2*self.loader.batch_size)
return result_order | ffcv-main | ffcv/traversal_order/quasi_random.py |
from .sequential import Sequential
from .random import Random
from .quasi_random import QuasiRandom
__all__ = ['Sequential', 'Random', "QuasiRandom"] | ffcv-main | ffcv/traversal_order/__init__.py |
from typing import Sequence
import numpy as np
from torch.utils.data import DistributedSampler
from .base import TraversalOrder
class Random(TraversalOrder):
def __init__(self, loader:'Loader'):
super().__init__(loader)
if self.distributed:
self.sampler = DistributedSampler(self.indices,
shuffle=True,
seed=self.seed,
drop_last=False)
def sample_order(self, epoch: int) -> Sequence[int]:
if not self.distributed:
generator = np.random.default_rng(self.seed + epoch if self.seed is not None else None)
return generator.permutation(self.indices)
self.sampler.set_epoch(epoch)
return self.indices[np.array(list(self.sampler))]
| ffcv-main | ffcv/traversal_order/random.py |
from typing import Sequence, TYPE_CHECKING
import numpy as np
from torch.utils.data import DistributedSampler
from .base import TraversalOrder
if TYPE_CHECKING:
from ..loader.loader import Loader
class Sequential(TraversalOrder):
def __init__(self, loader:'Loader'):
super().__init__(loader)
if self.distributed:
self.sampler = DistributedSampler(self.indices,
shuffle=False,
seed=self.seed,
drop_last=False)
def sample_order(self, epoch: int) -> Sequence[int]:
if not self.distributed:
return self.indices
self.sampler.set_epoch(epoch)
return self.indices[np.array(list(self.sampler))]
| ffcv-main | ffcv/traversal_order/sequential.py |
from abc import ABC, abstractmethod
from typing import Sequence
from ..reader import Reader
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from ..loader.loader import Loader
class TraversalOrder(ABC):
def __init__(self, loader: 'Loader'):
self.loader = loader
self.indices = self.loader.indices
self.seed = self.loader.seed
self.distributed = loader.distributed
@abstractmethod
def sample_order(self, epoch:int) -> Sequence[int]:
        raise NotImplementedError()
"""
Random translate
"""
import numpy as np
from numpy.random import randint
from typing import Callable, Optional, Tuple
from dataclasses import replace
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ..pipeline.compiler import Compiler
class RandomTranslate(Operation):
"""Translate each image randomly in vertical and horizontal directions
up to specified number of pixels.
Parameters
----------
padding : int
Max number of pixels to translate in any direction.
fill : tuple
An RGB color ((0, 0, 0) by default) to fill the area outside the shifted image.
"""
def __init__(self, padding: int, fill: Tuple[int, int, int] = (0, 0, 0)):
super().__init__()
self.padding = padding
self.fill = np.array(fill)
def generate_code(self) -> Callable:
my_range = Compiler.get_iterator()
pad = self.padding
def translate(images, dst):
n, h, w, _ = images.shape
# y_coords = randint(low=0, high=2 * pad + 1, size=(n,))
# x_coords = randint(low=0, high=2 * pad + 1, size=(n,))
# dst = fill
dst[:, pad:pad+h, pad:pad+w] = images
for i in my_range(n):
y_coord = randint(low=0, high=2 * pad + 1)
x_coord = randint(low=0, high=2 * pad + 1)
# images[i] = dst[i, y_coords[i]:y_coords[i]+h, x_coords[i]:x_coords[i]+w]
images[i] = dst[i, y_coord:y_coord+h, x_coord:x_coord+w]
return images
translate.is_parallel = True
return translate
def declare_state_and_memory(self, previous_state: State) -> Tuple[State, Optional[AllocationQuery]]:
h, w, c = previous_state.shape
return (replace(previous_state, jit_mode=True), \
AllocationQuery((h + 2 * self.padding, w + 2 * self.padding, c), previous_state.dtype))
| ffcv-main | ffcv/transforms/translate.py |
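# --- Illustrative sketch (not part of the original repository) ---
# The kernel produced by RandomTranslate.generate_code can be exercised
# directly on a NumPy batch (outside a Loader it simply runs as plain Python).
# The scratch buffer shape mirrors the AllocationQuery above, with a leading
# batch dimension.
import numpy as np
from ffcv.transforms.translate import RandomTranslate

op = RandomTranslate(padding=4)
translate = op.generate_code()
images = np.random.randint(0, 256, size=(8, 32, 32, 3), dtype=np.uint8)
scratch = np.zeros((8, 32 + 2 * 4, 32 + 2 * 4, 3), dtype=np.uint8)
shifted = translate(images, scratch)   # each image shifted by at most 4 pixels
print(shifted.shape)                   # (8, 32, 32, 3)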
"""
Mixup augmentation for images and labels (https://arxiv.org/abs/1710.09412)
"""
from typing import Tuple
from numba import objmode
import numpy as np
import torch as ch
import torch.nn.functional as F
from dataclasses import replace
from typing import Callable, Optional, Tuple
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ..pipeline.compiler import Compiler
class ImageMixup(Operation):
"""Mixup for images. Operates on raw arrays (not tensors).
Parameters
----------
alpha : float
Mixup parameter alpha
same_lambda : bool
Whether to use the same value of lambda across the whole batch, or an
individually sampled lambda per image in the batch
"""
def __init__(self, alpha: float, same_lambda: bool):
super().__init__()
self.alpha = alpha
self.same_lambda = same_lambda
def generate_code(self) -> Callable:
alpha = self.alpha
same_lam = self.same_lambda
my_range = Compiler.get_iterator()
def mixer(images, dst, indices):
np.random.seed(indices[-1])
num_images = images.shape[0]
lam = np.random.beta(alpha, alpha) if same_lam else \
np.random.beta(alpha, alpha, num_images)
for ix in my_range(num_images):
l = lam if same_lam else lam[ix]
dst[ix] = l * images[ix] + (1 - l) * images[ix - 1]
return dst
mixer.is_parallel = True
mixer.with_indices = True
return mixer
def declare_state_and_memory(self, previous_state: State) -> Tuple[State, Optional[AllocationQuery]]:
return (previous_state, AllocationQuery(shape=previous_state.shape,
dtype=previous_state.dtype))
class LabelMixup(Operation):
"""Mixup for labels. Should be initialized in exactly the same way as
    :class:`ffcv.transforms.ImageMixup`.
"""
def __init__(self, alpha: float, same_lambda: bool):
super().__init__()
self.alpha = alpha
self.same_lambda = same_lambda
def generate_code(self) -> Callable:
alpha = self.alpha
same_lam = self.same_lambda
my_range = Compiler.get_iterator()
def mixer(labels, temp_array, indices):
num_labels = labels.shape[0]
# permutation = np.random.permutation(num_labels)
np.random.seed(indices[-1])
lam = np.random.beta(alpha, alpha) if same_lam else \
np.random.beta(alpha, alpha, num_labels)
for ix in my_range(num_labels):
temp_array[ix, 0] = labels[ix][0]
temp_array[ix, 1] = labels[ix - 1][0]
temp_array[ix, 2] = lam if same_lam else lam[ix]
return temp_array
mixer.is_parallel = True
mixer.with_indices = True
return mixer
def declare_state_and_memory(self, previous_state: State) -> Tuple[State, Optional[AllocationQuery]]:
return (replace(previous_state, shape=(3,), dtype=np.float32),
AllocationQuery((3,), dtype=np.float32))
class MixupToOneHot(Operation):
def __init__(self, num_classes: int):
super().__init__()
self.num_classes = num_classes
def generate_code(self) -> Callable:
def one_hotter(mixedup_labels, dst):
dst.zero_()
N = mixedup_labels.shape[0]
dst[ch.arange(N), mixedup_labels[:, 0].long()] = mixedup_labels[:, 2]
mixedup_labels[:, 2] *= -1
mixedup_labels[:, 2] += 1
dst[ch.arange(N), mixedup_labels[:, 1].long()] = mixedup_labels[:, 2]
return dst
return one_hotter
def declare_state_and_memory(self, previous_state: State) -> Tuple[State, Optional[AllocationQuery]]:
# Should already be converted to tensor
assert not previous_state.jit_mode
return (replace(previous_state, shape=(self.num_classes,)), \
AllocationQuery((self.num_classes,), dtype=previous_state.dtype, device=previous_state.device)) | ffcv-main | ffcv/transforms/mixup.py |
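# --- Illustrative sketch (not part of the original repository) ---
# ImageMixup and LabelMixup are meant to be placed in the image and label
# pipelines of the same Loader with identical arguments, so that both sides
# draw the same mixing coefficients; MixupToOneHot then turns the
# (label_a, label_b, lambda) triplet into a soft one-hot target. The Loader,
# decoder and ToTensor names, the number of classes and the .beton path are
# assumptions based on the rest of the library, not on this file.
from ffcv.loader import Loader, OrderOption
from ffcv.fields.decoders import SimpleRGBImageDecoder, IntDecoder
from ffcv.transforms import ToTensor
from ffcv.transforms.mixup import ImageMixup, LabelMixup, MixupToOneHot

alpha, same_lambda = 0.2, True
loader = Loader('my_dataset.beton', batch_size=128, num_workers=8,
                order=OrderOption.RANDOM,
                pipelines={
                    'image': [SimpleRGBImageDecoder(),
                              ImageMixup(alpha, same_lambda),
                              ToTensor()],
                    'label': [IntDecoder(),
                              LabelMixup(alpha, same_lambda),
                              ToTensor(),
                              MixupToOneHot(num_classes=10)],
                })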