# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check whether a layout is valid under Mesh TensorFlow.
Not all layouts can be used to lower a Mesh TensorFlow graph. Some Mesh
TensorFlow operations error when a certain Mesh TensorFlow dimension is assigned
to a mesh dimension (e.g. mtf.ConcatOperation with its concatenation dimension).
A Mesh TensorFlow dimension can only be assigned to a mesh dimension if the
former's size is evenly divisible by the latter's size. This module provides
methods to check these conditions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fractions
import re
class LayoutValidator(object):
"""Validates potential Mesh TensorFlow layouts.
Usage Example:
mtf_graph = mtf.Graph()
# Add operations to mtf_graph using Mesh TensorFlow.
mesh_shape = mtf.Shape([("m1", 4), ("m2", 2)])
layout_validator = valid_layouts.LayoutValidator(mtf_graph, mesh_shape)
print(layout_validator.splittable_mtf_dimension_names)
# Set of names of Mesh TensorFlow dimensions that may be assigned to mesh
# dimensions.
print(layout_validator.is_valid_assignment("batch", "m1"))
# Whether the 'batch' Mesh TensorFlow dimension may be assigned to the 'm1'
# mesh dimension. Unlike the previous method, this ensures that every
# occurrence of the 'batch' dimension has a size that is evenly divisible by
# the size of 'm1'.
Attributes:
splittable_mtf_dimension_names: a set(string) of the names of MTF dimensions
that may be assigned in a layout.
mesh_dimension_name_to_size: a {string: int}, mapping names of mesh
dimensions to their size.
"""
def __init__(self, mtf_graph, mesh_shape):
"""Initializer.
Args:
mtf_graph: an mtf.Graph, representing the Mesh TensorFlow computation of
interest.
mesh_shape: an mtf.Shape, representing the mesh of interest.
"""
self._splittable_mtf_dimension_names = self._initialize_splittable_dimensions(
mtf_graph)
self._mtf_dimension_name_to_size_gcd = (
self._initialize_mtf_dimension_name_to_size_gcd(mtf_graph))
self._mesh_dimension_name_to_size = self._initialize_mesh_dimension_name_to_size(
mesh_shape)
@property
def splittable_mtf_dimension_names(self):
return self._splittable_mtf_dimension_names
@property
def mesh_dimension_name_to_size(self):
return self._mesh_dimension_name_to_size
def is_valid_assignment(self, mtf_dimension_name, mesh_dimension_name):
"""Whether this MTF dimension may be assigned to this mesh dimension.
Args:
mtf_dimension_name: string, the name of a Mesh TensorFlow dimension.
mesh_dimension_name: string, the name of a mesh dimension.
Returns:
A boolean indicating whether the assignment is valid.
"""
return ((mtf_dimension_name in self._splittable_mtf_dimension_names) and
(self._mtf_dimension_name_to_size_gcd[mtf_dimension_name] %
self._mesh_dimension_name_to_size[mesh_dimension_name] == 0))
def _initialize_splittable_dimensions(self, mtf_graph):
"""Initializer for self._splittable_mtf_dimension_names.
Args:
mtf_graph: an mtf.Graph.
Returns:
A set(string) of the names of Mesh TensorFlow dimensions that may be
assigned in a layout.
"""
all_mtf_dimension_names = set() # set(string)
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
if not re.match(r"_anonymous_\d*", mtf_dimension.name):
all_mtf_dimension_names.add(mtf_dimension.name)
unsplittable_mtf_dimension_names = set() # set(string)
for mtf_operation in mtf_graph.operations:
unsplittable_mtf_dimension_names.update(mtf_operation.unsplittable_dims)
return all_mtf_dimension_names - unsplittable_mtf_dimension_names
def _initialize_mtf_dimension_name_to_size_gcd(self, mtf_graph):
"""Initializer for self._mtf_dimension_name_to_size_gcd.
Args:
mtf_graph: an mtf.Graph.
Returns:
A {string: int}, mapping the name of an MTF dimension to the greatest
common divisor of all the sizes it has. All these sizes being evenly
divisible by some x is equivalent to the GCD being divisible by x.
"""
mtf_dimension_name_to_size_gcd = {}
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
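# Fold this occurrence's size into the running GCD; the first time a
# dimension is seen, the running GCD defaults to that occurrence's size.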
mtf_dimension_name_to_size_gcd[mtf_dimension.name] = fractions.gcd(
mtf_dimension_name_to_size_gcd.get(mtf_dimension.name,
mtf_dimension.size),
mtf_dimension.size)
return mtf_dimension_name_to_size_gcd
def _initialize_mesh_dimension_name_to_size(self, mesh_shape):
"""Initializer for self._mesh_dimension_name_to_size.
Args:
mesh_shape: an mtf.Shape.
Returns:
A {string: int} mapping mesh dimension names to their sizes.
"""
mesh_dimension_name_to_size = {} # {string: int}
for mesh_dimension in mesh_shape.dims:
mesh_dimension_name_to_size[mesh_dimension.name] = mesh_dimension.size
return mesh_dimension_name_to_size
| mesh-master | mesh_tensorflow/auto_mtf/valid_layouts.py |
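# Illustration (a minimal sketch, not from the repository): the GCD-based
# divisibility reasoning used by LayoutValidator above, using only the
# standard library. The helper names below are hypothetical. For example, an
# MTF dimension that occurs with sizes 12, 18, and 30 has GCD 6, so it can be
# split across a mesh dimension of size 2, 3, or 6, but not 4.
import functools
import math

def occurrence_size_gcd(sizes):
  # Fold every occurrence's size into a running GCD.
  return functools.reduce(math.gcd, sizes)

def splittable_by(sizes, mesh_dimension_size):
  # Every size is divisible by mesh_dimension_size iff their GCD is.
  return occurrence_size_gcd(sizes) % mesh_dimension_size == 0

assert splittable_by([12, 18, 30], 2)      # GCD is 6, divisible by 2.
assert not splittable_by([12, 18, 30], 4)  # GCD is 6, not divisible by 4.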
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mesh_tensorflow.cost_estimator.memory_estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from mesh_tensorflow.auto_mtf import memory_estimator
import tensorflow.compat.v1 as tf
class MemoryEstimatorTest(tf.test.TestCase):
def setUp(self):
super(MemoryEstimatorTest, self).setUp()
mtf_graph = mtf.Graph()
mesh = mtf.Mesh(mtf_graph, 'lowering_context_mesh')
a_dim = mtf.Dimension('a', 3)
b_dim = mtf.Dimension('b', 4)
c_dim = mtf.Dimension('c', 5)
x = (mtf.Constant(mesh, 0, mtf.Shape([a_dim, b_dim]), tf.int32, 'X')
.outputs[0])
y = (mtf.Constant(mesh, 0, mtf.Shape([b_dim, c_dim]), tf.int32, 'Y')
.outputs[0])
z = (mtf.EinsumOperation([x, y], mtf.Shape([a_dim, c_dim]), name='Z')
.outputs[0])
mesh_shape = mtf.Shape([('m1', 4), ('m2', 3)])
self.estimator = memory_estimator.MemoryEstimator(
mtf_graph, mesh_shape, [z])
def test_LayoutValidator(self):
validator = self.estimator.get_layout_validator()
self.assertCountEqual(validator.splittable_mtf_dimension_names,
['a', 'b', 'c'])
self.assertFalse(validator.is_valid_assignment('a', 'm1'))
self.assertTrue(validator.is_valid_assignment('a', 'm2'))
def test_GraphInterface(self):
graph = self.estimator.get_graph_interface()
self.assertCountEqual(list(graph.get_all_operation_names()),
['X', 'Y', 'Z'])
self.assertEqual(graph.get_tensor_shape('X:0'), tf.TensorShape([3, 4]))
self.assertEqual(graph.get_tensor_shape('Z:0'), tf.TensorShape([3, 5]))
self.assertFalse(graph.is_tensor_final('Y:0'))
self.assertTrue(graph.is_tensor_final('Z:0'))
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
| mesh-master | mesh_tensorflow/auto_mtf/memory_estimator_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auto MeshTensorflow subpackage."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mesh_tensorflow.auto_mtf import api
from mesh_tensorflow.auto_mtf import graph_interface
from mesh_tensorflow.auto_mtf import layout_optimizer
from mesh_tensorflow.auto_mtf import memory_estimator
from mesh_tensorflow.auto_mtf import print_cp_model_solution
from mesh_tensorflow.auto_mtf import scheduler
from mesh_tensorflow.auto_mtf import valid_layouts
from mesh_tensorflow.auto_mtf.api import layout
from mesh_tensorflow.auto_mtf.api import layout_and_mesh_shape
| mesh-master | mesh_tensorflow/auto_mtf/__init__.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mesh_tensorflow.layout_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from mesh_tensorflow.auto_mtf import layout_optimizer
from mesh_tensorflow.auto_mtf import memory_estimator
import six
import tensorflow.compat.v1 as tf
class VariableNamesTest(tf.test.TestCase):
def testGlobalVarName(self):
self.assertEqual("x_(cake:lie)",
layout_optimizer._global_var_name("cake", "lie"))
def testLocalVarName(self):
self.assertEqual("y_()", layout_optimizer._local_var_name(frozenset(), {}))
self.assertEqual(
"y_(channel:y,hidden:x)",
layout_optimizer._local_var_name(frozenset(["channel", "hidden"]),
{"hidden": "x", "channel": "y"}))
self.assertEqual(
"y_(channel:y,hidden)",
layout_optimizer._local_var_name(frozenset(["channel", "hidden"]),
{"channel": "y"}))
class AssignmentsTest(tf.test.TestCase):
def testGenerateAssignments(self):
splittable_dims = {"s1", "s2", "s3"}
mesh_dims = {"m1": 4, "m2": 8}
assignments = layout_optimizer._generate_assignments(splittable_dims,
mesh_dims)
# Check that some valid assignments of various sizes are included
self.assertIn({}, assignments)
self.assertIn({"s3": "m2"}, assignments)
self.assertIn({"s1": "m2", "s2": "m1"}, assignments)
# Not allowed to map two splittable dimensions to the same mesh dimension.
self.assertNotIn({"s1": "m2", "s3": "m2"}, assignments)
# Check the total number of assignments returned. We are looking for
# thirteen because one assignment has no entries, six assignments have one
# entry, and six assignments have two entries.
self.assertLen(assignments, 13)
class OptimizeLayoutTest(tf.test.TestCase):
def setUp(self):
super(OptimizeLayoutTest, self).setUp()
self.mtf_graph = mtf.Graph()
self.mesh = mtf.Mesh(self.mtf_graph, "my_mesh")
self.mesh_shape = mtf.convert_to_shape("m1:4,m2:2")
def get_layout_optimizer(self):
return layout_optimizer.LayoutOptimizer(memory_estimator.MemoryEstimator(
self.mtf_graph, self.mesh_shape))
def testOptimizeLayout(self):
x1 = mtf.zeros(self.mesh, "a:10,b:5")
x2 = mtf.zeros(self.mesh, "b:5,c:20")
mtf.einsum([x1, x2], "a:10,c:20")
optimizer = self.get_layout_optimizer()
# Cut dimensions to make them equally sized.
layout = optimizer.solve()
self.assertEqual(layout, "a:m2;c:m1")
# This optimal layout should have the lowest value.
layout_value = optimizer.evaluate_layout(layout)
self.assertLessEqual(layout_value, optimizer.evaluate_layout("a:m1;b:m2"))
self.assertLessEqual(layout_value, optimizer.evaluate_layout("a:m1;c:m2"))
self.assertLessEqual(layout_value, optimizer.evaluate_layout("b:m1;a:m2"))
self.assertLessEqual(layout_value, optimizer.evaluate_layout("b:m1;c:m2"))
self.assertLessEqual(layout_value, optimizer.evaluate_layout("c:m1;b:m2"))
self.assertEqual(layout_value, optimizer.evaluate_layout("c:m1;a:m2"))
def testOptimizeLayoutRepetition(self):
x1 = mtf.zeros(self.mesh, "a:10,b:5")
x2 = mtf.zeros(self.mesh, "b:5,c:20")
for _ in six.moves.xrange(100):
mtf.einsum([x1, x2], "a:10,c:20")
optimizer = self.get_layout_optimizer()
self.assertGreaterEqual(len(list(
optimizer._graph.get_all_operation_names())), 50)
self.assertLessEqual(len(optimizer._model.Proto().variables), 50)
# Same checks.
layout = optimizer.solve()
self.assertEqual(layout, "a:m2;c:m1")
layout_value = optimizer.evaluate_layout(layout)
self.assertLessEqual(layout_value, optimizer.evaluate_layout("a:m1;b:m2"))
self.assertLessEqual(layout_value, optimizer.evaluate_layout("a:m1;c:m2"))
self.assertLessEqual(layout_value, optimizer.evaluate_layout("b:m1;a:m2"))
self.assertLessEqual(layout_value, optimizer.evaluate_layout("b:m1;c:m2"))
self.assertLessEqual(layout_value, optimizer.evaluate_layout("c:m1;b:m2"))
self.assertEqual(layout_value, optimizer.evaluate_layout("c:m1;a:m2"))
def testOptimizeLayoutUnsplittable(self):
x1 = mtf.zeros(self.mesh, "a:10,b:5")
x2 = mtf.zeros(self.mesh, "b:5,c:20")
mtf.UnstackOperation(x1, mtf.Dimension("a", 10))
mtf.UnstackOperation(x2, mtf.Dimension("c", 20))
optimizer = self.get_layout_optimizer()
# No dimensions can be split, because a and c are unstack dimensions and
# b has size 5 (so there are divisibility issues).
self.assertEqual(optimizer.solve(), "")
def testOptimizeLayoutTiebreak(self):
x1 = mtf.zeros(self.mesh, "a:10,b:5")
x2 = mtf.zeros(self.mesh, "b:5,c:20")
mtf.einsum([x1, x2], "a:10,c:20")
# Rewrite mesh_shape to have a dummy dimension.
self.mesh_shape = mtf.convert_to_shape("m1:4,m2:2,m3:1")
optimizer = self.get_layout_optimizer()
layout = optimizer.solve()
self.assertEqual(layout, "a:m2;b:m3;c:m1")
# TODO(joshuawang): Add test to ensure only a single device's worth of memory is
# being measured.
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.test.main()
| mesh-master | mesh_tensorflow/auto_mtf/layout_optimizer_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper function to automatically compute a layout.
Sample Usage:
import mesh_tensorflow as mtf
import mesh_tensorflow.auto_mtf
# Construct a Mesh TensorFlow graph and mesh.
mtf_graph = mtf.Graph()
mesh = mtf.Mesh(mtf_graph, "my_mesh")
x = mtf.zeros(mesh, "a:10,b:5")
y = mtf.zeros(mesh, "b:5,c:20")
z = mtf.einsum([x, y], "a:10,c:20")
# Compute a layout and mesh shape based on graph and 8 machines.
# Note that knowing the identity of the outputs is important to the
# optimization since they cannot be freed.
layout, mesh_shape = mtf.auto_mtf.layout_and_mesh_shape(mtf_graph, 8, [z])
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from mesh_tensorflow.auto_mtf import layout_optimizer
from mesh_tensorflow.auto_mtf import memory_estimator
import tensorflow.compat.v1 as tf
def layout(mtf_graph, mesh_shape, mtf_outputs=()):
"""Compute layout rules based on a computational graph and mesh shape.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
Returns:
a mtf.LayoutRules
"""
mesh_shape = mtf.convert_to_shape(mesh_shape)
estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape,
mtf_outputs)
optimizer = layout_optimizer.LayoutOptimizer(estimator)
return mtf.convert_to_layout_rules(optimizer.solve())
def layout_and_mesh_shape(mtf_graph, num_machines, mtf_outputs=(),
max_mesh_shape_dimensions=2):
"""Compute layout rules and mesh shape based on computational graph.
Brute-forces over all possible mesh shapes to find a (layout, mesh_shape)
pair. Note that the layout optimizer is more efficient when the mesh_shape has
fewer dimensions, so a smaller max_mesh_shape_dimensions makes this call
faster.
Args:
mtf_graph: a mtf.Graph.
num_machines: integer, a power of two, the number of machines available.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
max_mesh_shape_dimensions: optional integer, the maximum number of
dimensions to consider in any layout. For example, num_machines=1024 and
max_mesh_shape_dimensions=2 results in testing the mesh shapes
"mesh_0:1024", "mesh_0:512;mesh_1:2", "mesh_0:256;mesh_1:4",
"mesh_0:128;mesh_1:8", "mesh_0:64;mesh_1:16", and "mesh_0:32;mesh_1:32".
If set to None, there is no maximum.
Returns:
a (mtf.LayoutRules, mtf.Shape) tuple.
"""
best_layout_and_mesh_shape = (None, None)
best_value = None
for mesh_shape_list in _mesh_shape_iterator(num_machines,
max_mesh_shape_dimensions):
mesh_shape = mtf.Shape([mtf.Dimension("mesh_{}".format(i), size)
for i, size in enumerate(mesh_shape_list)])
tf.logging.info("Computing layout for mesh shape: {}".format(mesh_shape))
estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape,
mtf_outputs)
optimizer = layout_optimizer.LayoutOptimizer(estimator)
layout_string = optimizer.solve()
value = optimizer.evaluate_layout(layout_string)
if best_value is None or value < best_value:
best_value = value
best_layout_and_mesh_shape = (mtf.convert_to_layout_rules(layout_string),
mesh_shape)
return best_layout_and_mesh_shape
def _mesh_shape_iterator(num_machines, max_mesh_shape_dimensions=None):
"""Iterable of mesh shapes that use a certain number of machines.
Args:
num_machines: integer, a power of two, the number of machines available.
max_mesh_shape_dimensions: optional integer, the maximum number of
dimensions to consider in any layout.
Yields:
[int], the dimension sizes of a mesh shape.
"""
if num_machines == 1:
yield [1]
return
current_product = num_machines
mesh_shape = [num_machines]
while True:
if (max_mesh_shape_dimensions is None
or len(mesh_shape) <= max_mesh_shape_dimensions):
yield list(mesh_shape)
while mesh_shape[-1] == 2:
current_product //= mesh_shape.pop()
if not mesh_shape:
return
mesh_shape[-1] //= 2
current_product //= 2
while current_product < num_machines:
mesh_shape.append(min(mesh_shape[-1], num_machines // current_product))
current_product *= mesh_shape[-1]
| mesh-master | mesh_tensorflow/auto_mtf/api.py |
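# Illustration (a minimal sketch, not from the repository): a recursive
# cross-check of the iterative _mesh_shape_iterator above, under the same
# assumptions (num_machines is a power of two, factors are emitted in
# non-increasing order). The name mesh_shapes is hypothetical.
def mesh_shapes(num_machines, max_dims=None):
  def helper(remaining, largest, dims_left):
    if remaining == 1:
      yield []
      return
    if dims_left == 0:
      return
    factor = largest
    while factor >= 2:
      if remaining % factor == 0:
        next_dims_left = None if dims_left is None else dims_left - 1
        for rest in helper(remaining // factor, factor, next_dims_left):
          yield [factor] + rest
      factor //= 2
  if num_machines == 1:
    yield [1]
    return
  for shape in helper(num_machines, num_machines, max_dims):
    yield shape

# For example, list(mesh_shapes(8)) yields [[8], [4, 2], [2, 2, 2]], and
# list(mesh_shapes(512, 2)) yields [[512], [256, 2], [128, 4], [64, 8],
# [32, 16]], matching the cases exercised in api_test.py.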
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenience method to print information about a pywrapcp solver's solution.
Sample Usage:
model = pywrapcp.CpModel()
# Input variables, constraints, and objective into model.
solver = pywrapcp.CpSolver()
status = solver.Solve(model)
# Check the status returned by solver.
print_pywrapcp_solution.print_solution(model, solver)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def print_solution(model, solver):
"""Prints the solution associated with solver.
If solver has already had Solve() called on it, prints the solution. This
includes each variable and its assignment, along with the objective function
and its optimal value.
If solver has not had Solve() called on it, or there is no feasible solution,
this will probably crash.
Args:
model: A cp_model.CpModel object.
solver: A cp_model.CpSolver object.
Returns:
Nothing, but prints the solution associated with solver.
"""
model_proto = model.Proto()
response_proto = solver.ResponseProto()
variables_in_objective_map = {}
maximization = False
if model_proto.HasField('objective'):
objective = model_proto.objective
for i in range(len(objective.vars)):
variables_in_objective_map[objective.vars[i]] = objective.coeffs[i]
if objective.scaling_factor < 0.0:
maximization = True
variable_assignments = []
variables_in_objective = []
num_vars = len(model_proto.variables)
for var_index in range(num_vars):
if not model_proto.variables[var_index].name:
continue
variable_name = model_proto.variables[var_index].name
if var_index in variables_in_objective_map:
coefficient = variables_in_objective_map[var_index]
if coefficient:
if maximization:
coefficient *= -1
if coefficient < 0:
variables_in_objective.append(' - {} * {}'.format(
-coefficient, variable_name))
elif coefficient > 0:
variables_in_objective.append(' + {} * {}'.format(
coefficient, variable_name))
variable_assignments.append(' {} = {}\n'.format(
variable_name, response_proto.solution[var_index]))
print(''.join(variable_assignments), end='')
# Strip the leading '+' if it exists.
if variables_in_objective and variables_in_objective[0][1] == '+':
variables_in_objective[0] = variables_in_objective[0][2:]
print('{}:{}'.format('Maximize' if maximization else 'Minimize',
''.join(variables_in_objective)))
print('Objective value: {}\n'.format(solver.ObjectiveValue()))
| mesh-master | mesh_tensorflow/auto_mtf/print_cp_model_solution.py |
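# Illustration (a minimal sketch, not from the repository): an end-to-end use
# of print_solution above on a toy model, assuming ortools is installed. The
# variables and objective are made up for the example.
from ortools.sat.python import cp_model
from mesh_tensorflow.auto_mtf import print_cp_model_solution

def solve_toy_model():
  model = cp_model.CpModel()
  x = model.NewIntVar(0, 10, 'x')
  y = model.NewIntVar(0, 10, 'y')
  model.Add(x + y <= 7)          # One linear constraint.
  model.Minimize(3 * x - 2 * y)  # Optimum at x = 0, y = 7.
  solver = cp_model.CpSolver()
  status = solver.Solve(model)
  if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
    print_cp_model_solution.print_solution(model, solver)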
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to estimate the memory cost of a MTF computation.
We would like to estimate the footprint of computing a Mesh TensorFlow model.
Unfortunately, the size of the Mesh TensorFlow tensors isn't the full story;
a single Mesh TensorFlow operation with a single output tensor might lower into
multiple TensorFlow operations and multiple (temporary and output) TensorFlow
tensors.
However, the Mesh TensorFlow tensors serve as a quick, rough approximation to
the final memory usage. The base MemoryEstimator class uses these tensors to
compute a GraphInterface, without needing to lower the graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mesh_tensorflow.auto_mtf import graph_interface
from mesh_tensorflow.auto_mtf import valid_layouts
class MemoryEstimator(object):
"""Estimates memory cost of a MTF graph based on the size of MTF tensors.
Usage Example:
estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape)
layout_validator = estimator.get_layout_validator()
graph = estimator.get_graph_interface()
Attributes:
mtf_graph: an mtf.Graph, see argument in __init__.
mesh_shape: an mtf.Shape, see argument in __init__.
mtf_outputs: an iterable of mtf.Tensor, see argument in __init__.
"""
def __init__(self, mtf_graph, mesh_shape, mtf_outputs=()):
"""Initializer.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
"""
self.mtf_graph = mtf_graph
self.mesh_shape = mesh_shape
self.mtf_outputs = mtf_outputs
self._layout_validator = None # valid_layouts.LayoutValidator
self._graph_interface = None # graph_interface.GraphInterface
def get_layout_validator(self):
"""LayoutValidator for the model and mesh_shape.
Returns:
a valid_layouts.LayoutValidator
"""
if self._layout_validator is None:
self._compute_layout_validator()
return self._layout_validator
def get_graph_interface(self):
"""GraphInterface representation of the model's computation graph.
Returns:
a graph_interface.GraphInterface
"""
if self._graph_interface is None:
self._compute_graph_interface()
return self._graph_interface
def _compute_layout_validator(self):
"""Computes self._layout_validator."""
self._layout_validator = valid_layouts.LayoutValidator(self.mtf_graph,
self.mesh_shape)
def _compute_graph_interface(self):
"""Computes self._graph_interface."""
self._graph_interface = graph_interface.GraphInterface(self.mtf_graph)
for mtf_output in self.mtf_outputs:
self._graph_interface.set_tensor_final(mtf_output.name)
| mesh-master | mesh_tensorflow/auto_mtf/memory_estimator.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mesh_tensorflow.auto_mtf.valid_layouts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from mesh_tensorflow.auto_mtf import valid_layouts
import tensorflow.compat.v1 as tf
class LayoutValidatorTest(tf.test.TestCase):
def setUp(self):
super(LayoutValidatorTest, self).setUp()
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
a_dim = mtf.Dimension("a", 5)
b_dim = mtf.Dimension("b", 10)
concat_dim1 = mtf.Dimension("concat", 15)
concat_dim2 = mtf.Dimension("concat", 20)
x1 = mtf.zeros(mesh, mtf.Shape([a_dim, b_dim, concat_dim1]))
x2 = mtf.zeros(mesh, mtf.Shape([a_dim, b_dim, concat_dim2]))
mtf.ConcatOperation([x1, x2], "concat")
# We add a tensor with anonymous shape, which is supposed to be
# unsplittable (i.e. none of its dimensions show up during
# test_SplittableMtfDimensionNames).
_ = mtf.zeros(mesh, mtf.anonymous_shape(mtf.Shape([a_dim, b_dim])))
mesh_shape = mtf.Shape([("m1", 4), ("m2", 2)])
self.valid_layouts = valid_layouts.LayoutValidator(graph, mesh_shape)
def test_SplittableMtfDimensionNames(self):
self.assertEqual(self.valid_layouts.splittable_mtf_dimension_names,
set(["a", "b"]))
def test_MeshDimensionNameToSize(self):
self.assertEqual(self.valid_layouts.mesh_dimension_name_to_size,
{"m1": 4, "m2": 2})
def test_is_valid_assignment(self):
# Due to divisibility, the a dimension cannot be assigned to m1 or m2.
self.assertFalse(self.valid_layouts.is_valid_assignment("a", "m1"))
self.assertFalse(self.valid_layouts.is_valid_assignment("a", "m2"))
# The b dimension can only be assigned to m2.
self.assertFalse(self.valid_layouts.is_valid_assignment("b", "m1"))
self.assertTrue(self.valid_layouts.is_valid_assignment("b", "m2"))
# Due to ConcatOperation, the concat dimension may not be assigned.
self.assertFalse(self.valid_layouts.is_valid_assignment("concat", "m1"))
self.assertFalse(self.valid_layouts.is_valid_assignment("concat", "m2"))
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.test.main()
| mesh-master | mesh_tensorflow/auto_mtf/valid_layouts_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute schedules to minimize peak memory usage.
Implementation of alternative methods to compute schedules for the layout
optimizer.
Sample Usage:
# Construct Mesh TensorFlow graph, mtf_graph.
graph = mtf.auto_mtf.graph_interface.GraphInterface(mtf_graph)
schedule = scheduler.minimize_peak_memory(graph, 'LIST')
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import six
def minimize_peak_memory(graph, scheduler_alg):
"""Computes a schedule to minimize peak memory.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
scheduler_alg: a string, one of 'NAIVE' or 'LIST'
Returns:
an iterable of integers representing the schedule.
"""
if scheduler_alg == 'NAIVE':
return _minimize_peak_memory_naive(graph)
elif scheduler_alg == 'LIST':
return _minimize_peak_memory_list(graph)
else:
raise NotImplementedError('{} is not a scheduler algorithm. It should be '
'one of NAIVE or LIST.'
.format(scheduler_alg))
def _minimize_peak_memory_naive(graph):
"""Computes the naive schedule [0, 1, 2, ...].
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
Returns:
an iterable of integers representing the schedule.
"""
return six.moves.range(graph.get_num_operations())
def _minimize_peak_memory_list(graph):
"""Computes schedule according to the greedy list heuristic.
Greedy list heuristic: schedule the operation which results in the most bytes
of memory being (immediately) freed.
TODO(joshuawang): Experiment with tiebreaking by preferring more successors.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
Returns:
an iterable of integers representing the schedule.
"""
schedule = []
bytes_freed = {} # {operation_name: bytes freed}
users_of = collections.defaultdict(set) # {tensor_name: set(operation_name)}
in_degree = collections.defaultdict(int) # {operation_name: in degree}
operation_id = {} # {operation_name: id}
# We want an updatable priority queue, so we use the following workaround:
# docs.python.org/2/library/heapq.html#priority-queue-implementation-notes
priority_queue = [] # (negative bytes freed, operation name)
# Set up the (greedy) topological sort.
for i, operation_name in enumerate(graph.get_all_operation_names()):
operation_id[operation_name] = i
for input_name in graph.get_operation_input_names(operation_name):
# Note that in _HybridGraphInterface, an operation may use a tensor twice,
# but we deduplicate (with respect to in_degree) so that we can later use
# users_of to decrement in_degree.
if operation_name in users_of[input_name]:
continue
users_of[input_name].add(operation_name)
in_degree[operation_name] += 1
for operation_name in graph.get_all_operation_names():
bytes_freed[operation_name] = 0
# For each input, this operation frees memory if it is the final consumer.
for input_name in graph.get_operation_input_names(operation_name):
if len(users_of[input_name]) == 1 and not graph.is_tensor_final(
input_name):
bytes_freed[operation_name] += graph.get_tensor_size(input_name)
# For each output, this operation will require additional bytes of memory
# (hence negative bytes freed).
for output_name in graph.get_operation_output_names(operation_name):
# If the output is used (or is final), then it eats memory.
if users_of[output_name] or graph.is_tensor_final(output_name):
bytes_freed[operation_name] -= graph.get_tensor_size(output_name)
for operation_name in graph.get_all_operation_names():
if in_degree[operation_name] == 0:
heapq.heappush(priority_queue,
(-bytes_freed[operation_name], operation_name))
# Do the (greedy) topological sort.
while priority_queue:
neg_bytes_freed, operation_name = heapq.heappop(priority_queue)
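# Lazy deletion: a mismatch means this heap entry is stale (either a fresher
# copy was pushed after bytes_freed changed, or the operation was already
# scheduled and its value set to None), so it is skipped.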
if bytes_freed[operation_name] != -neg_bytes_freed:
continue
schedule.append(operation_id[operation_name])
bytes_freed[operation_name] = None
for output_name in graph.get_operation_output_names(operation_name):
for other_operation_name in users_of[output_name]:
in_degree[other_operation_name] -= 1
if in_degree[other_operation_name] == 0:
heapq.heappush(priority_queue,
(-bytes_freed[other_operation_name],
other_operation_name))
for input_name in graph.get_operation_input_names(operation_name):
if operation_name not in users_of[input_name]:
# Used twice by this operation and hence already removed.
continue
users_of[input_name].remove(operation_name)
if len(users_of[input_name]) != 1 or graph.is_tensor_final(input_name):
continue
(other_operation_name,) = users_of[input_name]
bytes_freed[other_operation_name] += graph.get_tensor_size(
input_name)
if in_degree[other_operation_name] > 0:
continue
# Push another copy into the priority queue with our updated value.
# The original copy will be ignored since it does not match bytes_freed.
heapq.heappush(priority_queue, (-bytes_freed[other_operation_name],
other_operation_name))
return schedule
| mesh-master | mesh_tensorflow/auto_mtf/scheduler.py |
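# Illustration (a minimal sketch, not from the repository): the "updatable
# priority queue" workaround referenced in _minimize_peak_memory_list above.
# heapq has no decrease-key operation, so stale entries are left in the heap
# and skipped at pop time when they no longer match the current priority. The
# class name LazyPriorityQueue is hypothetical.
import heapq

class LazyPriorityQueue(object):
  def __init__(self):
    self._heap = []      # (priority, item) pairs, possibly stale.
    self._priority = {}  # item -> current priority.

  def push(self, item, priority):
    # Always push; any older entry for item simply becomes stale.
    self._priority[item] = priority
    heapq.heappush(self._heap, (priority, item))

  def pop(self):
    # Discard entries whose priority no longer matches the current value.
    while self._heap:
      priority, item = heapq.heappop(self._heap)
      if self._priority.get(item) == priority:
        del self._priority[item]
        return item
    raise IndexError('pop from an empty LazyPriorityQueue')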
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mesh_tensorflow.auto_mtf.graph_interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from mesh_tensorflow.auto_mtf import graph_interface
import tensorflow.compat.v1 as tf
from tensorflow.core.framework import cost_graph_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
class GraphInterfaceTest(tf.test.TestCase):
def setUp(self):
super(GraphInterfaceTest, self).setUp()
self._cost_graph = cost_graph_pb2.CostGraphDef(
node=[
cost_graph_pb2.CostGraphDef.Node(
name="X",
device="/device:CPU:0",
id=0,
output_info=[
cost_graph_pb2.CostGraphDef.Node.OutputInfo(
size=48,
alias_input_port=-1,
dtype=types_pb2.DT_INT32,
shape=tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(size=3),
tensor_shape_pb2.TensorShapeProto.Dim(size=4),
]
)
),
],
),
cost_graph_pb2.CostGraphDef.Node(
name="Y",
device="/device:CPU:0",
id=1,
output_info=[
cost_graph_pb2.CostGraphDef.Node.OutputInfo(
size=80,
alias_input_port=-1,
dtype=types_pb2.DT_INT32,
shape=tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(size=4),
tensor_shape_pb2.TensorShapeProto.Dim(size=5),
]
)
),
],
),
cost_graph_pb2.CostGraphDef.Node(
name="Z1",
device="/device:CPU:0",
id=2,
input_info=[
cost_graph_pb2.CostGraphDef.Node.InputInfo(
preceding_node=0,
preceding_port=0,
),
cost_graph_pb2.CostGraphDef.Node.InputInfo(
preceding_node=1,
preceding_port=0,
),
],
output_info=[
cost_graph_pb2.CostGraphDef.Node.OutputInfo(
size=60,
alias_input_port=-1,
dtype=types_pb2.DT_INT32,
shape=tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(size=3),
tensor_shape_pb2.TensorShapeProto.Dim(size=5),
]
)
),
],
is_final=True,
),
cost_graph_pb2.CostGraphDef.Node(
name="Z2",
device="/device:CPU:0",
id=3,
input_info=[
cost_graph_pb2.CostGraphDef.Node.InputInfo(
preceding_node=0,
preceding_port=0,
),
cost_graph_pb2.CostGraphDef.Node.InputInfo(
preceding_node=1,
preceding_port=0,
),
],
output_info=[
cost_graph_pb2.CostGraphDef.Node.OutputInfo(
size=60,
alias_input_port=-1,
dtype=types_pb2.DT_INT32,
shape=tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(size=3),
tensor_shape_pb2.TensorShapeProto.Dim(size=5),
]
)
),
],
),
]
)
self._sizeless_cost_graph = self.StripCostGraphDef(
self._cost_graph, "SIZES")
self._deviceless_cost_graph = self.StripCostGraphDef(
self._cost_graph, "DEVICES")
self._cost_graph_string = self._cost_graph.SerializeToString()
self._sizeless_cost_graph_string = (
self._sizeless_cost_graph.SerializeToString())
self._deviceless_cost_graph_string = (
self._deviceless_cost_graph.SerializeToString())
def StripCostGraphDef(self, cost_graph, to_strip):
"""Removes fields from a CostGraphDef protobuf.
Helper method to reduce the initialization of CostGraphDef(s).
Args:
cost_graph: a CostGraphDef to strip.
to_strip: a string, either "SIZES" or "DEVICES".
Returns:
a new CostGraphDef with either size information or device information
stripped, as appropriate.
"""
new_cost_graph = cost_graph_pb2.CostGraphDef()
new_cost_graph.CopyFrom(cost_graph)
for node in new_cost_graph.node:
if to_strip == "SIZES":
for output_info in node.output_info:
output_info.size = 0
output_info.ClearField("shape")
if to_strip == "DEVICES":
node.ClearField("device")
return new_cost_graph
def VerifyGraphInterface(self, graph):
self.assertEqual(list(graph.get_all_operation_names()),
["X", "Y", "Z1", "Z2"])
self.assertEqual(list(graph.get_operation_input_names("X")), [])
self.assertEqual(list(graph.get_operation_input_names("Y")), [])
self.assertEqual(list(graph.get_operation_input_names("Z1")),
["X:0", "Y:0"])
self.assertEqual(list(graph.get_operation_input_names("Z2")),
["X:0", "Y:0"])
self.assertEqual(list(graph.get_operation_output_names("X")), ["X:0"])
self.assertEqual(list(graph.get_operation_output_names("Y")), ["Y:0"])
self.assertEqual(list(graph.get_operation_output_names("Z1")), ["Z1:0"])
self.assertEqual(list(graph.get_operation_output_names("Z2")), ["Z2:0"])
self.assertEqual(list(graph.get_all_tensor_names()),
["X:0", "Y:0", "Z1:0", "Z2:0"])
self.assertEqual(graph.get_tensor_dtype("X:0"), tf.int32)
self.assertEqual(graph.get_tensor_dtype("Y:0"), tf.int32)
self.assertEqual(graph.get_tensor_dtype("Z1:0"), tf.int32)
self.assertEqual(graph.get_tensor_dtype("Z2:0"), tf.int32)
self.assertEqual(graph.get_tensor_shape("X:0"), tf.TensorShape([3, 4]))
self.assertEqual(graph.get_tensor_shape("Y:0"), tf.TensorShape([4, 5]))
self.assertEqual(graph.get_tensor_shape("Z1:0"), tf.TensorShape([3, 5]))
self.assertEqual(graph.get_tensor_shape("Z2:0"), tf.TensorShape([3, 5]))
self.assertEqual(graph.get_tensor_num_entries("X:0"), 12)
self.assertEqual(graph.get_tensor_num_entries("Y:0"), 20)
self.assertEqual(graph.get_tensor_num_entries("Z1:0"), 15)
self.assertEqual(graph.get_tensor_num_entries("Z2:0"), 15)
graph.set_tensor_final("Z1:0")
self.assertEqual(graph.compute_memory_contents_under_schedule([0, 1, 2, 3]),
[frozenset(["X:0"]), frozenset(["X:0", "Y:0"]),
frozenset(["X:0", "Y:0", "Z1:0"]),
frozenset(["X:0", "Y:0", "Z1:0", "Z2:0"])])
self.assertEqual(graph.compute_memory_contents_under_schedule([0, 1, 3, 2]),
[frozenset(["X:0"]), frozenset(["X:0", "Y:0"]),
frozenset(["X:0", "Y:0", "Z2:0"]),
frozenset(["X:0", "Y:0", "Z1:0"])])
def testTensorFlowGraph(self):
tf_graph = tf.Graph()
with tf_graph.as_default():
with tf.device("/device:CPU:0"):
x = tf.zeros([3, 4], dtype=tf.int32, name="X")
y = tf.zeros([4, 5], dtype=tf.int32, name="Y")
tf.matmul(x, y, name="Z1")
tf.matmul(x, y, name="Z2")
graph = graph_interface.GraphInterface(tf_graph,
canonical_device="/device:CPU:0")
self.VerifyGraphInterface(graph)
self.assertCountEqual(graph.get_operation_mtf_dimension_names("X"), [])
self.assertCountEqual(graph.get_operation_mtf_dimension_names("Y"), [])
self.assertCountEqual(graph.get_operation_mtf_dimension_names("Z1"), [])
self.assertCountEqual(graph.get_operation_mtf_dimension_names("Z2"), [])
self.assertCountEqual(graph.get_tensor_mtf_dimension_names("X:0"), [])
self.assertCountEqual(graph.get_tensor_mtf_dimension_names("Y:0"), [])
self.assertCountEqual(graph.get_tensor_mtf_dimension_names("Z1:0"), [])
self.assertCountEqual(graph.get_tensor_mtf_dimension_names("Z2:0"), [])
self.assertEqual(graph.get_tensor_device("X:0"), "/device:CPU:0")
self.assertEqual(graph.get_tensor_device("Y:0"), "/device:CPU:0")
self.assertEqual(graph.get_tensor_device("Z1:0"), "/device:CPU:0")
self.assertEqual(graph.get_tensor_device("Z2:0"), "/device:CPU:0")
self.assertTrue(graph.is_tensor_on_canonical_device("X:0"))
self.assertTrue(graph.is_tensor_on_canonical_device("Y:0"))
self.assertTrue(graph.is_tensor_on_canonical_device("Z1:0"))
self.assertTrue(graph.is_tensor_on_canonical_device("Z2:0"))
self.assertEqual(graph.compute_cost_graph().SerializeToString(),
self._cost_graph_string)
self.assertEqual(graph.compute_cost_graph(devices=["/device:CPU:0"])
.SerializeToString(),
self._cost_graph_string)
self.assertEqual(graph.compute_cost_graph(devices=[]).SerializeToString(),
self._sizeless_cost_graph_string)
def testMeshTensorFlowGraph(self):
mtf_graph = mtf.Graph()
mesh = mtf.Mesh(mtf_graph, "my_mesh")
x = mtf.Constant(mesh, 0,
shape=mtf.convert_to_shape("a:3,b:4"),
dtype=tf.int32,
name="X").outputs[0]
y = mtf.Constant(mesh, 0,
shape=mtf.convert_to_shape("b:4,c:5"),
dtype=tf.int32,
name="Y").outputs[0]
mtf.EinsumOperation([x, y], mtf.convert_to_shape("a:3,c:5"), name="Z1")
mtf.EinsumOperation([x, y], mtf.convert_to_shape("a:3,c:5"), name="Z2")
graph = graph_interface.GraphInterface(mtf_graph)
self.VerifyGraphInterface(graph)
self.assertCountEqual(graph.get_operation_mtf_dimension_names("X"),
["a", "b"])
self.assertCountEqual(graph.get_operation_mtf_dimension_names("Y"),
["b", "c"])
self.assertCountEqual(graph.get_operation_mtf_dimension_names("Z1"),
["a", "b", "c"])
self.assertCountEqual(graph.get_operation_mtf_dimension_names("Z2"),
["a", "b", "c"])
self.assertCountEqual(graph.get_tensor_mtf_dimension_names("X:0"),
["a", "b"])
self.assertCountEqual(graph.get_tensor_mtf_dimension_names("Y:0"),
["b", "c"])
self.assertCountEqual(graph.get_tensor_mtf_dimension_names("Z1:0"),
["a", "c"])
self.assertCountEqual(graph.get_tensor_mtf_dimension_names("Z1:0"),
["a", "c"])
self.assertIsNone(graph.get_tensor_device("X:0"))
self.assertIsNone(graph.get_tensor_device("Y:0"))
self.assertIsNone(graph.get_tensor_device("Z1:0"))
self.assertIsNone(graph.get_tensor_device("Z2:0"))
self.assertTrue(graph.is_tensor_on_canonical_device("X:0"))
self.assertTrue(graph.is_tensor_on_canonical_device("Y:0"))
self.assertTrue(graph.is_tensor_on_canonical_device("Z1:0"))
self.assertTrue(graph.is_tensor_on_canonical_device("Z2:0"))
self.assertEqual(graph.compute_cost_graph().SerializeToString(),
self._deviceless_cost_graph_string)
self.assertEqual(graph.compute_cost_graph(devices=[]).SerializeToString(),
self._deviceless_cost_graph_string)
def testNotAGraph(self):
self.assertRaises(TypeError, graph_interface.GraphInterface, "hello")
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.test.main()
| mesh-master | mesh_tensorflow/auto_mtf/graph_interface_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mesh_tensorflow.auto_mtf.layout."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
import mesh_tensorflow.auto_mtf # pylint: disable=unused-import
import mesh_tensorflow.auto_mtf.api
import tensorflow.compat.v1 as tf
class LayoutTest(tf.test.TestCase):
def testLayout(self):
# Construct a Mesh TensorFlow graph and mesh.
mtf_graph = mtf.Graph()
mesh = mtf.Mesh(mtf_graph, "my_mesh")
x = mtf.zeros(mesh, "a:10,b:5")
y = mtf.zeros(mesh, "b:5,c:20")
z = mtf.einsum([x, y], "a:10,c:20")
# Decide on a mesh shape.
mesh_shape = mtf.convert_to_shape("m1:4,m2:2")
# Compute a layout based on the graph and mesh.
# Note that knowing the identity of the outputs is important to the
# optimization since they cannot be freed.
layout = mtf.auto_mtf.layout(mtf_graph, mesh_shape, [z])
a_dim = mtf.convert_to_dimension(("a", 10))
b_dim = mtf.convert_to_dimension(("b", 5))
c_dim = mtf.convert_to_dimension(("c", 20))
self.assertEqual(layout.tensor_dimension_to_mesh_axis(a_dim, mesh_shape), 1)
self.assertIsNone(layout.tensor_dimension_to_mesh_axis(b_dim, mesh_shape))
self.assertEqual(layout.tensor_dimension_to_mesh_axis(c_dim, mesh_shape), 0)
def testLayoutAndMeshShape(self):
# Same as previous test, but don't specify a 4x2 mesh.
mtf_graph = mtf.Graph()
mesh = mtf.Mesh(mtf_graph, "my_mesh")
x = mtf.zeros(mesh, "a:10,b:5")
y = mtf.zeros(mesh, "b:5,c:20")
z = mtf.einsum([x, y], "a:10,c:20")
layout, mesh_shape = mtf.auto_mtf.layout_and_mesh_shape(mtf_graph, 8, [z])
a_dim = mtf.convert_to_dimension(("a", 10))
b_dim = mtf.convert_to_dimension(("b", 5))
c_dim = mtf.convert_to_dimension(("c", 20))
self.assertEqual(layout.tensor_dimension_to_mesh_axis(a_dim, mesh_shape), 1)
self.assertIsNone(layout.tensor_dimension_to_mesh_axis(b_dim, mesh_shape))
self.assertEqual(layout.tensor_dimension_to_mesh_axis(c_dim, mesh_shape), 0)
self.assertCountEqual(mesh_shape.dims,
[mtf.Dimension("mesh_0", 4),
mtf.Dimension("mesh_1", 2)])
layout, mesh_shape = mtf.auto_mtf.layout_and_mesh_shape(
mtf_graph, 8, [z], 1)
self.assertIsNone(layout.tensor_dimension_to_mesh_axis(a_dim, mesh_shape))
self.assertIsNone(layout.tensor_dimension_to_mesh_axis(b_dim, mesh_shape))
self.assertIsNone(layout.tensor_dimension_to_mesh_axis(c_dim, mesh_shape))
self.assertCountEqual(mesh_shape.dims, [mtf.Dimension("mesh_0", 8)])
def testMeshShapeIterator(self):
self.assertCountEqual(
list(mesh_tensorflow.auto_mtf.api._mesh_shape_iterator(1)), [[1]])
self.assertCountEqual(
list(mesh_tensorflow.auto_mtf.api._mesh_shape_iterator(2)), [[2]])
self.assertCountEqual(
list(mesh_tensorflow.auto_mtf.api._mesh_shape_iterator(4)),
[[4], [2, 2]])
self.assertCountEqual(
list(mesh_tensorflow.auto_mtf.api._mesh_shape_iterator(8)),
[[8], [4, 2], [2, 2, 2]])
self.assertCountEqual(
list(mesh_tensorflow.auto_mtf.api._mesh_shape_iterator(512)),
[[512],
[256, 2],
[128, 4],
[128, 2, 2],
[64, 8],
[64, 4, 2],
[64, 2, 2, 2],
[32, 16],
[32, 8, 2],
[32, 4, 4],
[32, 4, 2, 2],
[32, 2, 2, 2, 2],
[16, 16, 2],
[16, 8, 4],
[16, 8, 2, 2],
[16, 4, 4, 2],
[16, 4, 2, 2, 2],
[16, 2, 2, 2, 2, 2],
[8, 8, 8],
[8, 8, 4, 2],
[8, 8, 2, 2, 2],
[8, 4, 4, 4],
[8, 4, 4, 2, 2],
[8, 4, 2, 2, 2, 2],
[8, 2, 2, 2, 2, 2, 2],
[4, 4, 4, 4, 2],
[4, 4, 4, 2, 2, 2],
[4, 4, 2, 2, 2, 2, 2],
[4, 2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 2, 2, 2]])
self.assertCountEqual(
list(mesh_tensorflow.auto_mtf.api._mesh_shape_iterator(512, 1)),
[[512]])
self.assertCountEqual(
list(mesh_tensorflow.auto_mtf.api._mesh_shape_iterator(512, 2)),
[[512], [256, 2], [128, 4], [64, 8], [32, 16]])
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.test.main()
| mesh-master | mesh_tensorflow/auto_mtf/api_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes layouts for Mesh TensorFlow.
Classes and methods to encode a Mesh TensorFlow computation as a series of
Operations and then find a layout to minimize per-machine memory usage.
Sample Usage:
mtf_graph = mtf.Graph()
mesh = mtf.Mesh(mtf_graph, "my_mesh")
mesh_shape = mtf.convert_to_shape("m1:2;m2:2")
# Add some operations to mesh using Mesh TensorFlow.
estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape)
optimizer = layout_optimizer.LayoutOptimizer(estimator)
layout = optimizer.solve()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl import logging
from mesh_tensorflow.auto_mtf import print_cp_model_solution
from mesh_tensorflow.auto_mtf import scheduler
import six
from ortools.sat.python import cp_model
class SolverError(Exception):
pass
class LayoutOptimizer(object):
"""Tries to compute a good layout for Mesh Tensorflow.
Given a mesh shape (see Mesh TensorFlow) and several operations, computes a
good layout (a mapping from TensorFlow dimensions to mesh dimensions) using
integer programming.
More formally, suppose T is the set of TensorFlow dimensions and M is the set
of mesh dimensions. A layout L is a map from (T) to (M union {"unassigned"}),
designating which tensor dimensions are split using which mesh dimensions.
We wish to compute a layout that minimizes memory usage. Unfortunately, the
memory usage doesn't just depend on the layout, but also on how the scheduler
orders operations; whenever an operation is being performed, there are other
tensors in memory besides that operation's input and output tensors. The
layout, however, affects the size of the various tensors in memory, as well as
*the amount of temporary memory the operation uses*.
With this in mind, our (boolean) integer program to minimize memory is:
Variables:
x_{t->m}: "global" (boolean) variables; takes a value of 1 if t in T is
assigned to m in M, and a value of 0 otherwise.
y_{assignment}: "local" (boolean) variables; for every set of TensorFlow
dimensions used in an operation or tensor and for every (valid)
assignment from that set of TF dimensions to (M union {"unassigned"}),
we have one of these which takes a value of 1 if the global variables
completely agree with that assignment and 0 otherwise.
z: memory (continuous) variable; the peak memory usage.
Constraints:
Operation Constraints: for every operation, no two dimensions used in that
operation can be mapped to the same mesh dimension (since doing so would
cause some of its computation to be skipped entirely).
Global Constraints: we enforce that every TensorFlow dimension is assigned
to at most one mesh dimension (it may be unassigned).
(Optional) Divisibility Constraints: we enforce that a TensorFlow dimension
can only be assigned to a mesh dimension if the latter's size evenly
divides the former's size.
Local Constraints: we enforce that out of all assignments that share a
domain (i.e. set of TensorFlow dimensions), exactly one is chosen.
Global-to-Local Constraints: we enforce that if assignment(t) = m, then
x_{t->m} must be 1 for y_{assignment} to be 1. We also enforce that if
assignment(t) = "unassigned", then x_{t->m} must be 0 for all m in M.
Memory Constraints: for every operation i, the peak memory usage z must be
at least the memory usage during that operation. The latter can be derived
from memory_contents[i] and the local variables relevant to those
tensors (their new sizes) and to the operation (temporary memory
needed).
Objective Function:
We want to minimize the variable z. However, we want to tiebreak by the
number of assigned dimensions (preferring more dimensions), so our
modified objective is (#MTF Dimensions + 1) * z - sum x_{t->m}. Note that we
prefer more splitting because in general splits result in smaller tensors
and less duplicated work.
"""
def __init__(self, memory_estimator, scheduler_alg="LIST"):
"""Uses a auto_mtf.memory_estimator to set up the integer program.
Args:
memory_estimator: a memory_estimator.MemoryEstimator.
scheduler_alg: an optional string, see scheduler.minimize_peak_memory.
"""
self._estimator = memory_estimator
self._scheduler_alg = scheduler_alg
self._layout_validator = self._estimator.get_layout_validator()
self._graph = self._estimator.get_graph_interface()
self._memory_contents = None # [frozenset(string)]
# Initialize the model.
self._model = cp_model.CpModel()
self._preprocess_input()
self._initialize_variables()
self._add_constraints()
self._build_objective_function()
def _preprocess_input(self):
"""Computing useful input data structures to ease IP construction."""
# Compute the sets of MTF dimensions used in operations/tensors.
# a {string: frozenset(string)}, mapping operation name to MTF dimension
# names.
self._operation_name_to_mtf_dimension_set = {}
# a {string: frozenset(string)}, mapping tensor name to MTF dimension names.
self._tensor_name_to_mtf_dimension_set = {}
for operation_name in self._graph.get_all_operation_names():
self._operation_name_to_mtf_dimension_set[operation_name] = frozenset(
set(self._graph.get_operation_mtf_dimension_names(
operation_name)).intersection(
self._layout_validator.splittable_mtf_dimension_names))
for tensor_name in self._graph.get_all_tensor_names():
self._tensor_name_to_mtf_dimension_set[tensor_name] = frozenset(
set(self._graph.get_tensor_mtf_dimension_names(tensor_name))
.intersection(self._layout_validator.splittable_mtf_dimension_names))
self._operation_mtf_dimension_sets = set(
self._operation_name_to_mtf_dimension_set.values())
self._mtf_dimension_sets = self._operation_mtf_dimension_sets | set(
self._tensor_name_to_mtf_dimension_set.values())
# Compute possible assignments for each set of MTF dimensions.
self._assignments = {} # indexed by MTF dimension set
for mtf_dimension_set in self._mtf_dimension_sets:
self._assignments[mtf_dimension_set] = _generate_assignments(
mtf_dimension_set, self._layout_validator.mesh_dimension_name_to_size)
def _initialize_variables(self):
"""Initializing the variables of the IP."""
# Initialize global variables.
self._global_vars = {} # Indexed by (MTF dimension, mesh dimension)
for mtf_dimension_name in (
self._layout_validator.splittable_mtf_dimension_names):
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size):
name = _global_var_name(mtf_dimension_name, mesh_dimension_name)
self._global_vars[(mtf_dimension_name, mesh_dimension_name)] = (
self._model.NewBoolVar(name))
# Initialize local variables.
self._local_vars = {} # Indexed by (tensorflow dimension set), then name of
# assignment.
for mtf_dimension_set in self._mtf_dimension_sets:
self._local_vars[mtf_dimension_set] = {}
for assignment in self._assignments[mtf_dimension_set]:
# TODO(joshuawang): Avoid hash collision no matter what dimension names
# are; don't hash by this local var name, swap to using a tuple encoding
# of the full assignment instead.
name = _local_var_name(mtf_dimension_set, assignment)
self._local_vars[mtf_dimension_set][name] = (
self._model.NewBoolVar(name))
# Initialize memory variable. We need a crude upper bound on memory, so we
# use the total size of all tensors under the empty assignment.
# NOTE(joshuawang): This bound could be improved by factoring in the
# schedule.
memory_upper_bound = 0
for tensor_name in self._graph.get_all_tensor_names():
if self._graph.is_tensor_on_canonical_device(tensor_name):
memory_upper_bound += int(self._graph.get_tensor_size(tensor_name))
self._memory_var = self._model.NewIntVar(0, memory_upper_bound, "z")
def _add_constraints(self):
"""Adding constraints to the IP."""
# Add operation constraints.
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size):
for mtf_dimension_set in self._operation_mtf_dimension_sets:
self._model.Add(
sum(self._global_vars[(mtf_dimension_name, mesh_dimension_name)]
for mtf_dimension_name in mtf_dimension_set) <= 1)
# Add global constraints.
for mtf_dimension_name in (
self._layout_validator.splittable_mtf_dimension_names):
self._model.Add(
sum(self._global_vars[(mtf_dimension_name, mesh_dimension_name)]
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size)) <= 1)
# Add divisibility constraints.
for mtf_dimension_name in (
self._layout_validator.splittable_mtf_dimension_names):
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size):
if not self._layout_validator.is_valid_assignment(mtf_dimension_name,
mesh_dimension_name):
self._model.Add(self._global_vars[(mtf_dimension_name,
mesh_dimension_name)] == 0)
# Add local constraints.
for mtf_dimension_set in self._mtf_dimension_sets:
self._model.Add(
sum(self._local_vars[mtf_dimension_set][_local_var_name(
mtf_dimension_set, assignment)]
for assignment in self._assignments[mtf_dimension_set]) == 1)
# Add local-to-global constraints.
for mtf_dimension_set in self._mtf_dimension_sets:
for assignment in self._assignments[mtf_dimension_set]:
name = _local_var_name(mtf_dimension_set, assignment)
for mtf_dimension_name in mtf_dimension_set:
if mtf_dimension_name in assignment:
mesh_dimension_name = assignment[mtf_dimension_name]
self._model.AddImplication(
self._local_vars[mtf_dimension_set][name],
self._global_vars[(mtf_dimension_name, mesh_dimension_name)])
else:
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size):
self._model.AddImplication(
self._global_vars[(mtf_dimension_name, mesh_dimension_name)],
self._local_vars[mtf_dimension_set][name].Not())
# Add memory constraints.
tensor_memory_sum = {}
for tensor_name in self._graph.get_all_tensor_names():
tensor_memory_sum[tensor_name] = 0
mtf_dimension_set = self._tensor_name_to_mtf_dimension_set[tensor_name]
if not self._graph.is_tensor_on_canonical_device(tensor_name):
continue
for assignment in self._assignments[mtf_dimension_set]:
size_under_assignment = self._graph.get_tensor_size(
tensor_name, assignment,
self._layout_validator.mesh_dimension_name_to_size)
name = _local_var_name(mtf_dimension_set, assignment)
tensor_memory_sum[tensor_name] += (
size_under_assignment * self._local_vars[mtf_dimension_set][name])
for tensor_names in self._get_memory_contents():
self._model.Add(
sum(tensor_memory_sum[tensor_name]
for tensor_name in tensor_names) <= self._memory_var)
def _build_objective_function(self):
"""Builds the objective function of the IP."""
# Break ties in favor of more assignments.
scale = len(self._layout_validator.splittable_mtf_dimension_names) + 1
objective = scale * self._memory_var - sum(six.itervalues(
self._global_vars))
self._model.Minimize(objective)
def _get_memory_contents(self):
"""Runs the scheduler to determine memory contents at every point in time.
Returns:
a list of frozensets of strings, where the ith entry describes the tensors
in memory when executing the ith operation of the schedule (schedule[i] is
an index into get_all_operation_names()).
"""
if self._memory_contents is not None:
return self._memory_contents
schedule = scheduler.minimize_peak_memory(self._graph, self._scheduler_alg)
self._memory_contents = self._graph.compute_memory_contents_under_schedule(
schedule)
return self._memory_contents
def solve(self, print_solution=False):
"""Solves the current integer program and returns the computed layout.
Args:
print_solution: An optional boolean indicating whether to print the full
solution in human-readable format.
Returns:
The computed layout (as a string).
Raises:
SolverError: the internal solver could not find a solution, or the
solution found is infeasible.
"""
# Solve and see how well the solver did.
self._cp_solver = cp_model.CpSolver()
status = self._cp_solver.Solve(self._model)
if status != cp_model.OPTIMAL:
if status == cp_model.FEASIBLE:
logging.warning("A potentially suboptimal solution was found.")
else:
logging.error("Solver returned status %d.", status)
raise SolverError("The solver could not solve the problem and returned "
"status {}.".format(status))
# TODO(joshuawang): Verify the solver's solution.
if print_solution:
print_cp_model_solution.print_solution(self._model, self._cp_solver)
# Reconstruct layout from solution.
layout = []
for mtf_dimension_name in (
self._layout_validator.splittable_mtf_dimension_names):
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size):
value = self._cp_solver.Value(self._global_vars[(mtf_dimension_name,
mesh_dimension_name)])
if value: # Value is integer.
layout.append(mtf_dimension_name + ":" + mesh_dimension_name)
layout.sort()
return ";".join(layout)
def evaluate_layout(self, layout):
"""The current objective value for the given layout.
TODO(joshuawang): The current function does not check that the given
layout is valid.
Args:
layout: a string, representing a layout to evaluate (e.g.
"d_ff:m1;heads:m2").
Returns:
A float, the peak memory usage under the given layout (the memory component
of the objective).
"""
layout_dict = {}
if layout:
for pair in layout.split(";"):
mtf_dimension_name, mesh_dimension_name = pair.split(":", 1)
if (mtf_dimension_name in
self._layout_validator.splittable_mtf_dimension_names):
layout_dict[mtf_dimension_name] = mesh_dimension_name
else:
logging.warning("Skipping unsplittable dimension %s.",
mtf_dimension_name)
tensor_memory = {} # {string: float}, size of each tensor under our layout
for tensor_name in self._graph.get_all_tensor_names():
if self._graph.is_tensor_on_canonical_device(tensor_name):
tensor_memory[tensor_name] = self._graph.get_tensor_size(
tensor_name, layout_dict,
self._layout_validator.mesh_dimension_name_to_size)
else:
tensor_memory[tensor_name] = 0.0
peak_memory_usage = 0.0
for tensor_names in self._get_memory_contents():
memory_usage = 0.0
for tensor_name in tensor_names:
memory_usage += tensor_memory[tensor_name]
peak_memory_usage = max(peak_memory_usage, memory_usage)
return peak_memory_usage
def _global_var_name(splittable_dimension, mesh_dimension):
"""Name for a global variable.
Args:
splittable_dimension: the name of a splittable dimension (string)
mesh_dimension: the name of a mesh dimension (string)
Returns:
A string, the variable name.
"""
return "x_({}:{})".format(splittable_dimension, mesh_dimension)
def _local_var_name(splittable_dimensions, assignment):
"""Name for a local variable.
Args:
splittable_dimensions: frozenset of names of splittable dimensions.
assignment: dict from names of splittable dimensions to names of mesh
dimensions.
Returns:
A string, the variable name.
"""
assignment_string = []
for splittable in sorted(splittable_dimensions):
if splittable in assignment:
assignment_string.append("{}:{}".format(splittable,
assignment[splittable]))
else:
assignment_string.append("{}".format(splittable))
return "y_(" + ",".join(assignment_string) + ")"
def _generate_assignments(splittable_dimensions, mesh_dimension_to_size):
"""Generates all ways to map splittable dimensions to mesh dimensions.
Args:
splittable_dimensions: a frozenset of the names of splittable dimensions.
mesh_dimension_to_size: a dictionary from mesh dimension name to size.
Returns:
A list of the valid assignments. Each assignment is a dict keyed by every
splittable dimension, whose value is either a mesh dimension or None.
"""
assignments = []
for assignment_size in six.moves.xrange(
1 + min(len(splittable_dimensions), len(mesh_dimension_to_size))):
for s_dims_chosen in itertools.combinations(splittable_dimensions,
assignment_size):
for m_dims_chosen in itertools.permutations(mesh_dimension_to_size,
assignment_size):
assignments.append(dict(zip(s_dims_chosen, m_dims_chosen)))
return assignments
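# Illustrative example (not part of the original module): with
# splittable_dimensions = frozenset({'batch', 'heads'}) and
# mesh_dimension_to_size = {'m1': 4}, the generated assignments are
# [{}, {'batch': 'm1'}, {'heads': 'm1'}] (in some iteration order), i.e. the
# empty assignment plus every injective map of one MTF dimension to 'm1'.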
| mesh-master | mesh_tensorflow/auto_mtf/layout_optimizer.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""tf.data.Dataset interface to the MNIST dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import shutil
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow.compat.v1 as tf
def read32(bytestream):
"""Read 4 bytes from bytestream as an unsigned 32-bit integer."""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
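# For example, the four bytes b'\x00\x00\x08\x03' decode (big-endian) to 2051,
# the expected magic number for an MNIST image file checked below.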
def check_image_file_header(filename):
"""Validate that filename corresponds to images for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_images, unused
rows = read32(f)
cols = read32(f)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
if rows != 28 or cols != 28:
raise ValueError(
'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %
(f.name, rows, cols))
def check_labels_file_header(filename):
"""Validate that filename corresponds to labels for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_items, unused
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
def download(directory, filename):
"""Download (and unzip) a file from the MNIST dataset if not already done."""
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
url = 'http://yann.lecun.com/exdb/mnist/' + filename + '.gz'
_, zipped_filepath = tempfile.mkstemp(suffix='.gz')
print('Downloading %s to %s' % (url, zipped_filepath))
urllib.request.urlretrieve(url, zipped_filepath)
with gzip.open(zipped_filepath, 'rb') as f_in, \
tf.gfile.Open(filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipped_filepath)
return filepath
def dataset(directory, images_file, labels_file):
"""Download and parse MNIST dataset."""
images_file = download(directory, images_file)
labels_file = download(directory, labels_file)
check_image_file_header(images_file)
check_labels_file_header(labels_file)
def decode_image(image):
# Normalize from [0, 255] to [0.0, 1.0]
image = tf.decode_raw(image, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [784])
return image / 255.0
def decode_label(label):
label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]
label = tf.reshape(label, []) # label is a scalar
return tf.to_int32(label)
images = tf.data.FixedLengthRecordDataset(
images_file, 28 * 28, header_bytes=16).map(decode_image)
labels = tf.data.FixedLengthRecordDataset(
labels_file, 1, header_bytes=8).map(decode_label)
return tf.data.Dataset.zip((images, labels))
def train(directory):
"""tf.data.Dataset object for MNIST training data."""
return dataset(directory, 'train-images-idx3-ubyte',
'train-labels-idx1-ubyte')
def test(directory):
"""tf.data.Dataset object for MNIST test data."""
return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
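# Minimal usage sketch (assumes TF1 graph mode and a writable /tmp/mnist_data):
# ds = train('/tmp/mnist_data').batch(32)
# images, labels = ds.make_one_shot_iterator().get_next()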
| mesh-master | examples/mnist_dataset.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A toy model using Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
import numpy
import tensorflow.compat.v1 as tf
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.platform import flags
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu_config # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.tpu import tpu_estimator # pylint: disable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator import estimator as estimator_lib
FLAGS = flags.FLAGS
tf.flags.DEFINE_integer('batch_size', 64, 'Training batch size.')
tf.flags.DEFINE_integer('io_size', 16, 'Number of channels per feature.')
tf.flags.DEFINE_integer('hidden_size', 16, 'Size of each hidden layer.')
tf.flags.DEFINE_integer('num_hidden_layers', 1, 'Number of layers.')
tf.flags.DEFINE_string('master_dtype', 'bfloat16', 'dtype for master vars.')
tf.flags.DEFINE_string('slice_dtype', 'float32', 'dtype for slice vars.')
tf.flags.DEFINE_string('activation_dtype', 'float32', 'dtype for activations.')
tf.flags.DEFINE_string('optimizer', 'SGD', 'optimizer (SGD or Adafactor).')
tf.flags.DEFINE_float('lr', 1e-4, 'Learning rate.')
tf.flags.DEFINE_string('mesh_shape', 'all:8', 'mesh shape')
tf.flags.DEFINE_string('layout', 'hidden_odd:all', 'layout rules')
tf.flags.DEFINE_integer('iterations', 100,
'Number of iterations per training loop.')
tf.flags.DEFINE_integer('step_with_nan', -1,
'If >= 0, a NaN tensor is added in forward pass.')
tf.flags.DEFINE_integer('train_steps', 10000, 'max steps')
tf.flags.DEFINE_integer('steps_per_checkpoint', 200, 'steps_per_checkpoint')
tf.flags.DEFINE_string(
'model_dir',
default='',
help='The directory where the model will be stored.')
tf.flags.DEFINE_bool('use_tpu', True, 'use TPU')
# Cloud TPU Cluster Resolvers
tf.flags.DEFINE_string(
'tpu',
default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
tf.flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
tf.flags.DEFINE_string(
'tpu_zone',
default=None,
help='GCE zone where the Cloud TPU is located. If not specified, we '
'will attempt to automatically detect the zone from metadata.')
class ToyModelInput(object):
"""Wrapper class that acts as the input_fn to TPUEstimator."""
def __init__(self):
self._num_examples = 10000 # 10k
self._images = numpy.random.uniform(
0, 1.0, [self._num_examples, FLAGS.io_size]).astype(numpy.float32)
self._labels = self._images
logging.info('init ToyModelInput()')
def __call__(self, params):
"""Input function which provides a single batch for train or eval."""
# Retrieves the batch size for the current shard. The # of shards is
# computed according to the input pipeline deployment. See
# `tf.estimator.tpu.RunConfig` for details.
batch_size = params['batch_size']
logging.info('call ToyModelInput() with batch size {}'.format(batch_size))
ds = Dataset.from_tensor_slices((self._images, self._labels)).repeat()
dataset = ds.batch(batch_size, drop_remainder=True).prefetch(2)
return dataset
def toy_model(features, mesh):
"""A toy model implemented by mesh tensorlfow."""
batch_dim = mtf.Dimension('batch', FLAGS.batch_size)
io_dim = mtf.Dimension('io', FLAGS.io_size)
master_dtype = tf.as_dtype(FLAGS.master_dtype)
slice_dtype = tf.as_dtype(FLAGS.slice_dtype)
activation_dtype = tf.as_dtype(FLAGS.activation_dtype)
x = mtf.import_tf_tensor(mesh, features, mtf.Shape([batch_dim, io_dim]))
x = mtf.cast(x, activation_dtype)
h = x
for lnum in range(1, FLAGS.num_hidden_layers + 2):
if lnum + 1 == FLAGS.num_hidden_layers + 2:
# output layer
dim = io_dim
elif lnum % 2 == 0:
dim = mtf.Dimension('hidden_even', FLAGS.hidden_size)
else:
dim = mtf.Dimension('hidden_odd', FLAGS.hidden_size)
h = mtf.layers.dense(
h, dim,
use_bias=False,
master_dtype=master_dtype,
slice_dtype=slice_dtype,
name='layer_%d' % lnum)
y = h
g = tf.train.get_global_step()
if FLAGS.step_with_nan >= 0:
# Trigger a NaN in the forward pass; this is used to test whether
# Mesh TensorFlow can handle occasional NaN values.
y += mtf.import_tf_tensor(
mesh,
tf.divide(
0.0,
tf.cond(tf.equal(g, FLAGS.step_with_nan), lambda: 0., lambda: 1.)),
mtf.Shape([]))
loss = mtf.reduce_mean(mtf.square(y - x))
return y, loss
def model_fn(features, labels, mode, params):
"""A model is called by TpuEstimator."""
del labels
global_step = tf.train.get_global_step()
graph = mtf.Graph()
mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
if FLAGS.use_tpu:
ctx = params['context']
num_hosts = ctx.num_hosts
host_placement_fn = ctx.tpu_host_placement_function
device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)]
tf.logging.info('device_list = %s' % device_list,)
# TODO(ylc): Better estimation of replica cache size?
replica_cache_size = 300 * 1000000 # 300M per replica
# Worker 0 caches all the TPU binaries.
worker0_mem = replica_cache_size * ctx.num_replicas
devices_memory_usage = [worker0_mem] + [0] * (num_hosts - 1)
var_placer = mtf.utils.BalancedVariablePlacer(device_list,
devices_memory_usage)
mesh_devices = [''] * mesh_shape.size
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
mesh_shape, layout_rules, mesh_devices, ctx.device_assignment)
else:
var_placer = None
mesh_devices = [''] * mesh_shape.size
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
mesh_shape, layout_rules, mesh_devices)
mesh = mtf.Mesh(graph, 'my_mesh', var_placer)
with mtf.utils.outside_all_rewrites():
logits, loss = toy_model(features, mesh)
# TRAIN mode
if mode == tf.estimator.ModeKeys.TRAIN:
var_grads = mtf.gradients([loss],
[v.outputs[0] for v in graph.trainable_variables])
if FLAGS.optimizer == 'Adafactor':
optimizer = mtf.optimize.AdafactorOptimizer()
else:
assert FLAGS.optimizer == 'SGD'
optimizer = mtf.optimize.SgdOptimizer(learning_rate=FLAGS.lr)
update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables)
else:
# for now, we can only export fully-replicated tensors.
fully_replicated_logits = mtf.anonymize(logits)
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
tf_loss = tf.to_float(lowering.export_to_tf_tensor(loss))
if mode == tf.estimator.ModeKeys.TRAIN:
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
tf.logging.info('tf_update_ops: {}'.format(tf_update_ops))
train_op = tf.group(tf_update_ops)
else:
tf_logits = lowering.export_to_tf_tensor(fully_replicated_logits)
with mtf.utils.outside_all_rewrites():
# Copy master variables to slices. Must be called first.
restore_hook = mtf.MtfRestoreHook(lowering)
if mode == tf.estimator.ModeKeys.TRAIN:
saver = tf.train.Saver(
tf.global_variables(),
sharded=True,
max_to_keep=10,
keep_checkpoint_every_n_hours=2,
defer_build=False,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
FLAGS.model_dir,
save_steps=1000,
saver=saver,
listeners=[saver_listener])
return tpu_estimator.TPUEstimatorSpec(
tf.estimator.ModeKeys.TRAIN,
loss=tf_loss,
train_op=train_op,
training_hooks=[restore_hook, saver_hook])
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(tf_logits):
mean_logits = tf.metrics.mean(tf_logits)
return {'mean_logits': mean_logits}
eval_metrics = (metric_fn, [tf_logits])
return tpu_estimator.TPUEstimatorSpec(
tf.estimator.ModeKeys.EVAL,
evaluation_hooks=[restore_hook],
loss=tf_loss,
eval_metrics=eval_metrics)
def run_toy_model_tpu():
"""Run a toy model on TPU."""
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
iterations_per_loop = FLAGS.iterations
mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
config = tpu_config.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=None, # Disable the default saver
save_checkpoints_secs=None, # Disable the default saver
log_step_count_steps=iterations_per_loop,
save_summary_steps=iterations_per_loop,
tpu_config=tpu_config.TPUConfig(
num_shards=mesh_shape.size,
iterations_per_loop=iterations_per_loop,
num_cores_per_replica=1,
per_host_input_for_training=tpu_config.InputPipelineConfig.BROADCAST))
classifier = tpu_estimator.TPUEstimator(
use_tpu=True,
model_fn=model_fn,
config=config,
train_batch_size=FLAGS.batch_size,
eval_batch_size=FLAGS.batch_size)
current_step = estimator_lib._load_global_step_from_checkpoint_dir(FLAGS.model_dir) # pylint: disable=protected-access,line-too-long
logging.info('Current step %d', current_step)
if FLAGS.steps_per_checkpoint == 0:
classifier.train(input_fn=ToyModelInput(), max_steps=FLAGS.train_steps)
return
while current_step < FLAGS.train_steps:
next_checkpoint = min(current_step + FLAGS.steps_per_checkpoint,
FLAGS.train_steps)
classifier.train(input_fn=ToyModelInput(), max_steps=next_checkpoint)
current_step = next_checkpoint
logging.info('Starting to evaluate.')
eval_results = classifier.evaluate(
input_fn=ToyModelInput(),
steps=156) # since we have 10000 examples and batch_size = 64 per host
logging.info('Eval results: %s', eval_results)
def main(_):
run_toy_model_tpu()
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
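# Example invocation (hypothetical TPU name and bucket; flags are defined above):
# python toy_model_tpu.py --tpu=my-tpu --model_dir=gs://my-bucket/toy_model \
#     --mesh_shape=all:8 --layout=hidden_odd:all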
| mesh-master | examples/toy_model_tpu.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MNIST using Mesh TensorFlow and TF Estimator.
This is an illustration, not a good model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
import mnist_dataset as dataset # local file import
import tensorflow.compat.v1 as tf
tf.flags.DEFINE_string("data_dir", "/tmp/mnist_data",
"Path to directory containing the MNIST dataset")
tf.flags.DEFINE_string("model_dir", "/tmp/mnist_model", "Estimator model_dir")
tf.flags.DEFINE_integer("batch_size", 200,
"Mini-batch size for the training. Note that this "
"is the global batch size and not the per-shard batch.")
tf.flags.DEFINE_integer("hidden_size", 512, "Size of each hidden layer.")
tf.flags.DEFINE_integer("train_epochs", 40, "Total number of training epochs.")
tf.flags.DEFINE_integer("epochs_between_evals", 1,
"# of epochs between evaluations.")
tf.flags.DEFINE_integer("eval_steps", 0,
"Total number of evaluation steps. If `0`, evaluation "
"after training is skipped.")
tf.flags.DEFINE_string("mesh_shape", "b1:2;b2:2", "mesh shape")
tf.flags.DEFINE_string("layout", "row_blocks:b1;col_blocks:b2",
"layout rules")
FLAGS = tf.flags.FLAGS
def mnist_model(image, labels, mesh):
"""The model.
Args:
image: tf.Tensor with shape [batch, 28*28]
labels: a tf.Tensor with shape [batch] and dtype tf.int32
mesh: a mtf.Mesh
Returns:
logits: a mtf.Tensor with shape [batch, 10]
loss: a mtf.Tensor with shape []
"""
batch_dim = mtf.Dimension("batch", FLAGS.batch_size)
row_blocks_dim = mtf.Dimension("row_blocks", 4)
col_blocks_dim = mtf.Dimension("col_blocks", 4)
rows_dim = mtf.Dimension("rows_size", 7)
cols_dim = mtf.Dimension("cols_size", 7)
classes_dim = mtf.Dimension("classes", 10)
one_channel_dim = mtf.Dimension("one_channel", 1)
x = mtf.import_tf_tensor(
mesh, tf.reshape(image, [FLAGS.batch_size, 4, 7, 4, 7, 1]),
mtf.Shape(
[batch_dim, row_blocks_dim, rows_dim,
col_blocks_dim, cols_dim, one_channel_dim]))
x = mtf.transpose(x, [
batch_dim, row_blocks_dim, col_blocks_dim,
rows_dim, cols_dim, one_channel_dim])
# add some convolutional layers to demonstrate that convolution works.
filters1_dim = mtf.Dimension("filters1", 16)
filters2_dim = mtf.Dimension("filters2", 16)
f1 = mtf.relu(mtf.layers.conv2d_with_blocks(
x, filters1_dim, filter_size=[9, 9], strides=[1, 1], padding="SAME",
h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim, name="conv0"))
f2 = mtf.relu(mtf.layers.conv2d_with_blocks(
f1, filters2_dim, filter_size=[9, 9], strides=[1, 1], padding="SAME",
h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim, name="conv1"))
x = mtf.reduce_mean(f2, reduced_dim=filters2_dim)
# add some fully-connected dense layers.
hidden_dim1 = mtf.Dimension("hidden1", FLAGS.hidden_size)
hidden_dim2 = mtf.Dimension("hidden2", FLAGS.hidden_size)
h1 = mtf.layers.dense(
x, hidden_dim1,
reduced_dims=x.shape.dims[-4:],
activation=mtf.relu, name="hidden1")
h2 = mtf.layers.dense(
h1, hidden_dim2,
activation=mtf.relu, name="hidden2")
logits = mtf.layers.dense(h2, classes_dim, name="logits")
if labels is None:
loss = None
else:
labels = mtf.import_tf_tensor(
mesh, tf.reshape(labels, [FLAGS.batch_size]), mtf.Shape([batch_dim]))
loss = mtf.layers.softmax_cross_entropy_with_logits(
logits, mtf.one_hot(labels, classes_dim), classes_dim)
loss = mtf.reduce_mean(loss)
return logits, loss
def model_fn(features, labels, mode, params):
"""The model_fn argument for creating an Estimator."""
tf.logging.info("features = %s labels = %s mode = %s params=%s" %
(features, labels, mode, params))
global_step = tf.train.get_global_step()
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
logits, loss = mnist_model(features, labels, mesh)
mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
mesh_size = mesh_shape.size
mesh_devices = [""] * mesh_size
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
mesh_shape, layout_rules, mesh_devices)
if mode == tf.estimator.ModeKeys.TRAIN:
var_grads = mtf.gradients(
[loss], [v.outputs[0] for v in graph.trainable_variables])
optimizer = mtf.optimize.AdafactorOptimizer()
update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables)
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
restore_hook = mtf.MtfRestoreHook(lowering)
tf_logits = lowering.export_to_tf_tensor(logits)
if mode != tf.estimator.ModeKeys.PREDICT:
tf_loss = lowering.export_to_tf_tensor(loss)
tf.summary.scalar("loss", tf_loss)
if mode == tf.estimator.ModeKeys.TRAIN:
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
train_op = tf.group(tf_update_ops)
saver = tf.train.Saver(
tf.global_variables(),
sharded=True,
max_to_keep=10,
keep_checkpoint_every_n_hours=2,
defer_build=False, save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
FLAGS.model_dir,
save_steps=1000,
saver=saver,
listeners=[saver_listener])
accuracy = tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(tf_logits, axis=1))
# Name tensors to be logged with LoggingTensorHook.
tf.identity(tf_loss, "cross_entropy")
tf.identity(accuracy[1], name="train_accuracy")
# Save accuracy scalar to Tensorboard output.
tf.summary.scalar("train_accuracy", accuracy[1])
# restore_hook must come before saver_hook
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op,
training_chief_hooks=[restore_hook, saver_hook])
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"classes": tf.argmax(tf_logits, axis=1),
"probabilities": tf.nn.softmax(tf_logits),
}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
prediction_hooks=[restore_hook],
export_outputs={
"classify": tf.estimator.export.PredictOutput(predictions)
})
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=tf_loss,
evaluation_hooks=[restore_hook],
eval_metric_ops={
"accuracy":
tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(tf_logits, axis=1)),
})
def run_mnist():
"""Run MNIST training and eval loop."""
mnist_classifier = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=FLAGS.model_dir)
# Set up training and evaluation input functions.
def train_input_fn():
"""Prepare data for training."""
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes use less memory. MNIST is a small
# enough dataset that we can easily shuffle the full epoch.
ds = dataset.train(FLAGS.data_dir)
ds_batched = ds.cache().shuffle(buffer_size=50000).batch(FLAGS.batch_size)
# Iterate through the dataset a set number (`epochs_between_evals`) of times
# during each training session.
ds = ds_batched.repeat(FLAGS.epochs_between_evals)
return ds
def eval_input_fn():
return dataset.test(FLAGS.data_dir).batch(
FLAGS.batch_size).make_one_shot_iterator().get_next()
# Train and evaluate model.
for _ in range(FLAGS.train_epochs // FLAGS.epochs_between_evals):
mnist_classifier.train(input_fn=train_input_fn, hooks=None)
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print("\nEvaluation results:\n\t%s\n" % eval_results)
def main(_):
run_mnist()
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
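# Example invocation (hypothetical paths; flags are defined above):
# python mnist.py --data_dir=/tmp/mnist_data --model_dir=/tmp/mnist_model \
#     --mesh_shape='b1:2;b2:2' --layout='row_blocks:b1;col_blocks:b2'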
| mesh-master | examples/mnist.py |
from setuptools import setup, find_packages
setup(
name = 'autoregressive-linear-attention-cuda',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Autoregressive Linear Attention CUDA kernel',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/autoregressive-linear-attention-cuda',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'linear attention',
'cuda'
],
install_requires=[
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| autoregressive-linear-attention-cuda-main | setup.py |
| autoregressive-linear-attention-cuda-main | autoregressive_linear_attention_cuda/__init__.py |
| autoregressive-linear-attention-cuda-main | autoregressive_linear_attention_cuda/autoregressive_linear_attention_cuda.py |
from setuptools import setup, find_packages
setup(
name = 'retro-pytorch',
packages = find_packages(exclude=[]),
version = '0.3.8',
license='MIT',
description = 'RETRO - Retrieval Enhanced Transformer - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/RETRO-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention-mechanism',
'retrieval',
],
install_requires=[
'autofaiss',
'einops>=0.3',
'numpy',
'sentencepiece',
'torch>=1.6',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| RETRO-pytorch-main | setup.py |
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from retro_pytorch.retrieval import BERT_VOCAB_SIZE
from einops import rearrange, repeat
# constants
MIN_DIM_HEAD = 32
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(val, divisor):
return (val / divisor).is_integer()
def cast_tuple(val, num = 1):
return val if isinstance(val, tuple) else ((val,) * num)
# deepnet init
def deepnorm_init(transformer, beta, module_name_match_list = ['.ff.', '.to_v', '.to_out']):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# normalization
class RMSNorm(nn.Module):
def __init__(
self,
dim,
*,
eps = 1e-8,
gated = False
):
super().__init__()
self.eps = eps
self.scale = dim ** -0.5
self.gamma = nn.Parameter(torch.ones(dim))
self.weight = nn.Parameter(torch.ones(dim)) if gated else None
def forward(self, x):
norm = x.norm(keepdim = True, dim = -1) * self.scale
out = (x / norm.clamp(min = self.eps)) * self.gamma
if not exists(self.weight):
return out
return out * (x * self.weight).sigmoid()
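# Sketch of the math above (descriptive only): out = gamma * x / max(rms(x), eps),
# where rms(x) = ||x|| / sqrt(dim); the gated variant additionally multiplies the
# result elementwise by sigmoid(x * weight).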
# pre and post norm residual wrapper modules
class PreNorm(nn.Module):
def __init__(self, dim, fn, norm_klass = RMSNorm):
super().__init__()
self.fn = fn
self.norm = norm_klass(dim)
def forward(self, x, *args, **kwargs):
return self.fn(self.norm(x), *args, **kwargs) + x
class PostNorm(nn.Module):
def __init__(self, dim, fn, scale_residual = 1, norm_klass = RMSNorm):
super().__init__()
self.fn = fn
self.scale_residual = scale_residual
self.norm = norm_klass(dim)
def forward(self, x, *args, **kwargs):
residual = x * self.scale_residual
out = self.fn(x, *args, **kwargs) + residual
return self.norm(out)
# positional embedding
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, max_seq_len, *, device, offset = 0):
seq = torch.arange(max_seq_len, device = device) + offset
freqs = einsum('i , j -> i j', seq.type_as(self.inv_freq), self.inv_freq)
emb = torch.cat((freqs, freqs), dim = -1)
return rearrange(emb, 'n d -> 1 1 n d')
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs):
seq_len, rot_dim = t.shape[-2], freqs.shape[-1]
t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
t = (t * freqs.cos()) + (rotate_half(t) * freqs.sin())
return torch.cat((t, t_pass), dim = -1)
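# Sketch of the rotation above: within the first rot_dim features, feature i is
# paired with feature i + rot_dim // 2 (via rotate_half) and the pair is rotated
# by a position-dependent angle; the remaining t_pass features are left untouched.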
# feedforward
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
inner_dim = int(mult * dim)
self.ff = nn.Sequential(
nn.Linear(dim, inner_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim)
)
def forward(self, x):
return self.ff(x)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
*,
context_dim = None,
dim_head = 64,
heads = 8,
causal = False,
dropout = 0.,
null_kv = False
):
super().__init__()
context_dim = default(context_dim, dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.causal = causal
inner_dim = dim_head * heads
self.dropout = nn.Dropout(dropout)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_k = nn.Linear(context_dim, inner_dim, bias = False)
self.to_v = nn.Linear(context_dim, inner_dim, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
# allowing for attending to nothing (null function)
# and to save attention from breaking if all retrieved chunks are padded out
self.null_k = nn.Parameter(torch.randn(inner_dim)) if null_kv else None
self.null_v = nn.Parameter(torch.randn(inner_dim)) if null_kv else None
def forward(self, x, mask = None, context = None, pos_emb = None):
b, device, h, scale = x.shape[0], x.device, self.heads, self.scale
kv_input = default(context, x)
q, k, v = self.to_q(x), self.to_k(kv_input), self.to_v(kv_input)
# split heads
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# scale
q = q * scale
# apply relative positional encoding (rotary embeddings)
if exists(pos_emb):
q_pos_emb, k_pos_emb = cast_tuple(pos_emb, num = 2)
q = apply_rotary_pos_emb(q, q_pos_emb)
k = apply_rotary_pos_emb(k, k_pos_emb)
# add null key / values
if exists(self.null_k):
nk, nv = self.null_k, self.null_v
nk, nv = map(lambda t: repeat(t, '(h d) -> b h 1 d', b = b, h = h), (nk, nv))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# derive query key similarities
sim = einsum('b h i d, b h j d -> b h i j', q, k)
# masking
mask_value = -torch.finfo(sim.dtype).max
if exists(mask):
if exists(self.null_k):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, mask_value)
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones(i, j, device = device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, mask_value)
# attention
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
# aggregate
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# combine heads linear out
return self.to_out(out)
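# Minimal usage sketch (shapes chosen for illustration only):
# attn = Attention(dim = 512, dim_head = 64, heads = 8, causal = True)
# x = torch.randn(2, 1024, 512)   # (batch, seq, dim)
# out = attn(x)                   # -> (2, 1024, 512)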
class ChunkedCrossAttention(nn.Module):
def __init__(
self,
chunk_size,
**kwargs
):
super().__init__()
self.chunk_size = chunk_size
self.cross_attn = Attention(null_kv = True, **kwargs)
def forward(self, x, *, context_mask = None, context, pos_emb = None):
# derive variables
chunk_size = self.chunk_size
b, n, num_chunks, num_retrieved = x.shape[0], x.shape[-2], *context.shape[-4:-2]
# if sequence length less than chunk size, do an early return
if n < self.chunk_size:
return torch.zeros_like(x)
# causal padding
causal_padding = chunk_size - 1
x = F.pad(x, (0, 0, -causal_padding, causal_padding), value = 0.)
# remove sequence which is ahead of the neighbors retrieved (during inference)
seq_index = (n // chunk_size) * chunk_size
x, x_remainder = x[:, :seq_index], x[:, seq_index:]
seq_remain_len = x_remainder.shape[-2]
# take care of rotary positional embedding
# make sure queries positions are properly shifted to the future
q_pos_emb, k_pos_emb = pos_emb
q_pos_emb = F.pad(q_pos_emb, (0, 0, -causal_padding, causal_padding), value = 0.)
k_pos_emb = repeat(k_pos_emb, 'b h n d -> b h (r n) d', r = num_retrieved)
pos_emb = (q_pos_emb, k_pos_emb)
# reshape so we have chunk to chunk attention, without breaking causality
x = rearrange(x, 'b (k n) d -> (b k) n d', k = num_chunks)
context = rearrange(context, 'b k r n d -> (b k) (r n) d')
if exists(context_mask):
context_mask = rearrange(context_mask, 'b k r n -> (b k) (r n)')
# cross attention
out = self.cross_attn(x, context = context, mask = context_mask, pos_emb = pos_emb)
# reshape back to original sequence
out = rearrange(out, '(b k) n d -> b (k n) d', b = b)
# pad back to original, with 0s at the beginning (which will be added to the residual and be fine)
out = F.pad(out, (0, 0, causal_padding, -causal_padding + seq_remain_len), value = 0.)
return out
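# Note on the causal shift above: hidden states are shifted by chunk_size - 1 so
# that the neighbors retrieved for a chunk can only influence predictions made
# from the last token of that chunk onward (once the whole chunk is known),
# which preserves autoregressive causality.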
# encoder and decoder classes
class Encoder(nn.Module):
def __init__(
self,
dim,
*,
depth,
context_dim = None,
causal = False,
heads = 8,
dim_head = 64,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
final_norm = True,
cross_attn_layers = None,
post_norm = False,
output_dim = None,
norm_klass = RMSNorm,
scale_residual = 1.
):
super().__init__()
self.layers = nn.ModuleList([])
# partial rotary embeddings, which is better than full rotary
# Wang and Komatsuzaki et al https://github.com/kingoflolz/mesh-transformer-jax/
rotary_emb_dim = min(dim_head, MIN_DIM_HEAD)
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim)
wrapper = partial(PreNorm, dim, norm_klass = norm_klass) if not post_norm else partial(PostNorm, dim, scale_residual = scale_residual, norm_klass = norm_klass)
for layer_num in range(1, depth + 1):
has_cross_attn = not exists(cross_attn_layers) or layer_num in cross_attn_layers
self.layers.append(nn.ModuleList([
wrapper(Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, causal = causal)),
wrapper(Attention(dim = dim, context_dim = context_dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)) if has_cross_attn else None,
wrapper(FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)),
]))
self.norm_out = norm_klass(dim) if final_norm and not post_norm else nn.Identity()
self.project_out = nn.Linear(dim, output_dim) if exists(output_dim) else nn.Identity()
def forward(self, x, *, mask = None, chunked_seq):
device, chunk_size, seq_len = x.device, x.shape[-2], chunked_seq.shape[-2]
q_pos_emb = self.rotary_pos_emb(chunk_size, device = device)
k_pos_emb = self.rotary_pos_emb(seq_len, device = device)
for attn, cross_attn, ff in self.layers:
x = attn(x, mask = mask, pos_emb = q_pos_emb)
if exists(cross_attn):
x = cross_attn(x, context = chunked_seq, pos_emb = (q_pos_emb, k_pos_emb))
x = ff(x)
x = self.norm_out(x)
return self.project_out(x)
class Decoder(nn.Module):
def __init__(
self,
dim,
*,
depth,
heads = 8,
dim_head = 64,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
final_norm = True,
cross_attn_layers = None,
chunk_size = 64,
post_norm = False,
norm_klass = RMSNorm,
scale_residual = 1.
):
super().__init__()
self.layers = nn.ModuleList([])
# partial rotary embeddings, which is better than full rotary
# Wang and Komatsuzaki et al https://github.com/kingoflolz/mesh-transformer-jax/
rotary_emb_dim = min(dim_head, MIN_DIM_HEAD)
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim)
wrapper = partial(PreNorm, dim, norm_klass = norm_klass) if not post_norm else partial(PostNorm, dim, scale_residual = scale_residual, norm_klass = norm_klass)
self.chunk_size = chunk_size
for layer_num in range(1, depth + 1):
has_cross_attn = not exists(cross_attn_layers) or layer_num in cross_attn_layers
self.layers.append(nn.ModuleList([
wrapper(Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, causal = True)),
wrapper(ChunkedCrossAttention(chunk_size = chunk_size, dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)) if has_cross_attn else None,
wrapper(FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)),
]))
self.norm_out = norm_klass(dim) if final_norm and not post_norm else nn.Identity()
def forward(self, x, *, encoder = None, encoder_retrieved_mask = None, context_mask = None, retrieved = None):
device, seq_len = x.device, x.shape[-2]
self_attn_pos_emb = self.rotary_pos_emb(seq_len, device = device)
# calculate seq index
num_seq_chunks = seq_len // self.chunk_size
seq_index = num_seq_chunks * self.chunk_size
# rotary positions on the retrieved chunks
if exists(retrieved):
num_chunks, num_neighbors, chunk_size = retrieved.shape[-4:-1]
cross_attn_q_pos_emb = self.rotary_pos_emb(self.chunk_size, device = device, offset = self.chunk_size - 1) # need to add extra chunk size, since it will be shifted
cross_attn_k_pos_emb = self.rotary_pos_emb(chunk_size, device = device)
cross_attn_pos_emb = (cross_attn_q_pos_emb, cross_attn_k_pos_emb)
# keep track of whether retrieved tokens are encoded yet
retrieved_encoded = False
# go through the decoder layers
for attn, cross_attn, ff in self.layers:
x = attn(x, pos_emb = self_attn_pos_emb)
if exists(cross_attn) and exists(retrieved):
if not retrieved_encoded:
retrieved = rearrange(retrieved, 'b k r n d -> (b k r) n d')
seq_as_context = repeat(x[:, :seq_index], 'b (k n) d -> (b k r) n d', n = self.chunk_size, r = num_neighbors)
retrieved = encoder(retrieved, mask = encoder_retrieved_mask, chunked_seq = seq_as_context)
retrieved = rearrange(retrieved, '(b k r) n d -> b k r n d', k = num_chunks, r = num_neighbors)
retrieved_encoded = True
x = cross_attn(
x,
context = retrieved,
context_mask = context_mask,
pos_emb = cross_attn_pos_emb
)
x = ff(x)
return self.norm_out(x)
# main class
class RETRO(nn.Module):
def __init__(
self,
*,
num_tokens = BERT_VOCAB_SIZE,
max_seq_len = 2048,
enc_dim = 896,
enc_depth = 2,
enc_cross_attn_layers = None,
dec_depth = 12,
dec_cross_attn_layers = (1, 3, 6, 9),
heads = 8,
dec_dim = 768,
dim_head = 64,
enc_attn_dropout = 0.,
enc_ff_dropout = 0.,
dec_attn_dropout = 0.,
dec_ff_dropout = 0.,
chunk_size = 64,
pad_id = 0,
enc_scale_residual = None,
dec_scale_residual = None,
norm_klass = None,
gated_rmsnorm = False,
use_deepnet = False
):
super().__init__()
assert dim_head >= MIN_DIM_HEAD, f'dimension per head must be at least {MIN_DIM_HEAD}'
self.seq_len = max_seq_len
self.pad_id = pad_id
self.token_emb = nn.Embedding(num_tokens, enc_dim)
self.pos_emb = nn.Embedding(max_seq_len, enc_dim)
self.chunk_size = chunk_size
self.to_decoder_model_dim = nn.Linear(enc_dim, dec_dim) if enc_dim != dec_dim else nn.Identity()
# for deepnet, residual scales
# follow equation in Figure 2. in https://arxiv.org/abs/2203.00555
norm_klass = default(norm_klass, RMSNorm)
if use_deepnet:
enc_scale_residual = default(enc_scale_residual, 0.81 * ((enc_depth ** 4) * dec_depth) ** .0625)
dec_scale_residual = default(dec_scale_residual, (3 * dec_depth) ** 0.25)
norm_klass = nn.LayerNorm
# allow for gated rmsnorm
if gated_rmsnorm:
norm_klass = partial(RMSNorm, gated = True)
# define encoder and decoders
self.encoder = Encoder(
dim = enc_dim,
context_dim = dec_dim,
depth = enc_depth,
attn_dropout = enc_attn_dropout,
ff_dropout = enc_ff_dropout,
cross_attn_layers = enc_cross_attn_layers,
post_norm = use_deepnet,
norm_klass = norm_klass,
scale_residual = enc_scale_residual,
output_dim = dec_dim
)
self.decoder = Decoder(
dim = dec_dim,
depth = dec_depth,
attn_dropout = dec_attn_dropout,
ff_dropout = dec_ff_dropout,
cross_attn_layers = dec_cross_attn_layers,
chunk_size = chunk_size,
post_norm = use_deepnet,
norm_klass = norm_klass,
scale_residual = dec_scale_residual
)
self.to_logits = nn.Linear(dec_dim, num_tokens)
# deepnet has special init of weight matrices
if use_deepnet:
deepnorm_init(self.encoder, 0.87 * ((enc_depth ** 4) * dec_depth) ** -0.0625)
deepnorm_init(self.decoder, (12 * dec_depth) ** -0.25)
def forward_without_retrieval(
self,
seq
):
# embed sequence
embed = self.token_emb(seq)
embed = embed[:, :self.seq_len]
# get absolute positional embedding
pos_emb = self.pos_emb(torch.arange(embed.shape[1], device = embed.device))
pos_emb = rearrange(pos_emb, 'n d -> 1 n d')
embed = embed + pos_emb
embed = self.to_decoder_model_dim(embed)
embed = self.decoder(embed)
# project to logits
return self.to_logits(embed)
def forward(
self,
seq,
retrieved = None,
return_loss = False
):
"""
b - batch
n - sequence length / chunk length
k - number of chunks
d - feature dimension
r - num retrieved neighbors
"""
if not exists(retrieved):
return self.forward_without_retrieval(seq)
assert not (return_loss and not self.training), 'must be training if returning loss'
# assume padding token id (usually 0.) is to be masked out
mask = retrieved != self.pad_id
# handle some user inputs
if retrieved.ndim == 3:
retrieved = rearrange(retrieved, 'b k n -> b k 1 n') # 1 neighbor retrieved
# if training, derive labels
if return_loss:
seq, labels = seq[:, :-1], seq[:, 1:]
# variables
n, num_chunks, num_neighbors, chunk_size, retrieved_shape, device = seq.shape[-1], *retrieved.shape[-3:], retrieved.shape, seq.device
assert chunk_size >= self.chunk_size, 'chunk size of retrieval input must be greater or equal to the designated chunk_size on RETRO initialization'
num_seq_chunks = n // self.chunk_size
assert num_chunks == num_seq_chunks, f'sequence requires {num_seq_chunks} retrieved chunks, but only {num_chunks} passed in'
# sequence index after which k-nearest neighbors have not yet been fetched
seq_index = num_seq_chunks * self.chunk_size
# embed both sequence and retrieved chunks
embed = self.token_emb(seq)
retrieved = self.token_emb(retrieved)
# get absolute positional embedding
pos_emb = self.pos_emb(torch.arange(n, device = device))
pos_emb = rearrange(pos_emb, 'n d -> 1 n d')
embed = embed + pos_emb
# handle masks for encoder and decoder, if needed
encoder_retrieved_mask = decoder_retrieved_mask = None
if exists(mask):
assert mask.shape == retrieved_shape, 'retrieval mask must be of the same shape as the retrieval tokens'
encoder_retrieved_mask = rearrange(mask, 'b k r n -> (b k r) n')
decoder_retrieved_mask = mask
# project both sequence embedding and retrieved embedding to decoder dimension if necessary
embed = self.to_decoder_model_dim(embed)
# decode
embed = self.decoder(
embed,
encoder = self.encoder,
context_mask = decoder_retrieved_mask,
encoder_retrieved_mask = encoder_retrieved_mask,
retrieved = retrieved
)
# project to logits
logits = self.to_logits(embed)
if not return_loss:
return logits
# cross entropy loss
loss = F.cross_entropy(rearrange(logits, 'b n c -> b c n'), labels, ignore_index = self.pad_id)
return loss
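# Minimal usage sketch (toy tensors; token ids and shapes are illustrative only):
# retro = RETRO(chunk_size = 64, max_seq_len = 2048)
# seq = torch.randint(0, 20000, (2, 2048 + 1))          # one extra token for the labels
# retrieved = torch.randint(0, 20000, (2, 32, 2, 128))  # (batch, chunks, neighbors, chunk + continuation)
# loss = retro(seq, retrieved, return_loss = True)      # requires training mode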
| RETRO-pytorch-main | retro_pytorch/retro_pytorch.py |
from retro_pytorch.retro_pytorch import RETRO
from retro_pytorch.data import RETRODataset
from retro_pytorch.training import TrainingWrapper
| RETRO-pytorch-main | retro_pytorch/__init__.py |
import os
import numpy as np
from pathlib import Path
from shutil import rmtree
from contextlib import contextmanager
def is_true_env_flag(env_flag):
return os.getenv(env_flag, 'false').lower() in ('true', '1', 't')
def reset_folder_(p):
path = Path(p)
rmtree(path, ignore_errors = True)
path.mkdir(exist_ok = True, parents = True)
@contextmanager
def memmap(*args, **kwargs):
pointer = np.memmap(*args, **kwargs)
yield pointer
del pointer
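# Usage sketch (hypothetical path and shape): the context manager just ensures the
# np.memmap handle is dereferenced, which flushes any changes to disk, when the block exits.
# with memmap('/tmp/example.dat', shape = (10, 4), dtype = np.float32, mode = 'w+') as arr:
#     arr[0] = 1.0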
| RETRO-pytorch-main | retro_pytorch/utils.py |
from pathlib import Path
from math import ceil
import torch
import torch.nn.functional as F
import logging
import numpy as np
from einops import rearrange
import faiss
from autofaiss import build_index
from retro_pytorch.utils import memmap, reset_folder_
# constants
SOS_ID = 101
EOS_ID = 102
BERT_MODEL_DIM = 768
BERT_VOCAB_SIZE = 28996
TMP_PATH = Path('./.tmp')
INDEX_FOLDER_PATH = TMP_PATH / '.index'
EMBEDDING_TMP_SUBFOLDER = 'embeddings'
# helper functions
def exists(val):
return val is not None
def range_chunked(max_value, *, batch_size):
counter = 0
while counter < max_value:
curr = counter + batch_size
curr = min(curr, max_value)
yield slice(counter, curr)
counter = curr
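# For example, range_chunked(10, batch_size = 4) yields slice(0, 4), slice(4, 8), slice(8, 10).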
# indexing helper functions
def faiss_read_index(path):
return faiss.read_index(str(path), faiss.IO_FLAG_MMAP | faiss.IO_FLAG_READ_ONLY)
# singleton globals
MODEL = None
TOKENIZER = None
def get_tokenizer():
global TOKENIZER
if not exists(TOKENIZER):
TOKENIZER = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-base-cased')
return TOKENIZER
def get_bert():
global MODEL
if not exists(MODEL):
MODEL = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-cased')
if torch.cuda.is_available():
MODEL = MODEL.cuda()
return MODEL
# tokenize
def tokenize(texts, add_special_tokens = True):
if not isinstance(texts, (list, tuple)):
texts = [texts]
tokenizer = get_tokenizer()
encoding = tokenizer.batch_encode_plus(
texts,
add_special_tokens = add_special_tokens,
padding = True,
return_tensors = 'pt'
)
token_ids = encoding.input_ids
return token_ids
# text to chunks
def doc_text_to_chunks_and_seq_indices(
*,
doc_text,
chunk_size = 64,
seq_len = 2048,
pad_id = 0
):
assert (seq_len % chunk_size) == 0, 'sequence length must be divisible by chunk size'
ids = tokenize(doc_text)
ids = rearrange(ids, '1 ... -> ...')
text_len = ids.shape[-1]
# pad to multiple of chunk size with an extra token
padding = chunk_size - ((text_len - 1) % chunk_size)
ids = F.pad(ids, (0, padding))
# split out very last token
ids, last_token = ids[:-1], ids[-1:]
ids = rearrange(ids, '(n c) -> n c', c = chunk_size)
# the first token of each chunk (from the second chunk on) becomes the appended last token of the preceding chunk
last_token_per_chunk = ids[1:, 0]
all_last_tokens = torch.cat((last_token_per_chunk, last_token), dim = 0)
all_last_tokens = rearrange(all_last_tokens, 'n -> n 1')
# append all last tokens to ids for (num_chunks, chunk_size + 1)
chunks_with_extra_token = torch.cat((ids, all_last_tokens), dim = -1)
# calculate chunk indices starting at 0, spaced number of chunks of seq len apart
total_chunks = ids.shape[0]
num_chunks_per_seq = seq_len // chunk_size
seq = torch.arange(0, total_chunks, num_chunks_per_seq)
return chunks_with_extra_token, seq
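# Worked example (illustrative): a document whose tokenization yields 100 ids with
# chunk_size = 64 is padded to 129 ids (padding = 64 - (99 % 64) = 29); the final id is
# split off, the remaining 128 ids form 2 chunks of 64, and each chunk is stored with
# one extra lookahead token, giving chunks_with_extra_token of shape (2, 65). With
# seq_len = 2048 (32 chunks per sequence), seq is simply tensor([0]).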
def text_folder_to_chunks_(
*,
folder,
chunks_memmap_path,
seqs_memmap_path,
doc_ids_memmap_path,
chunk_size = 64,
seq_len = 2048,
glob = '**/*.txt',
max_chunks = 1_000_000,
max_seqs = 100_000
):
paths = sorted([*Path(folder).glob(glob)])
total_chunks = 0
total_docs = 0
total_seqs = 0
chunks_shape = (max_chunks, chunk_size + 1)
seqs_shape = (max_seqs,)
doc_ids_shape = (max_chunks,)
with memmap(chunks_memmap_path, shape = chunks_shape, dtype = np.int32, mode = 'w+') as chunks_memmap\
, memmap(seqs_memmap_path, shape = seqs_shape, dtype = np.int32, mode = 'w+') as seqs_memmap\
, memmap(doc_ids_memmap_path, shape = doc_ids_shape, dtype = np.int32, mode = 'w+') as doc_ids_memmap:
for path in paths:
print(f'processing {path}')
chunks, seq = doc_text_to_chunks_and_seq_indices(
doc_text = path.read_text(),
chunk_size = chunk_size,
seq_len = seq_len
)
doc_chunk_len = chunks.shape[0]
doc_seq_len = seq.shape[0]
chunks_memmap[total_chunks:(total_chunks + doc_chunk_len)] = chunks.numpy()
seqs_memmap[total_seqs:(total_seqs + doc_seq_len)] = seq.numpy() + total_chunks
doc_ids_memmap[total_chunks:(total_chunks + doc_chunk_len)] = np.full((doc_chunk_len,), total_docs)
total_chunks += doc_chunk_len
total_seqs += doc_seq_len
total_docs += 1
return dict(
chunks = total_chunks,
docs = total_docs,
seqs = total_seqs
)
# embedding function
@torch.no_grad()
def bert_embed(
token_ids,
return_cls_repr = False,
eps = 1e-8,
pad_id = 0.
):
model = get_bert()
mask = token_ids != pad_id
if torch.cuda.is_available():
token_ids = token_ids.cuda()
mask = mask.cuda()
outputs = model(
input_ids = token_ids,
attention_mask = mask,
output_hidden_states = True
)
hidden_state = outputs.hidden_states[-1]
if return_cls_repr:
return hidden_state[:, 0] # return [cls] as representation
if not exists(mask):
return hidden_state.mean(dim = 1)
mask = mask[:, 1:] # mean all tokens excluding [cls], accounting for length
mask = rearrange(mask, 'b n -> b n 1')
numer = (hidden_state[:, 1:] * mask).sum(dim = 1)
denom = mask.sum(dim = 1)
masked_mean = numer / (denom + eps)
return masked_mean
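# The masked mean above averages the last-layer hidden states of the non-padding
# tokens, excluding the [CLS] position; eps guards against division by zero for
# rows that are entirely padding.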
# chunks to knn
def chunks_to_embeddings_(
*,
num_chunks,
chunks_memmap_path,
embeddings_memmap_path,
chunk_size = 64,
embed_dim = BERT_MODEL_DIM,
batch_size = 16,
use_cls_repr = False,
pad_id = 0.
):
chunks_shape = (num_chunks, chunk_size + 1)
embed_shape = (num_chunks, embed_dim)
with memmap(chunks_memmap_path, shape = chunks_shape, dtype = np.int32) as chunks\
, memmap(embeddings_memmap_path, shape = embed_shape, dtype = np.float32, mode = 'w+') as embeddings:
for dim_slice in range_chunked(num_chunks, batch_size = batch_size):
batch_chunk_npy = chunks[dim_slice]
batch_chunk = torch.from_numpy(batch_chunk_npy)
cls_tokens = torch.full((batch_chunk.shape[0], 1), SOS_ID)
batch_chunk = torch.cat((cls_tokens, batch_chunk), dim = 1)
batch_chunk = batch_chunk[:, :-1] # omit last token, the first token of the next chunk, used for autoregressive training
batch_embed = bert_embed(
batch_chunk,
return_cls_repr = use_cls_repr
)
embeddings[dim_slice] = batch_embed.detach().cpu().numpy()
print(f'embedded {dim_slice.stop} / {num_chunks}')
def memmap_file_to_chunks_(
memmap_path,
*,
folder,
shape,
dtype,
max_rows_per_file = 500
):
rows, _ = shape
with memmap(memmap_path, shape = shape, dtype = dtype, mode = 'r') as f:
root_path = TMP_PATH / folder
reset_folder_(root_path)
for ind, dim_slice in enumerate(range_chunked(rows, batch_size = max_rows_per_file)):
filename = root_path / f'{ind:05d}.npy'
data_slice = f[dim_slice]
np.save(str(filename), data_slice)
print(f'saved {str(filename)}')
def index_embeddings(
embeddings_folder,
*,
index_file = 'knn.index',
index_infos_file = 'index_infos.json',
max_index_memory_usage = '100m',
current_memory_available = '1G'
):
embeddings_path = TMP_PATH / embeddings_folder
index_path = INDEX_FOLDER_PATH / index_file
reset_folder_(INDEX_FOLDER_PATH)
build_index(
embeddings = str(embeddings_path),
index_path = str(index_path),
index_infos_path = str(INDEX_FOLDER_PATH / index_infos_file),
metric_type = "l2",
max_index_memory_usage = max_index_memory_usage,
current_memory_available = current_memory_available,
make_direct_map = True,
should_be_memory_mappable = False,
use_gpu = torch.cuda.is_available(),
)
index = faiss_read_index(index_path)
return index
def chunks_to_index_and_embed(
*,
num_chunks,
chunk_size,
chunk_memmap_path,
use_cls_repr = False,
max_rows_per_file = 500,
chunks_to_embeddings_batch_size = 16,
embed_dim = BERT_MODEL_DIM,
index_file = 'knn.index',
**index_kwargs
):
embedding_path = f'{chunk_memmap_path}.embedded'
embed_shape = (num_chunks, embed_dim)
chunks_to_embeddings_(
num_chunks = num_chunks,
chunk_size = chunk_size,
chunks_memmap_path = chunk_memmap_path,
embeddings_memmap_path = embedding_path,
use_cls_repr = use_cls_repr,
batch_size = chunks_to_embeddings_batch_size,
embed_dim = embed_dim
)
memmap_file_to_chunks_(
embedding_path,
shape = embed_shape,
dtype = np.float32,
folder = EMBEDDING_TMP_SUBFOLDER,
max_rows_per_file = max_rows_per_file
)
index = index_embeddings(
embeddings_folder = EMBEDDING_TMP_SUBFOLDER,
index_file = index_file,
**index_kwargs
)
embeddings = np.memmap(embedding_path, shape = embed_shape, dtype = np.float32, mode = 'r')
return index, embeddings
def chunks_to_precalculated_knn_(
*,
num_nearest_neighbors,
num_chunks,
chunk_size,
chunk_memmap_path,
doc_ids_memmap_path,
use_cls_repr = False,
max_rows_per_file = 500,
chunks_to_embeddings_batch_size = 16,
embed_dim = BERT_MODEL_DIM,
num_extra_neighbors = 10,
force_reprocess = False,
index_file = 'knn.index',
**index_kwargs
):
chunk_path = Path(chunk_memmap_path)
knn_path = chunk_path.parents[0] / f'{chunk_path.stem}.knn{chunk_path.suffix}'
index_path = INDEX_FOLDER_PATH / index_file
# early return knn path and faiss index
    # unless force_reprocess is True
if index_path.exists() and knn_path.exists() and not force_reprocess:
print(f'preprocessed knn found at {str(knn_path)}, faiss index reconstituted from {str(index_path)}')
index = faiss_read_index(index_path)
return knn_path, index
# fetch the faiss index and calculated embeddings for the chunks
index, embeddings = chunks_to_index_and_embed(
num_chunks = num_chunks,
chunk_size = chunk_size,
chunk_memmap_path = chunk_memmap_path,
index_file = index_file,
**index_kwargs
)
total_neighbors_to_fetch = num_extra_neighbors + num_nearest_neighbors + 1
with memmap(knn_path, shape = (num_chunks, num_nearest_neighbors), dtype = np.int32, mode = 'w+') as knns\
, memmap(doc_ids_memmap_path, shape = (num_chunks,), dtype = np.int32, mode = 'r') as doc_ids:
for dim_slice in range_chunked(num_chunks, batch_size = max_rows_per_file):
query_vector = embeddings[dim_slice]
distances, indices = index.search(query_vector, k = total_neighbors_to_fetch)
# remove self from distances and indices
distances = distances[:, 1:]
indices = indices[:, 1:]
            # set any neighbors that belong to the same document to -1
query_doc_ids = doc_ids[dim_slice]
neighbor_doc_ids = doc_ids[indices]
neighbor_from_same_doc = query_doc_ids[..., None] == neighbor_doc_ids
indices = np.where(neighbor_from_same_doc, -1, indices)
distances = np.where(neighbor_from_same_doc, 1e3, distances)
# re-sort indices by updated distances
indices = np.take_along_axis(indices, np.argsort(distances, axis = 1), axis = 1)
# store nearest neighbors to knn memmap
knns[dim_slice] = indices[:, :num_nearest_neighbors]
print(f'knns calculated for {dim_slice.stop} / {num_chunks}')
print(f'knn saved to {knn_path}')
return knn_path, index
| RETRO-pytorch-main | retro_pytorch/retrieval.py |
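# a hedged usage sketch for the precalculated-knn pipeline defined above; the memmap paths and
# counts are illustrative placeholders and assume text_folder_to_chunks_ was already run
from retro_pytorch.retrieval import chunks_to_precalculated_knn_

knn_path, faiss_index = chunks_to_precalculated_knn_(
    num_nearest_neighbors = 2,
    num_chunks = 10_000,                          # count reported by the earlier preprocessing step
    chunk_size = 64,
    chunk_memmap_path = './train.chunks.dat',     # hypothetical path
    doc_ids_memmap_path = './train.doc_ids.dat',  # hypothetical path
    num_extra_neighbors = 10,
    use_cls_repr = False
)
# knn_path points at a (num_chunks, num_nearest_neighbors) int32 memmap of neighbor chunk indices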
from torch.optim import AdamW
def separate_weight_decayable_params(params):
no_wd_params = set([param for param in params if param.ndim < 2])
wd_params = set(params) - no_wd_params
return wd_params, no_wd_params
def get_optimizer(params, lr = 3e-4, wd = 1e-1, filter_by_requires_grad = False):
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
params = set(params)
wd_params, no_wd_params = separate_weight_decayable_params(params)
param_groups = [
{'params': list(wd_params)},
{'params': list(no_wd_params), 'weight_decay': 0},
]
return AdamW(param_groups, lr = lr, weight_decay = wd)
| RETRO-pytorch-main | retro_pytorch/optimizer.py |
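# a short usage sketch of get_optimizer above, applied to a stand-in torch model (not part of the repo)
import torch
from torch import nn
from retro_pytorch.optimizer import get_optimizer

model = nn.Sequential(nn.Linear(512, 2048), nn.GELU(), nn.Linear(2048, 512))
optim = get_optimizer(model.parameters(), lr = 3e-4, wd = 1e-1)  # norm / bias params get no weight decay

loss = model(torch.randn(4, 512)).sum()
loss.backward()
optim.step()
optim.zero_grad()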
import numpy as np
from functools import partial
import json
from pathlib import Path
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from retro_pytorch import RETRO, RETRODataset
from retro_pytorch.data import knn_to_retrieved_chunks
from retro_pytorch.optimizer import get_optimizer
from retro_pytorch.retrieval import text_folder_to_chunks_, chunks_to_precalculated_knn_, bert_embed, SOS_ID, EOS_ID
from retro_pytorch.utils import memmap, is_true_env_flag
from einops import rearrange
# helpers
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def safe_cat(accum, t, dim = -1):
if not exists(accum):
return t
return torch.cat((accum, t), dim = dim)
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.9):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# function that returns knn chunks from seq chunks
#
# 1. adds sos and eos to seq chunks
# 2. embeds the seq chunks with special tokens with frozen BERT
# 3. fetches the knn indices with faiss
# 4. gets the knn chunks as well as the continuation from a reference to the chunks data (memmap)
#
def knn_chunks_from_seq_chunks(
seq_chunks,
*,
knn,
faiss_index,
num_chunks,
chunk_size,
chunks_memmap_path,
):
b, device = seq_chunks.shape[0], seq_chunks.device
# prepare last chunk with sos and eos tokens for BERT embed
ones = torch.ones((b, 1), dtype = torch.bool, device = device)
sos = ones * SOS_ID
eos = ones * EOS_ID
seq_chunks = torch.cat((sos, seq_chunks, eos), dim = 1)
# embed with frozen BERT
embeds = bert_embed(seq_chunks.cpu()) # fetch embeds on CPU for now
# retrieval of knn with faiss
_, knn_indices = faiss_index.search(embeds.cpu().numpy(), k = knn)
# numpy to torch
with memmap(chunks_memmap_path, dtype = np.int32, shape = (num_chunks + 1, chunk_size + 1)) as chunk_memmap:
knn_chunks = knn_to_retrieved_chunks(
knn_indices,
chunk_memmap,
add_continuations = True,
num_chunks = num_chunks
)
knn_chunks_torch = torch.from_numpy(knn_chunks).to(device)
return knn_chunks_torch
# training wrapper class
class TrainingWrapper(nn.Module):
def __init__(
self,
*,
retro,
chunk_size,
documents_path,
knn,
glob = '**/*.txt',
chunks_memmap_path = './train.chunks.dat',
seqs_memmap_path = './train.seq.dat',
doc_ids_memmap_path = './train.doc_ids.dat',
max_chunks = 1_000_000,
max_seqs = 100_000,
knn_extra_neighbors = 100,
processed_stats_json_path = './processed-stats.json',
faiss_index_filename = 'knn.index',
**index_kwargs
):
super().__init__()
assert isinstance(retro, RETRO), 'retro must be instance of RETRO'
self.retro = retro
force_reprocess = is_true_env_flag('REPROCESS')
# store the processed training data statistics
# number of chunks, number of sequences
stats_path = Path(processed_stats_json_path)
# if the statistics file does not exist, process folders of text
# force reprocess by setting REPROCESS=1 when running training script
if not stats_path.exists() or force_reprocess:
self.stats = text_folder_to_chunks_(
folder = documents_path,
glob = glob,
chunks_memmap_path = chunks_memmap_path,
seqs_memmap_path = seqs_memmap_path,
doc_ids_memmap_path = doc_ids_memmap_path,
chunk_size = chunk_size,
seq_len = retro.seq_len,
max_chunks = max_chunks,
max_seqs = max_seqs
)
with open(processed_stats_json_path, 'w') as f:
json.dump(self.stats, f)
else:
            print(f'found previously processed stats at {str(stats_path)}')
self.stats = json.loads(stats_path.read_text())
# get number of chunks and number of sequences
num_chunks = self.stats['chunks']
num_seqs = self.stats['seqs']
# calculate knn memmap path and get the faiss index
# todo - make sure if faiss_index_filename is found, do not reprocess unless flag is given
knn_memmap_path, faiss_index = chunks_to_precalculated_knn_(
num_chunks = num_chunks,
chunk_size = chunk_size,
chunk_memmap_path = chunks_memmap_path,
doc_ids_memmap_path = doc_ids_memmap_path,
num_nearest_neighbors = knn,
num_extra_neighbors = knn_extra_neighbors,
index_file = faiss_index_filename,
force_reprocess = force_reprocess,
**index_kwargs
)
# retro dataset
self.ds = RETRODataset(
num_sequences = num_seqs,
num_chunks = num_chunks,
num_neighbors = knn,
chunk_size = chunk_size,
seq_len = retro.seq_len,
chunk_memmap_path = chunks_memmap_path,
chunk_nn_memmap_path = knn_memmap_path,
seq_memmap_path = seqs_memmap_path
)
# params needed for generation
self.chunk_size = chunk_size
self.max_seq_len = self.retro.seq_len
self.fetch_knn_chunks_fn = partial(
knn_chunks_from_seq_chunks,
knn = knn,
chunk_size = chunk_size,
num_chunks = num_chunks,
chunks_memmap_path = chunks_memmap_path,
faiss_index = faiss_index
)
@torch.no_grad()
@eval_decorator
def generate(
self,
start = None,
retrieved = None,
filter_fn = top_k,
filter_thres = 0.9,
temperature = 1.0,
):
assert filter_fn in {top_k, top_p}, 'filter function must be either top-k or nucleus'
device = next(self.retro.parameters()).device
        # if no prime tokens given, assume sampling from SOS token with batch size of 1
if not exists(start):
start = torch.full((1, 1), SOS_ID, device = device).long()
b, start_seq_len = start.shape
# move onto same device as RETRO
start = start.to(device)
# prepare retrieval related variables
if start_seq_len >= self.chunk_size:
seq_index = (start_seq_len // self.chunk_size) * self.chunk_size
past_seq_chunks = rearrange(start[:, :seq_index], 'b (n c) -> (b n) c', c = self.chunk_size)
retrieved = self.fetch_knn_chunks_fn(past_seq_chunks)
retrieved = rearrange(retrieved, '(b n) k c -> b n k c', b = b)
# get starting sequence index
out = start
# sampling loop
for i in range(start_seq_len - 1, self.max_seq_len):
logits = self.retro(out, retrieved = retrieved)
logits = logits[:, i]
logits = filter_fn(logits, thres = filter_thres)
sampled = gumbel_sample(logits, temperature = temperature, dim = -1)
sampled = rearrange(sampled, 'b -> b 1')
out = torch.cat((out, sampled), dim = 1)
# early terminate if all EOS
is_eos_tokens = (out == EOS_ID)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.retro.pad_id)
break
# when the sequence length is a multiple of the chunk size
# retrieve the next set of knns
curr_seq_len = out.shape[-1]
if (curr_seq_len % self.chunk_size) == 0:
last_chunk = rearrange(out, 'b (c n) -> b c n', n = self.chunk_size)[:, -1]
knn_chunks = self.fetch_knn_chunks_fn(last_chunk)
# concat retrieved knn chunks to all retrieved
# to be sent to Retro for chunked cross attention at the next iteration
knn_chunks = rearrange(knn_chunks, 'b k r -> b 1 k r')
retrieved = safe_cat(retrieved, knn_chunks, dim = 1)
print(f'retrieved at {curr_seq_len} / {self.max_seq_len}')
return out
def get_dataloader(self, **kwargs):
return DataLoader(self.ds, **kwargs)
def get_optimizer(self, **kwargs):
return get_optimizer(self.retro.parameters(), **kwargs)
    def forward(self):
        raise NotImplementedError
| RETRO-pytorch-main | retro_pytorch/training.py |
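# a hedged sketch of driving the TrainingWrapper above; `retro` is assumed to be an already
# constructed RETRO instance (defined elsewhere in the repo), and all paths are placeholders
import torch
from retro_pytorch.training import TrainingWrapper

wrapper = TrainingWrapper(
    retro = retro,                      # existing RETRO model (assumed)
    knn = 2,
    chunk_size = 64,
    documents_path = './text_folder',   # hypothetical folder of .txt documents
    chunks_memmap_path = './train.chunks.dat',
    seqs_memmap_path = './train.seq.dat',
    doc_ids_memmap_path = './train.doc_ids.dat',
    max_chunks = 1_000_000,
    max_seqs = 100_000
)

train_dl = iter(wrapper.get_dataloader(batch_size = 2, shuffle = True))
optim = wrapper.get_optimizer(lr = 3e-4, wd = 1e-1)
seq, retrieved = next(train_dl)         # feed these to the RETRO forward pass during training
sampled = wrapper.generate(filter_thres = 0.9, temperature = 1.0)  # autoregressive sampling with retrieval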
from functools import partial
import numpy as np
import torch
from torch.utils.data import Dataset
from retro_pytorch.retrieval import EOS_ID
from retro_pytorch.utils import memmap
# knn to retrieved chunks
def knn_to_retrieved_chunks(
knns,
chunks_memmap,
*,
add_continuations,
num_chunks,
pad_id = 0,
eos_id = EOS_ID,
):
# derive mask for no neighbors found (-1)
no_neighbor_mask = knns == -1
knns = np.maximum(knns, 0)
# get neighbor and continuation chunks
knn_chunks = chunks_memmap[knns]
is_last_document_chunk = np.any(knn_chunks == eos_id, axis = -1, keepdims = True)
# use presence of [EOS] in chunk as way to detect document boundaries
# [EOS] in BERT tokenizer is 102
retrieved = knn_chunks[..., :-1]
if add_continuations:
continuation_indices = np.clip(knns + 1, 0, num_chunks - 1) # chunks are stored contiguously
continuation_chunks = chunks_memmap[continuation_indices][..., :-1]
continuation_chunks *= ~is_last_document_chunk
# combine neighbors with continuations
retrieved = np.concatenate((retrieved, continuation_chunks), axis = -1)
    # set any nearest neighbor chunks that were -1 (not found at index time) to the padding id
retrieved = np.where(~no_neighbor_mask[..., None], retrieved, pad_id)
return retrieved
# dataset
class RETRODataset(Dataset):
def __init__(
self,
*,
num_chunks,
chunk_size,
seq_len,
num_sequences,
num_neighbors,
chunk_memmap_path,
chunk_nn_memmap_path,
seq_memmap_path,
eos_id = EOS_ID,
pad_id = 0.,
add_continuations = True
):
super().__init__()
self.num_chunks = num_chunks
self.num_sequences = num_sequences
self.seq_num_chunks = seq_len // chunk_size
self.eos_id = eos_id
self.pad_id = pad_id
num_chunks_with_padding = num_chunks + self.seq_num_chunks
chunks_shape = (num_chunks_with_padding, chunk_size + 1)
knn_shape = (num_chunks_with_padding, num_neighbors)
self.add_continuations = add_continuations
self.get_chunks = partial(memmap, chunk_memmap_path, dtype = np.int32, shape = chunks_shape)
self.get_knns = partial(memmap, chunk_nn_memmap_path, dtype = np.int32, shape = knn_shape)
self.get_seqs = partial(memmap, seq_memmap_path, dtype = np.int32, shape = (num_sequences,))
def __len__(self):
return self.num_sequences
def __getitem__(self, ind):
with self.get_chunks() as chunks_memmap, self.get_knns() as knns_memmap, self.get_seqs() as seqs_memmap:
begin_chunk_index = seqs_memmap[ind]
chunk_range = slice(begin_chunk_index, (begin_chunk_index + self.seq_num_chunks))
chunks = chunks_memmap[chunk_range]
# excise the last token, except for last token of last chunk
seq_tokens = np.concatenate((chunks[:, :-1].flatten(), chunks[-1, -1:]))
# mask out (with padding tokens) any token following an <eos> | disallow having more than 1 document in a sequence, as it would break RETRO's CCA
seq_mask = np.cumsum(seq_tokens == self.eos_id, axis = 0)
seq_mask = np.pad(seq_mask, (1, 0))[:-1] == 0.
seq_tokens = np.where(seq_mask, seq_tokens, 0.)
# derive retrieved tokens
knns = knns_memmap[chunk_range]
retrieved = knn_to_retrieved_chunks(
knns,
chunks_memmap,
add_continuations = self.add_continuations,
eos_id = self.eos_id,
num_chunks = self.num_chunks
)
seq_tokens_torch = torch.from_numpy(seq_tokens).long()
retrieved_torch = torch.from_numpy(retrieved).long()
return seq_tokens_torch, retrieved_torch
| RETRO-pytorch-main | retro_pytorch/data.py |
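# a small synthetic check of knn_to_retrieved_chunks above, with a plain numpy array standing in
# for the chunks memmap; token values are arbitrary
import numpy as np
from retro_pytorch.data import knn_to_retrieved_chunks

num_chunks, chunk_size = 6, 4
chunks = np.arange(num_chunks * (chunk_size + 1), dtype = np.int32).reshape(num_chunks, chunk_size + 1)
knns = np.array([[1, 3], [4, -1]])  # -1 marks a neighbor that was filtered out at knn time

retrieved = knn_to_retrieved_chunks(knns, chunks, add_continuations = True, num_chunks = num_chunks)
print(retrieved.shape)  # (2, 2, 8) - each neighbor chunk concatenated with its continuation chunk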
from setuptools import setup, find_packages
setup(
name = 'fast-transformer-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'Fast Transformer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/fast-transformer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers'
],
install_requires=[
'einops>=0.3',
'rotary-embedding-torch',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| fast-transformer-pytorch-main | setup.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, reduce
from rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# helper classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
# blocks
def FeedForward(dim, mult = 4):
return nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Linear(dim * mult, dim)
)
class FastAttention(nn.Module):
def __init__(
self,
dim,
*,
heads = 8,
dim_head = 64,
max_seq_len = None,
pos_emb = None
):
super().__init__()
inner_dim = heads * dim_head
self.heads = heads
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
# rotary positional embedding
        assert not (exists(pos_emb) and not exists(max_seq_len)), 'max_seq_len must be passed in if using rotary positional embeddings'
self.pos_emb = pos_emb
self.max_seq_len = max_seq_len
# if using relative positional encoding, make sure to reduce pairs of consecutive feature dimension before doing projection to attention logits
kv_attn_proj_divisor = 1 if not exists(pos_emb) else 2
self.to_q_attn_logits = nn.Linear(dim_head, 1, bias = False) # for projecting queries to query attention logits
self.to_k_attn_logits = nn.Linear(dim_head // kv_attn_proj_divisor, 1, bias = False) # for projecting keys to key attention logits
# final transformation of values to "r" as in the paper
self.to_r = nn.Linear(dim_head // kv_attn_proj_divisor, dim_head)
self.to_out = nn.Linear(inner_dim, dim)
def forward(self, x, mask = None):
n, device, h, use_rotary_emb = x.shape[1], x.device, self.heads, exists(self.pos_emb)
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
mask_value = -torch.finfo(x.dtype).max
        # default to an all-true mask if none is passed in, so the masking logic below still applies
        if not exists(mask):
            mask = torch.ones((x.shape[0], n), dtype = torch.bool, device = device)
        mask = rearrange(mask, 'b n -> b () n')
# if relative positional encoding is needed
if use_rotary_emb:
freqs = self.pos_emb(torch.arange(self.max_seq_len, device = device), cache_key = self.max_seq_len)
freqs = rearrange(freqs[:n], 'n d -> () () n d')
q_aggr, k_aggr, v_aggr = map(lambda t: apply_rotary_emb(freqs, t), (q, k, v))
else:
q_aggr, k_aggr, v_aggr = q, k, v
# calculate query attention logits
q_attn_logits = rearrange(self.to_q_attn_logits(q), 'b h n () -> b h n') * self.scale
q_attn_logits = q_attn_logits.masked_fill(~mask, mask_value)
q_attn = q_attn_logits.softmax(dim = -1)
# calculate global query token
global_q = einsum('b h n, b h n d -> b h d', q_attn, q_aggr)
global_q = rearrange(global_q, 'b h d -> b h () d')
# bias keys with global query token
k = k * global_q
# if using rotary embeddings, do an inner product between adjacent pairs in the feature dimension
if use_rotary_emb:
k = reduce(k, 'b h n (d r) -> b h n d', 'sum', r = 2)
# now calculate key attention logits
k_attn_logits = rearrange(self.to_k_attn_logits(k), 'b h n () -> b h n') * self.scale
k_attn_logits = k_attn_logits.masked_fill(~mask, mask_value)
k_attn = k_attn_logits.softmax(dim = -1)
# calculate global key token
global_k = einsum('b h n, b h n d -> b h d', k_attn, k_aggr)
global_k = rearrange(global_k, 'b h d -> b h () d')
# bias the values
u = v_aggr * global_k
# if using rotary embeddings, do an inner product between adjacent pairs in the feature dimension
if use_rotary_emb:
u = reduce(u, 'b h n (d r) -> b h n d', 'sum', r = 2)
# transformation step
r = self.to_r(u)
# paper then says to add the queries as a residual
r = r + q
# combine heads
r = rearrange(r, 'b h n d -> b n (h d)')
return self.to_out(r)
# main class
class FastTransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
max_seq_len,
heads = 8,
dim_head = 64,
ff_mult = 4,
absolute_pos_emb = False
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
# positional embeddings
self.abs_pos_emb = nn.Embedding(max_seq_len, dim) if absolute_pos_emb else None
layer_pos_emb = None
if not absolute_pos_emb:
assert (dim_head % 4) == 0, 'dimension of the head must be divisible by 4 to use rotary embeddings'
layer_pos_emb = RotaryEmbedding(dim_head // 2)
# layers
self.layers = nn.ModuleList([])
for _ in range(depth):
attn = FastAttention(dim, dim_head = dim_head, heads = heads, pos_emb = layer_pos_emb, max_seq_len = max_seq_len)
ff = FeedForward(dim, mult = ff_mult)
self.layers.append(nn.ModuleList([
PreNorm(dim, attn),
PreNorm(dim, ff)
]))
# weight tie projections across all layers
first_block, _ = self.layers[0]
for block, _ in self.layers[1:]:
block.fn.to_q_attn_logits = first_block.fn.to_q_attn_logits
block.fn.to_k_attn_logits = first_block.fn.to_k_attn_logits
# to logits
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(
self,
x,
mask = None
):
n, device = x.shape[1], x.device
x = self.token_emb(x)
if exists(self.abs_pos_emb):
pos_emb = self.abs_pos_emb(torch.arange(n, device = device))
x = x + rearrange(pos_emb, 'n d -> () n d')
for attn, ff in self.layers:
x = attn(x, mask = mask) + x
x = ff(x) + x
return self.to_logits(x)
| fast-transformer-pytorch-main | fast_transformer_pytorch/fast_transformer_pytorch.py |
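# a minimal forward-pass sketch for the FastTransformer above; hyperparameters are illustrative
import torch
from fast_transformer_pytorch import FastTransformer

model = FastTransformer(
    num_tokens = 20000,
    dim = 512,
    depth = 2,
    max_seq_len = 4096,
    absolute_pos_emb = True  # set False (the default) to use rotary embeddings instead
)

x = torch.randint(0, 20000, (1, 4096))
mask = torch.ones(1, 4096).bool()
logits = model(x, mask = mask)  # (1, 4096, 20000)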
from fast_transformer_pytorch.fast_transformer_pytorch import FastTransformer
| fast-transformer-pytorch-main | fast_transformer_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'uformer-pytorch',
packages = find_packages(),
version = '0.0.8',
license='MIT',
description = 'Uformer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/uformer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'image segmentation',
'unet'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| uformer-pytorch-main | setup.py |
from uformer_pytorch.uformer_pytorch import Uformer
| uformer-pytorch-main | uformer_pytorch/__init__.py |
import math
from math import log, pi, sqrt
from functools import partial
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# constants
List = nn.ModuleList
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, depth = 1):
return val if isinstance(val, tuple) else (val,) * depth
# positional embeddings
def apply_rotary_emb(q, k, pos_emb):
sin, cos = pos_emb
dim_rotary = sin.shape[-1]
(q, q_pass), (k, k_pass) = map(lambda t: (t[..., :dim_rotary], t[..., dim_rotary:]), (q, k))
q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k))
q, k = map(lambda t: torch.cat(t, dim = -1), ((q, q_pass), (k, k_pass)))
return q, k
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
class AxialRotaryEmbedding(nn.Module):
def __init__(self, dim, max_freq = 10):
super().__init__()
self.dim = dim
scales = torch.linspace(1., max_freq / 2, self.dim // 4)
self.register_buffer('scales', scales)
def forward(self, x):
device, dtype, h, w = x.device, x.dtype, *x.shape[-2:]
seq_x = torch.linspace(-1., 1., steps = h, device = device)
seq_x = seq_x.unsqueeze(-1)
seq_y = torch.linspace(-1., 1., steps = w, device = device)
seq_y = seq_y.unsqueeze(-1)
        scales = self.scales[(*((None,) * (len(seq_x.shape) - 1)), Ellipsis)]
        scales = scales.to(x)
seq_x = seq_x * scales * pi
seq_y = seq_y * scales * pi
x_sinu = repeat(seq_x, 'i d -> i j d', j = w)
y_sinu = repeat(seq_y, 'j d -> i j d', i = h)
sin = torch.cat((x_sinu.sin(), y_sinu.sin()), dim = -1)
cos = torch.cat((x_sinu.cos(), y_sinu.cos()), dim = -1)
sin, cos = map(lambda t: rearrange(t, 'i j d -> i j d'), (sin, cos))
sin, cos = map(lambda t: repeat(t, 'i j d -> () i j (d r)', r = 2), (sin, cos))
return sin, cos
class TimeSinuPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device = device) * -emb)
emb = einsum('i, j -> i j', x, emb)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb
# helper classes
class LayerNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, x):
std = torch.var(x, dim = 1, unbiased = False, keepdim = True).sqrt()
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (std + self.eps) * self.g + self.b
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class Attention(nn.Module):
def __init__(self, dim, dim_head = 64, heads = 8, window_size = 16):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.window_size = window_size
inner_dim = dim_head * heads
self.to_q = nn.Conv2d(dim, inner_dim, 1, bias = False)
self.to_kv = nn.Conv2d(dim, inner_dim * 2, 1, bias = False)
self.to_out = nn.Conv2d(inner_dim, dim, 1)
def forward(self, x, skip = None, time_emb = None, pos_emb = None):
h, w, b = self.heads, self.window_size, x.shape[0]
if exists(time_emb):
time_emb = rearrange(time_emb, 'b c -> b c () ()')
x = x + time_emb
q = self.to_q(x)
kv_input = x
if exists(skip):
kv_input = torch.cat((kv_input, skip), dim = 0)
k, v = self.to_kv(kv_input).chunk(2, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) x y c', h = h), (q, k, v))
if exists(pos_emb):
q, k = apply_rotary_emb(q, k, pos_emb)
q, k, v = map(lambda t: rearrange(t, 'b (x w1) (y w2) c -> (b x y) (w1 w2) c', w1 = w, w2 = w), (q, k, v))
if exists(skip):
k, v = map(lambda t: rearrange(t, '(r b) n d -> b (r n) d', r = 2), (k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h x y) (w1 w2) c -> b (h c) (x w1) (y w2)', b = b, h = h, y = x.shape[-1] // w, w1 = w, w2 = w)
return self.to_out(out)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4):
super().__init__()
hidden_dim = dim * mult
self.project_in = nn.Conv2d(dim, hidden_dim, 1)
self.project_out = nn.Sequential(
nn.Conv2d(hidden_dim, hidden_dim, 3, padding = 1),
nn.GELU(),
nn.Conv2d(hidden_dim, dim, 1)
)
def forward(self, x, time_emb = None):
x = self.project_in(x)
if exists(time_emb):
time_emb = rearrange(time_emb, 'b c -> b c () ()')
x = x + time_emb
return self.project_out(x)
class Block(nn.Module):
def __init__(
self,
dim,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4,
window_size = 16,
time_emb_dim = None,
rotary_emb = True
):
super().__init__()
self.attn_time_emb = None
self.ff_time_emb = None
if exists(time_emb_dim):
self.attn_time_emb = nn.Sequential(nn.GELU(), nn.Linear(time_emb_dim, dim))
self.ff_time_emb = nn.Sequential(nn.GELU(), nn.Linear(time_emb_dim, dim * ff_mult))
self.pos_emb = AxialRotaryEmbedding(dim_head) if rotary_emb else None
self.layers = List([])
for _ in range(depth):
self.layers.append(List([
PreNorm(dim, Attention(dim, dim_head = dim_head, heads = heads, window_size = window_size)),
PreNorm(dim, FeedForward(dim, mult = ff_mult))
]))
def forward(self, x, skip = None, time = None):
attn_time_emb = None
ff_time_emb = None
if exists(time):
assert exists(self.attn_time_emb) and exists(self.ff_time_emb), 'time_emb_dim must be given on init if you are conditioning based on time'
attn_time_emb = self.attn_time_emb(time)
ff_time_emb = self.ff_time_emb(time)
pos_emb = None
if exists(self.pos_emb):
pos_emb = self.pos_emb(x)
for attn, ff in self.layers:
x = attn(x, skip = skip, time_emb = attn_time_emb, pos_emb = pos_emb) + x
x = ff(x, time_emb = ff_time_emb) + x
return x
# classes
class Uformer(nn.Module):
def __init__(
self,
dim = 64,
channels = 3,
stages = 4,
num_blocks = 2,
dim_head = 64,
window_size = 16,
heads = 8,
ff_mult = 4,
time_emb = False,
input_channels = None,
output_channels = None
):
super().__init__()
input_channels = default(input_channels, channels)
output_channels = default(output_channels, channels)
self.to_time_emb = None
time_emb_dim = None
if time_emb:
time_emb_dim = dim
self.to_time_emb = nn.Sequential(
TimeSinuPosEmb(dim),
nn.Linear(dim, dim * 4),
nn.GELU(),
nn.Linear(dim * 4, dim)
)
self.project_in = nn.Sequential(
nn.Conv2d(input_channels, dim, 3, padding = 1),
nn.GELU()
)
self.project_out = nn.Sequential(
nn.Conv2d(dim, output_channels, 3, padding = 1),
)
self.downs = List([])
self.ups = List([])
heads, window_size, dim_head, num_blocks = map(partial(cast_tuple, depth = stages), (heads, window_size, dim_head, num_blocks))
for ind, heads, window_size, dim_head, num_blocks in zip(range(stages), heads, window_size, dim_head, num_blocks):
is_last = ind == (stages - 1)
self.downs.append(List([
Block(dim, depth = num_blocks, dim_head = dim_head, heads = heads, ff_mult = ff_mult, window_size = window_size, time_emb_dim = time_emb_dim),
nn.Conv2d(dim, dim * 2, 4, stride = 2, padding = 1)
]))
self.ups.append(List([
nn.ConvTranspose2d(dim * 2, dim, 2, stride = 2),
Block(dim, depth = num_blocks, dim_head = dim_head, heads = heads, ff_mult = ff_mult, window_size = window_size, time_emb_dim = time_emb_dim)
]))
dim *= 2
if is_last:
self.mid = Block(dim = dim, depth = num_blocks, dim_head = dim_head, heads = heads, ff_mult = ff_mult, window_size = window_size, time_emb_dim = time_emb_dim)
def forward(
self,
x,
time = None
):
if exists(time):
assert exists(self.to_time_emb), 'time_emb must be set to true to condition on time'
time = time.to(x)
time = self.to_time_emb(time)
x = self.project_in(x)
skips = []
for block, downsample in self.downs:
x = block(x, time = time)
skips.append(x)
x = downsample(x)
x = self.mid(x, time = time)
for (upsample, block), skip in zip(reversed(self.ups), reversed(skips)):
x = upsample(x)
x = block(x, skip = skip, time = time)
x = self.project_out(x)
return x
| uformer-pytorch-main | uformer_pytorch/uformer_pytorch.py |
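# a minimal usage sketch for the Uformer above; the 256x256 input keeps every stage divisible
# by the window size, which the windowed attention requires
import torch
from uformer_pytorch import Uformer

model = Uformer(
    dim = 64,
    stages = 4,
    num_blocks = 2,
    window_size = 16,
    dim_head = 64,
    heads = 8
)

x = torch.randn(1, 3, 256, 256)
pred = model(x)  # (1, 3, 256, 256)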
import os
import gzip
import click
import re
import random
from math import ceil
from functools import partial
from itertools import islice, chain
from operator import itemgetter
from pyfaidx import Faidx
import numpy as np
from random import random, sample
from pathlib import Path
import toml
from google.cloud import storage
from prefect import Parameter, task, Flow
from progen_transformer.data import with_tfrecord_writer
from progen_transformer.utils import clear_directory_
# constants
GCS_WRITE_TIMEOUT = 60 * 30
TMP_DIR = Path('./.tmp')
# functions
def order_dict_by(d, fn):
keys = fn(d.keys())
return dict(tuple(map(lambda k: (k, d[k]), keys)))
def get_annotations_from_description(config, description):
taxonomy_matches = re.findall(r'Tax=([a-zA-Z\s]*)\s[a-zA-Z\=]', description)
annotations = dict()
if len(taxonomy_matches) > 0:
annotations['tax'] = taxonomy_matches[0]
return annotations
def fasta_row_to_sequence_strings(config, fa, uid):
seq_len = fa.index[uid].rlen
seq = str(fa.fetch(uid, 1, seq_len))
description = fa.get_long_name(uid)
sequences = []
annotations = get_annotations_from_description(config, description)
# todo: gather annotations from GO
if len(annotations) > 0:
        sort_annot_by = (lambda keys: sample(list(keys), len(keys))) if not config['sort_annotations'] else sorted
annotations = order_dict_by(annotations, sort_annot_by)
annotation_str = [f"[{annot_name}={annot}]" for annot_name, annot in annotations.items()]
annotation_str = ' '.join(annotation_str)
seq_annot_pair = (annotation_str, seq)
if random() <= config['prob_invert_seq_annotation']:
seq_annot_pair = tuple(reversed(seq_annot_pair))
sequence = ' # '.join(seq_annot_pair)
sequence = sequence.encode('utf-8')
sequences.append(sequence)
sequence = f'# {seq}'
sequence = sequence.encode('utf-8')
sequences.append(sequence)
return sequences
def process_and_write_to_tmp_file(i, seq_str):
filename = TMP_DIR / str(i)
with gzip.open(str(filename), 'wb') as f:
f.write(seq_str)
def foreach(fn, it):
for el in it:
fn(*el)
# DAG functions
@task
def fasta_to_tmp_files(config):
clear_directory_(TMP_DIR)
print('reading from fasta')
fa = Faidx(config['read_from'], sequence_always_upper = True)
print('filtering by length')
it = iter(fa.index.items())
it = filter(lambda el: el[1].rlen <= config['max_seq_len'], it)
print('parallel processing to tmp files')
it = islice(it, 0, config['num_samples'])
it = map(itemgetter(0), it)
fasta_to_seq_fn = partial(fasta_row_to_sequence_strings, config, fa)
it = map(fasta_to_seq_fn, it)
it = enumerate(chain.from_iterable(it))
foreach(process_and_write_to_tmp_file, it)
@task
def files_to_tfrecords(config):
filenames = [*TMP_DIR.glob('**/*')]
num_samples = len(filenames)
num_valids = ceil(config['fraction_valid_data'] * num_samples)
num_sequences_per_file = config['num_sequences_per_file']
# split out validation sequences
permuted_sequences = np.random.permutation(num_samples)
valid_seqs, train_seqs = np.split(permuted_sequences, [num_valids])
# clear directory to write to
write_to = config['write_to']
upload_gcs = write_to.startswith('gs://')
if upload_gcs:
write_to = write_to[5:]
client = storage.Client()
bucket_name = write_to
bucket = client.get_bucket(bucket_name)
bucket.delete_blobs(list(bucket.list_blobs()))
write_to_path = Path(write_to)
clear_directory_(write_to_path)
# loop and write all train and valid files to tfrecords
for (seq_type, seqs) in (('train', train_seqs), ('valid', valid_seqs)):
num_split = ceil(seqs.shape[0] / num_sequences_per_file)
for file_index, indices in enumerate(np.array_split(seqs, num_split)):
num_sequences = len(indices)
tfrecord_filename = f'{file_index}.{num_sequences}.{seq_type}.tfrecord.gz'
tfrecord_path = str(write_to_path / tfrecord_filename)
with with_tfrecord_writer(tfrecord_path) as write:
for index in indices:
filename = filenames[index]
with gzip.open(filename, 'rb') as f:
write(f.read())
if upload_gcs:
blob = bucket.blob(tfrecord_filename)
blob.upload_from_filename(tfrecord_path, timeout = GCS_WRITE_TIMEOUT)
with Flow('parse-fasta') as flow:
config = Parameter('config', required = True)
fasta_to_tmp_files(config = config)
files_to_tfrecords(config = config)
@click.command()
@click.option('--data_dir', default = './configs/data')
@click.option('--name', default = 'default')
def main(
data_dir,
name
):
data_dir = Path(data_dir)
config_path = data_dir / f'{name}.toml'
assert config_path.exists(), f'config does not exist at {str(config_path)}'
config = toml.loads(config_path.read_text())
flow.run(config = config)
if __name__ == '__main__':
main()
| progen-main | generate_data.py |
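# a hedged sketch of the config dictionary the two prefect tasks above consume; the keys are
# exactly the ones read in the code, the values (and the fasta path) are illustrative placeholders
config = dict(
    read_from = './uniref50.fasta',   # hypothetical input fasta
    write_to = './train_data',        # or 'gs://bucket-name' to upload tfrecords to GCS
    max_seq_len = 1024,
    num_samples = 100_000,
    sort_annotations = True,
    prob_invert_seq_annotation = 0.5,
    fraction_valid_data = 0.05,
    num_sequences_per_file = 10_000
)

flow.run(config = config)  # runs fasta_to_tmp_files followed by files_to_tfrecords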
from dotenv import load_dotenv
load_dotenv()
import click
import humanize
from jinja2 import Template
from pathlib import Path
import tqdm
import numpy as np
import toml
import jax
from jax import nn, random, jit, tree_util, tree_map
from optax import adamw, clip_by_global_norm, chain, apply_updates, apply_every
from haiku import PRNGSequence
from progen_transformer import ProGen
from progen_transformer.data import decode_tokens, iterator_from_tfrecords_folder
from progen_transformer.utils import sample, get_loss_fn, set_hardware_rng_, confirm, exists
from progen_transformer.checkpoint import get_checkpoint_fns
import wandb
# sample html
sample_tmpl = Template("""<i>{{prime_str}}</i><br/><br/><div style="overflow-wrap: break-word;">{{sampled_str}}</div>""")
# speedup rng
set_hardware_rng_(jax)
# main functions
@click.command()
@click.option('--seed', default = 42)
@click.option('--batch_size', default = 4)
@click.option('--grad_accum_every', default = 4)
@click.option('--learning_rate', default = 2e-4)
@click.option('--weight_decay', default = 1e-3)
@click.option('--data_parallel', default = False, is_flag = True)
@click.option('--max_grad_norm', default = 0.5)
@click.option('--validate_every', default = 100)
@click.option('--sample_every', default = 500)
@click.option('--checkpoint_every', default = 1000)
@click.option('--checkpoint_path', default = './ckpts')
@click.option('--checkpoint_keep_n', default = 500)
@click.option('--config_path', default = './configs/model')
@click.option('--model_name', default = 'default')
@click.option('--prime_length', default = 25)
@click.option('--seq_len', default = 1024)
@click.option('--mixed_precision', default = False, is_flag = True)
@click.option('--data_path', default = './train_data')
@click.option('--wandb_off', default = False, is_flag = True)
@click.option('--wandb_project_name', default = 'progen-training')
@click.option('--new', default = False, is_flag = True)
def main(
seed,
batch_size,
grad_accum_every,
learning_rate,
weight_decay,
data_parallel,
max_grad_norm,
validate_every,
sample_every,
checkpoint_every,
checkpoint_path,
checkpoint_keep_n,
config_path,
model_name,
prime_length,
seq_len,
mixed_precision,
data_path,
wandb_off,
wandb_project_name,
new
):
# prepare folders
reset_checkpoint, get_last_checkpoint, save_checkpoint = get_checkpoint_fns(checkpoint_path)
if new:
if not confirm('are you sure you want to clear all your checkpoints and restart training?'):
exit()
reset_checkpoint()
# initialize all states, or load from checkpoint
last_checkpoint = get_last_checkpoint()
if not exists(last_checkpoint):
config_folder_path = Path(config_path)
config_path = config_folder_path / f'{model_name}.toml'
assert config_path.exists(), f'path to your model config {str(config_path)} does not exist'
model_kwargs = toml.loads(config_path.read_text())
else:
model_kwargs = last_checkpoint['model_config']
# setup model and params
model = ProGen(**{
**model_kwargs,
'mixed_precision': mixed_precision
})
model_apply = jit(model.apply)
rng = PRNGSequence(seed)
loss_fn = get_loss_fn(model, data_parallel = data_parallel)
# optimizer
exclude_norm_and_bias_params = lambda p: tree_map(lambda x: x.ndim > 1, p)
optim = chain(
clip_by_global_norm(max_grad_norm),
adamw(learning_rate, weight_decay = weight_decay, mask = exclude_norm_and_bias_params),
apply_every(grad_accum_every)
)
# get params and optimizer state
if exists(last_checkpoint):
params = last_checkpoint['params']
optim_state = last_checkpoint['optim_state']
start_seq_index = last_checkpoint['next_seq_index']
else:
mock_data = np.zeros((model_kwargs['seq_len'],), dtype = np.uint8)
params = model.init(next(rng), mock_data)
optim_state = optim.init(params)
start_seq_index = 0
# experiment tracker
seq_len = model_kwargs['seq_len']
num_params = tree_util.tree_reduce(lambda acc, el: acc + el.size, params, 0)
num_params_readable = humanize.naturalsize(num_params)
    wandb_kwargs = {'mode': 'disabled'} if wandb_off else {}
    if exists(last_checkpoint) and exists(last_checkpoint['run_id']):
        run_id = last_checkpoint['run_id']
        wandb_kwargs = {**wandb_kwargs, 'id': run_id, 'resume': 'allow'}
    wandb.init(project = wandb_project_name, **wandb_kwargs)
    # log the parameter count after init, since wandb.config requires an active run
    wandb.config.num_params = num_params
    wandb_run_id = wandb.run.id if not wandb_off else None
# get tf dataset
total_train_seqs, get_train_dataset = iterator_from_tfrecords_folder(data_path, data_type = 'train')
total_valid_seqs, get_valid_dataset = iterator_from_tfrecords_folder(data_path, data_type = 'valid',)
assert total_train_seqs > 0, 'no protein sequences found for training'
assert total_valid_seqs > 0, 'no protein sequences found for validation'
train_dataset = get_train_dataset(
seq_len = seq_len,
batch_size = batch_size,
skip = start_seq_index
)
valid_dataset = get_valid_dataset(
seq_len = seq_len,
batch_size = batch_size,
loop = True
)
# print
print(f'params: {num_params_readable}')
print(f'sequence length: {seq_len}')
print(f'num sequences: {total_train_seqs}')
print(f'starting from sequence {start_seq_index}')
# training
effective_batch_size = batch_size * grad_accum_every
seq_index_ranges = range(start_seq_index, total_train_seqs, effective_batch_size)
for i, seq_index in tqdm.tqdm(enumerate(seq_index_ranges), mininterval = 10., desc = 'training', total = len(seq_index_ranges)):
for _ in range(grad_accum_every):
data = next(train_dataset)
loss, grads = loss_fn(params, next(rng), data)
updates, optim_state = optim.update(grads, optim_state, params)
params = apply_updates(params, updates)
print(f'loss: {loss.item()}')
wandb.log({'loss': loss.item()})
if i % checkpoint_every == 0:
package = {
'next_seq_index': seq_index + effective_batch_size,
'params': params,
'optim_state': optim_state,
'model_config': model_kwargs,
'run_id': wandb_run_id
}
save_checkpoint(package, checkpoint_keep_n)
print(f"checkpoint to start at sequence index of {package['next_seq_index']}")
if i % validate_every == 0:
valid_data = next(valid_dataset)
loss, _ = loss_fn(params, next(rng), valid_data)
print(f'valid_loss: {loss.item()}')
wandb.log({'valid_loss': loss.item()})
if i % sample_every == 0:
valid_data = next(valid_dataset)[0]
prime = valid_data[:prime_length]
prime_str = decode_tokens(prime)
sampled = sample(rng, model_apply, params, prime, seq_len, top_k = 25)
sampled_str = decode_tokens(sampled[prime_length:])
print(prime_str, "\n", "*" * 40, "\n", sampled_str)
wandb.log({'samples': wandb.Html(sample_tmpl.render(prime_str = prime_str, sampled_str = sampled_str))})
if __name__ == '__main__':
main()
| progen-main | train.py |
from dotenv import load_dotenv
load_dotenv()
import click
import humanize
import jax
from jax import nn, random, jit, tree_util, numpy as np
from haiku import PRNGSequence
from progen_transformer import ProGen
from progen_transformer.data import decode_tokens, encode_tokens
from progen_transformer.utils import sample, set_hardware_rng_
from progen_transformer.checkpoint import get_checkpoint_fns
# speedup rng
set_hardware_rng_(jax)
# main functions
@click.command()
@click.option('--seed', default = 42)
@click.option('--checkpoint_path', default = './ckpts')
@click.option('--prime', default = '')
def main(
seed,
checkpoint_path,
prime,
):
# prepare folders
_, get_last_checkpoint, _ = get_checkpoint_fns(checkpoint_path)
last_checkpoint = get_last_checkpoint()
if last_checkpoint is None:
exit(f'no checkpoints found at {checkpoint_path}')
params = last_checkpoint['params']
num_seqs = max(last_checkpoint['next_seq_index'], 0)
# setup model and params
model_kwargs = last_checkpoint['model_config']
model = ProGen(**model_kwargs)
model_apply = jit(model.apply)
rng = PRNGSequence(seed)
# initialize all states, or load from checkpoint
seq_len = model_kwargs['seq_len']
num_params = tree_util.tree_reduce(lambda acc, el: acc + el.size, params, 0)
num_params_readable = humanize.naturalsize(num_params)
# print
print(f'params: {num_params_readable}')
print(f'sequence length: {seq_len}')
print(f'trained for {num_seqs} sequences')
# sample with prime
prime_tokens = encode_tokens(prime)
prime_length = len(prime_tokens) + 1
prime_tensor = np.array(prime_tokens, dtype = np.uint16)
    sampled = sample(rng, model_apply, params, prime_tensor, seq_len, top_k = 25, add_bos = True)
sampled_str = decode_tokens(sampled[prime_length:])
print("\n", prime, "\n", "*" * 40, "\n", sampled_str)
if __name__ == '__main__':
main()
| progen-main | sample.py |
import time
import os, errno
from pathlib import Path
from functools import partial
from google.cloud import storage
from cloudpickle import pickle
from progen_transformer.utils import clear_directory_, silentremove
# filesystem checkpoint fns
def file_reset_checkpoint(path):
clear_directory_(path)
def file_get_last_checkpoint(path):
checkpoints = sorted(path.glob('**/ckpt_*'))
if len(checkpoints) == 0:
return None
with open(str(checkpoints[-1]), 'rb') as f:
package = pickle.load(f)
return package
def file_save_checkpoint(path, package, keep_last_n = None):
unix_time = int(time.time())
checkpoints = sorted(path.glob('**/ckpt_*'))
num_checkpoints = len(checkpoints)
with open(str(path / f'ckpt_{unix_time}.pkl'), 'wb') as f:
pickle.dump(package, f)
if keep_last_n is None:
return
for path_to_rm in checkpoints[:max(0, num_checkpoints - keep_last_n)]:
silentremove(path_to_rm)
# gcs checkpoint fns
GCS_READ_TIMEOUT = 60 * 30
GCS_WRITE_TIMEOUT = 60 * 30
def gcs_reset_checkpoint(bucket):
bucket.delete_blobs(list(bucket.list_blobs()))
def gcs_get_last_checkpoint(bucket):
blobs = sorted(list(bucket.list_blobs()))
if len(blobs) == 0:
return None
last_checkpoint = blobs[-1]
filename = f'/tmp/{last_checkpoint.name}'
with open(filename, 'wb') as f:
last_checkpoint.download_to_file(f, timeout = GCS_READ_TIMEOUT)
with open(filename, 'rb') as f:
package = pickle.load(f)
return package
def gcs_save_checkpoint(bucket, package, keep_last_n = None):
unix_time = int(time.time())
blobs = sorted(list(bucket.list_blobs()))
num_checkpoints = len(blobs)
filename = f'ckpt_{unix_time}.pkl'
tmp_path = f'/tmp/{filename}'
with open(tmp_path, 'wb') as f:
pickle.dump(package, f)
blob = bucket.blob(filename)
blob.upload_from_filename(tmp_path, timeout = GCS_WRITE_TIMEOUT)
if keep_last_n is None:
return
bucket.delete_blobs(blobs[:max(0, num_checkpoints - keep_last_n)])
# factory fn
def get_checkpoint_fns(path):
use_gcs = path.startswith('gs://')
if not use_gcs:
obj = Path(path)
obj.mkdir(exist_ok = True, parents = True)
fns = (
file_reset_checkpoint,
file_get_last_checkpoint,
file_save_checkpoint
)
else:
client = storage.Client()
bucket_name = path[5:]
obj = client.get_bucket(bucket_name)
fns = (
gcs_reset_checkpoint,
gcs_get_last_checkpoint,
gcs_save_checkpoint
)
fns = tuple(map(lambda fn: partial(fn, obj), fns))
return fns
| progen-main | progen_transformer/checkpoint.py |
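# a short sketch of the checkpoint factory above; './ckpts' could also be a 'gs://bucket' path,
# in which case the GCS variants of the three functions are returned instead
from progen_transformer.checkpoint import get_checkpoint_fns

reset_checkpoint, get_last_checkpoint, save_checkpoint = get_checkpoint_fns('./ckpts')

package = {'params': None, 'optim_state': None, 'next_seq_index': 0, 'model_config': {}, 'run_id': None}
save_checkpoint(package, keep_last_n = 2)  # older ckpt_* pickles beyond the last two are deleted
restored = get_last_checkpoint()           # returns the most recent package, or None if empty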
from functools import partial
import jax
from jax import random
from jax import nn
from jax.lax import stop_gradient
import jax.numpy as np
import jmp
import haiku as hk
from haiku import initializers
from einops import rearrange, repeat
from progen_transformer.utils import exists
# constants
ATTN_MASK_VALUE = -1e10
# helpers
LayerNorm = partial(hk.LayerNorm, create_scale = True, create_offset = False, axis = -1)
def fixed_pos_embedding(seq, dim):
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum("i , j -> i j", np.arange(seq), inv_freq)
sinusoid_inp = repeat(sinusoid_inp, "b n -> b (n r)", r = 2)[None, :, :]
return np.sin(sinusoid_inp), np.cos(sinusoid_inp)
def rotate_every_two(x):
x = rearrange(x, '... (d r) -> ... d r', r = 2)
x1, x2 = x[..., 0], x[..., 1]
x = np.stack((-x2, x1), axis = -1)
return rearrange(x, "... d r -> ... (d r)")
def apply_rotary_pos_emb(x, sincos):
sin, cos = sincos
rot_dim = sin.shape[-1]
x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
x = (x * cos) + (rotate_every_two(x) * sin)
return np.concatenate((x, x_pass), axis = -1)
def shift_tokens(x):
x_shift, x_pass = np.array_split(x, 2, axis = -1)
x_shift = np.pad(x_shift, ((1, 0), (0, 0)), mode = 'constant')[:-1]
return np.concatenate((x_shift, x_pass), axis = -1)
# classes
class LocalAttention(hk.Module):
def __init__(
self,
*,
name,
dim,
window_size,
heads = 8,
dim_head = 64,
shift_tokens = True
):
super().__init__(name = name)
self.heads = heads
self.scale = dim_head ** -0.5
self.window_size = window_size
inner_dim = dim_head * heads
self.norm = LayerNorm()
self.shift_tokens = shift_tokens
self.to_qkv = hk.Linear(inner_dim * 3, with_bias = False)
self.to_out = hk.Linear(dim)
def __call__(self, x, *, pos_emb):
x = self.norm(x)
if self.shift_tokens:
x = shift_tokens(x)
n, h, wsz = x.shape[0], self.heads, self.window_size
assert (n % wsz) == 0, 'sequence length must be divisible by the window size'
window = n // wsz
qkv = self.to_qkv(x)
q, k, v = np.split(qkv, 3, axis = -1)
q, k, v = map(lambda t: rearrange(t, 'n (h d) -> h n d', h = h), (q, k, v))
q, k, v = map(lambda t: apply_rotary_pos_emb(t, pos_emb), (q, k, v))
q, k, v = map(lambda t: rearrange(t, 'h (w n) d -> h w n d', w = window), (q, k, v))
k, v = map(lambda t: np.pad(t, ((0, 0), (1, 0), (0, 0), (0, 0)), constant_values = 0.), (k ,v))
k, v = map(lambda t: np.concatenate((t[:, :-1], t[:, 1:]), axis = 2), (k, v))
sim = np.einsum('h w i d, h w j d -> h w i j', q, k) * self.scale
mask = np.tril(np.ones((wsz, wsz * 2)), wsz)
sim = np.where(mask, sim, ATTN_MASK_VALUE)
sim = sim - stop_gradient(np.amax(sim, axis = -1, keepdims = True))
attn = nn.softmax(sim, axis = -1)
out = np.einsum('h w i j, h w j d -> h w i d', attn, v)
out = rearrange(out, 'h w n d -> (w n) (h d)')
return self.to_out(out)
class FeedForward(hk.Module):
def __init__(
self,
*,
name,
dim,
ff_mult = 4,
glu = False,
seq_len = None,
spatial_gate = False,
shift_tokens = True
):
super().__init__(name = name)
assert not (glu and spatial_gate), 'glu and sgu cannot be turned on at the same time'
hidden_dim = dim * ff_mult
hidden_dim *= (1 if not glu else 2)
self.norm = LayerNorm()
self.shift_tokens = shift_tokens
self.proj_in = hk.Linear(hidden_dim)
self.proj_out = hk.Linear(dim)
self.glu = glu
self.sgu = SGU(dim = hidden_dim, dim_out = hidden_dim // 2, seq_len = seq_len) if spatial_gate else None
def __call__(self, x):
x = self.norm(x)
if self.shift_tokens:
x = shift_tokens(x)
x = self.proj_in(x)
if self.glu:
x, gate = np.split(x, 2, axis = -1)
x *= nn.gelu(gate)
else:
x = nn.gelu(x)
if exists(self.sgu):
x = self.sgu(x)
x = self.proj_out(x)
return x
class SGU(hk.Module):
def __init__(
self,
*,
dim,
dim_out,
seq_len,
eps = 1e-3
):
super().__init__()
self.eps = eps
self.seq_len = seq_len
self.norm = LayerNorm()
self.proj_out = hk.Linear(dim_out)
def __call__(self, x):
n = self.seq_len
x, gate = np.split(x, 2, axis = -1)
gate = self.norm(gate)
init_scale = self.eps / n
init_eps = initializers.RandomUniform(minval = -init_scale, maxval = init_scale)
weights = hk.get_parameter('spatial_weights', shape = (n, n), init = init_eps)
biases = hk.get_parameter('spatial_biases', shape = (n, 1), init = np.ones)
mask = np.tril(np.ones((n, n)))
weights = weights * mask
gate = np.einsum('n d, m n -> m d', gate, weights)
gate += biases
x = x * gate
return self.proj_out(x)
class ProGenBase(hk.Module):
def __init__(
self,
*,
num_tokens,
dim,
seq_len,
depth,
window_size = 256,
global_mlp_depth = 2,
heads = 8,
dim_head = 64,
ff_mult = 4,
ff_glu = True,
attn_dim = None,
clamp_gate = True,
shift_tokens = True
):
super().__init__()
self.dim_head = dim_head
self.embed = hk.Embed(num_tokens, dim)
self.layers = []
for i in range(depth):
use_gmlp = (depth - i) <= global_mlp_depth
use_ff_glu = not use_gmlp and ff_glu
self.layers.append([
LocalAttention(name = f'attn{i}', dim = dim, window_size = window_size, heads = heads, dim_head = dim_head, shift_tokens = shift_tokens),
FeedForward(name = f'ff{i}', dim = dim, ff_mult = ff_mult, seq_len = seq_len, spatial_gate = use_gmlp, glu = use_ff_glu, shift_tokens = shift_tokens)
])
self.to_logits = hk.Sequential([
LayerNorm(),
hk.Linear(num_tokens)
])
def __call__(self, x):
n = x.shape[0]
x = self.embed(x)
rotary_emb = fixed_pos_embedding(n, self.dim_head)
for attn, ff in self.layers:
x += attn(x, pos_emb = rotary_emb)
x += ff(x)
return self.to_logits(x)
def ProGen(mixed_precision = False, mixed_precision_policy = dict(params = 'float32', compute = 'float16', output = 'float32'), **kwargs):
@hk.transform
def inner(seq):
if mixed_precision:
serialized_policy = ','.join([f'{k}={v}' for k, v in mixed_precision_policy.items()])
policy = jmp.get_policy(serialized_policy)
hk.mixed_precision.set_policy(ProGenBase, policy)
return ProGenBase(**kwargs)(seq)
return inner
| progen-main | progen_transformer/progen.py |
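# a minimal init / forward sketch for the ProGen transform above; hyperparameters are illustrative,
# with seq_len kept divisible by window_size as the local attention requires
from jax import random, numpy as np
from progen_transformer import ProGen

model = ProGen(
    num_tokens = 256,
    dim = 512,
    seq_len = 1024,
    depth = 6,
    window_size = 256,
    global_mlp_depth = 2
)

rng = random.PRNGKey(0)
seq = np.zeros((1024,), dtype = np.uint8)

params = model.init(rng, seq)
logits = model.apply(params, rng, seq)  # (1024, 256)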
from progen_transformer.progen import ProGen
| progen-main | progen_transformer/__init__.py |
from math import ceil
import os, errno
from shutil import rmtree
import jax
from jax import random, nn, value_and_grad, vmap, pmap, jit, lax
from jax.lax import top_k
import jax.numpy as np
from einops import rearrange
# helper functions
def noop(x):
return x
def exists(val):
return val is not None
def log(t, eps = 1e-20):
return np.log(t + eps)
def confirm(question):
while True:
resp = input(f'{question} (y/n) ')
lower_resp = resp.lower()
if lower_resp in ('y', 'n'):
return lower_resp == 'y'
def clear_directory_(path):
rmtree(str(path), ignore_errors = True)
path.mkdir(exist_ok = True, parents = True)
def silentremove(filename):
try:
os.remove(filename)
except OSError:
pass
# training functions
def masked_mean(t, mask, axis = None):
return (t * mask).sum(axis = axis) / mask.sum(axis = axis)
def cross_entropy(logits, targets, axis = -1, ignore_index = 0):
logprobs = nn.log_softmax(logits, axis = axis)
nll = np.take_along_axis(logprobs, np.expand_dims(targets, axis = axis), axis = axis)
nll = nll.squeeze(-1)
# mask for loss is engineered so that it learns from the first padding token
# the padding token is reused as end-of-string for simplicity
mask = (targets != ignore_index)
eos_mask = (~mask).cumsum(axis = -1) == 1
mask = mask | eos_mask
ce = -masked_mean(nll, mask, axis = -1)
return ce
def get_loss_fn(model, data_parallel = False):
def loss_fn(params, key, data):
ids, labels = data[:-1], data[1:]
logits = model.apply(params, key, ids)
return cross_entropy(logits, labels, axis = -1)
loss_fn = jit(vmap(loss_fn, in_axes = (None, None, 0), out_axes = 0))
if data_parallel:
loss_fn = pmap(loss_fn, in_axes = (None, None, 0), out_axes = 0)
@value_and_grad
def batched_loss_fn(params, key, data):
if not data_parallel:
values = loss_fn(params, key, data)
return np.mean(values)
mask = np.ones((data.shape[0],))
device_count = jax.local_device_count()
batch_size = data.shape[0]
remainder = (batch_size % device_count)
if remainder != 0:
padding = device_count - remainder
data = np.pad(data, ((0, padding), (0, 0)))
mask = np.pad(mask, ((0, padding)))
data, mask = map(lambda t: rearrange(t, '(p b) ... -> p b ...', p = device_count), (data, mask))
values = loss_fn(params, key, data)
return masked_mean(values, mask)
return batched_loss_fn
# sampling functions
def select_top_k(tensor, k):
values, _ = top_k(tensor, k)
mask = tensor > values.min()
return mask, np.where(mask, tensor, 0.)
def gumbel_noise(rng, shape):
noise = random.uniform(rng, shape = shape, minval = 0., maxval = 1.)
return -log(-log(noise))
def sample(rng, fn, params, prime, length, top_k = None, add_bos = False):
start_pos = prime.shape[-1]
pad_right = length - prime.shape[-1]
padding = (0, pad_right) if not add_bos else (1, pad_right - 1)
seq = np.pad(prime, padding)
one_hots = np.eye(length, dtype = int)
for curr_pos in range(start_pos, length):
logits = fn(params, next(rng), seq)
logits = logits[curr_pos - 1]
noise = gumbel_noise(next(rng), logits.shape)
if exists(top_k):
mask, logits = select_top_k(logits, top_k)
noise *= mask
logits += noise
sampled_ind = np.argmax(logits, axis = -1)
one_hot = one_hots[curr_pos]
seq += one_hot * sampled_ind
# for now, just set everything after second padding token (eos) to padding
remove_after_eos_mask = (seq == 0).cumsum(axis = -1) > 1
seq *= ~remove_after_eos_mask
return seq
# rng hacks
def hardware_uniform(
rng_key,
shape,
dtype = np.float32,
minval = np.float32(0),
maxval = np.float32(1)
):
del rng_key
minval = lax.convert_element_type(minval, dtype)
maxval = lax.convert_element_type(maxval, dtype)
return lax.rng_uniform(minval, maxval, shape)
def hardware_bernoulli(rng_key, p = np.float32(0.5), shape = None):
del rng_key
return lax.rng_uniform(0.0, 1.0, shape) < p
def set_hardware_rng_(jax):
jax.random.bernoulli = hardware_bernoulli
jax.random.uniform = hardware_uniform
jax._src.random.uniform = hardware_uniform
| progen-main | progen_transformer/utils.py |
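# a small self-contained check of the cross_entropy helper above: tokens after the first padding
# token are ignored, but the first padding position itself (reused as end-of-string) still counts
from jax import random, numpy as np
from progen_transformer.utils import cross_entropy

logits = random.normal(random.PRNGKey(0), (8, 32))  # 8 positions, vocab of 32
targets = np.array([5, 9, 2, 0, 0, 0, 0, 0])        # 0 is the padding / end-of-string token
loss = cross_entropy(logits, targets, axis = -1)    # only positions 0-3 contribute to the mean
print(loss)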
import tensorflow as tf
import numpy as np
from functools import partial
from pathlib import Path
from contextlib import contextmanager
# writing tfrecords
def write(writer, values):
record_bytes = tf.train.Example(features = tf.train.Features(feature={
'seq': tf.train.Feature(bytes_list = tf.train.BytesList(value=[values]))
})).SerializeToString()
writer.write(record_bytes)
@contextmanager
def with_tfrecord_writer(path):
options = tf.io.TFRecordOptions(compression_type = 'GZIP')
with tf.io.TFRecordWriter(path, options = options) as writer:
yield partial(write, writer)
# reading tfrecords
def parse_fn(sample):
return tf.io.parse_single_example(sample, {
'seq': tf.io.FixedLenFeature([], tf.string)
})
def collate_fn(batch, pad_length, offset = 0):
tensors = [np.frombuffer(el, dtype = np.uint8).astype(np.uint16) for el in batch.numpy()]
tensors = map(lambda t: t[..., :pad_length], tensors)
tensors = map(lambda t: t + offset, tensors)
padded_tensors = map(lambda t: np.pad(t, (0, pad_length - t.shape[-1])), tensors)
return np.stack(list(padded_tensors))
def iterator_from_tfrecords_folder(folder, data_type = 'train'):
is_gcs_path = folder.startswith('gs://')
if is_gcs_path:
filenames = tf.io.gfile.glob(f'{folder}/*.{data_type}.tfrecord.gz')
else:
folder = Path(folder)
filenames = [str(p) for p in folder.glob(f'**/*.{data_type}.tfrecord.gz')]
num_seqs = sum(map(lambda t: int(t.split('.')[-4]), filenames))
def iter_fn(
seq_len,
batch_size,
skip = 0,
loop = False
):
dataset = tf.data.TFRecordDataset(filenames, compression_type = 'GZIP')
dataset = dataset.skip(skip)
dataset = dataset.map(parse_fn)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
if loop:
dataset = dataset.repeat()
for batch in dataset:
seq = batch['seq']
batch_size = seq.shape[0]
seq = collate_fn(seq, pad_length = seq_len, offset = 1)
bos = np.zeros((batch_size, 1), dtype = np.uint16)
seq = np.concatenate((bos, seq), axis = 1)
yield seq
return num_seqs, iter_fn
# tokenization
def encode_token(token):
return ord(token) + 1
def decode_token(token):
if token < 0:
return ''
return str(chr(token))
def encode_tokens(tokens):
return list(map(encode_token, tokens))
def decode_tokens(tokens, offset = 1):
return ''.join(list(map(decode_token, tokens.astype(np.int16) - offset)))
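# illustrative sketch (not part of the original file): tokenization is byte-level - each
# character maps to ord(char) + 1 so that id 0 is reserved for padding, and decode_tokens
# undoes the shift
def _example_token_round_trip():
    ids = np.array(encode_tokens('MKVL'))
    return decode_tokens(ids)  # -> 'MKVL'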
| progen-main | progen_transformer/data.py |
from setuptools import setup, find_packages
setup(
name = 'einops-exts',
packages = find_packages(exclude=[]),
version = '0.0.4',
license='MIT',
description = 'Einops Extensions',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/einops-exts',
keywords = [
'artificial intelligence',
'deep learning',
'tensor manipulation'
],
install_requires=[
'einops>=0.4',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| einops-exts-main | setup.py |
from einops_exts.einops_exts import check_shape
from einops_exts.einops_exts import rearrange_many, repeat_many, reduce_many
from einops_exts.einops_exts import rearrange_with_anon_dims, repeat_with_anon_dims, reduce_with_anon_dims
| einops-exts-main | einops_exts/__init__.py |
import re
from torch import nn
from functools import wraps, partial
from einops import rearrange, reduce, repeat
# checking shape
# @nils-werner
# https://github.com/arogozhnikov/einops/issues/168#issuecomment-1042933838
def check_shape(tensor, pattern, **kwargs):
return rearrange(tensor, f"{pattern} -> {pattern}", **kwargs)
# do same einops operations on a list of tensors
def _many(fn):
@wraps(fn)
def inner(tensors, pattern, **kwargs):
return (fn(tensor, pattern, **kwargs) for tensor in tensors)
return inner
# do einops with unflattening of anonymously named dimensions
# (...flattened) -> ...flattened
def _with_anon_dims(fn):
@wraps(fn)
def inner(tensor, pattern, **kwargs):
regex = r'(\.\.\.[a-zA-Z]+)'
matches = re.findall(regex, pattern)
get_anon_dim_name = lambda t: t.lstrip('...')
dim_prefixes = tuple(map(get_anon_dim_name, set(matches)))
update_kwargs_dict = dict()
for prefix in dim_prefixes:
assert prefix in kwargs, f'dimension list "{prefix}" was not passed in'
dim_list = kwargs[prefix]
assert isinstance(dim_list, (list, tuple)), f'dimension list "{prefix}" needs to be a tuple or list of dimensions'
dim_names = list(map(lambda ind: f'{prefix}{ind}', range(len(dim_list))))
update_kwargs_dict[prefix] = dict(zip(dim_names, dim_list))
def sub_with_anonymous_dims(t):
dim_name_prefix = get_anon_dim_name(t.groups()[0])
return ' '.join(update_kwargs_dict[dim_name_prefix].keys())
pattern_new = re.sub(regex, sub_with_anonymous_dims, pattern)
for prefix, update_dict in update_kwargs_dict.items():
del kwargs[prefix]
kwargs.update(update_dict)
return fn(tensor, pattern_new, **kwargs)
return inner
# generate all helper functions
rearrange_many = _many(rearrange)
repeat_many = _many(repeat)
reduce_many = _many(reduce)
rearrange_with_anon_dims = _with_anon_dims(rearrange)
repeat_with_anon_dims = _with_anon_dims(repeat)
reduce_with_anon_dims = _with_anon_dims(reduce)
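# illustrative usage sketch (not part of the original file): rearrange_many applies one
# pattern to several tensors at once, while rearrange_with_anon_dims expands a '...name'
# placeholder into explicitly named axes taken from a keyword list
def _example_helpers():
    import torch
    q, k = rearrange_many((torch.randn(2, 16, 8, 64), torch.randn(2, 16, 8, 64)), 'b n h d -> b h n d')
    x = rearrange_with_anon_dims(torch.randn(2, 3, 4, 5, 64), 'b ...dims d -> b (...dims) d', dims = (3, 4, 5))
    return q.shape, k.shape, x.shape  # (2, 8, 16, 64) twice, and (2, 60, 64)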
| einops-exts-main | einops_exts/einops_exts.py |
from torch import nn
from einops import rearrange
# for rearranging to and from a pattern
class EinopsToAndFrom(nn.Module):
def __init__(self, from_einops, to_einops, fn):
super().__init__()
self.from_einops = from_einops
self.to_einops = to_einops
self.fn = fn
if '...' in from_einops:
before, after = [part.strip().split() for part in from_einops.split('...')]
self.reconstitute_keys = tuple(zip(before, range(len(before)))) + tuple(zip(after, range(-len(after), 0)))
else:
split = from_einops.strip().split()
self.reconstitute_keys = tuple(zip(split, range(len(split))))
def forward(self, x, **kwargs):
shape = x.shape
reconstitute_kwargs = {key: shape[position] for key, position in self.reconstitute_keys}
x = rearrange(x, f'{self.from_einops} -> {self.to_einops}')
x = self.fn(x, **kwargs)
x = rearrange(x, f'{self.to_einops} -> {self.from_einops}', **reconstitute_kwargs)
return x
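# illustrative usage sketch (not part of the original file): wrap a module that expects
# sequence-shaped input ('b (h w) c') so it can be applied directly to a 'b c h w'
# feature map, with the spatial shape restored afterwards
def _example_einops_to_and_from():
    import torch
    layer = EinopsToAndFrom('b c h w', 'b (h w) c', nn.Identity())  # nn.Identity stands in for e.g. an attention block
    x = torch.randn(1, 32, 8, 8)
    return layer(x)  # same shape as x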
| einops-exts-main | einops_exts/torch.py |
from setuptools import setup, find_packages
setup(
name = 'phenaki-pytorch',
packages = find_packages(exclude=[]),
version = '0.3.1',
license='MIT',
description = 'Phenaki - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/phenaki-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanisms',
'text-to-video'
],
install_requires=[
'accelerate',
'beartype',
'einops>=0.6',
'ema-pytorch>=0.2.2',
'opencv-python',
'pillow',
'numpy',
'sentencepiece',
'torch>=1.6',
'torchtyping',
'torchvision',
'transformers>=4.20.1',
'tqdm',
'vector-quantize-pytorch>=0.10.15'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| phenaki-pytorch-main | setup.py |
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from beartype import beartype
from typing import Tuple
from einops import rearrange, repeat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def leaky_relu(p = 0.1):
return nn.LeakyReLU(p)
def l2norm(t):
return F.normalize(t, dim = -1)
# bias-less layernorm, as used in more recent T5s and PaLM, and also in @borisdayma's experiments shared with me
# observed to give greater stability
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4, dropout = 0.):
inner_dim = int(mult * (2 / 3) * dim)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim, bias = False)
)
# PEG - position generating module
class PEG(nn.Module):
def __init__(self, dim, causal = False):
super().__init__()
self.causal = causal
self.dsconv = nn.Conv3d(dim, dim, 3, groups = dim)
@beartype
def forward(self, x, shape: Tuple[int, int, int, int] = None):
needs_shape = x.ndim == 3
assert not (needs_shape and not exists(shape))
orig_shape = x.shape
if needs_shape:
x = x.reshape(*shape, -1)
x = rearrange(x, 'b ... d -> b d ...')
frame_padding = (2, 0) if self.causal else (1, 1)
x = F.pad(x, (1, 1, 1, 1, *frame_padding), value = 0.)
x = self.dsconv(x)
x = rearrange(x, 'b d ... -> b ... d')
if needs_shape:
x = rearrange(x, 'b ... d -> b (...) d')
return x.reshape(orig_shape)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
dim_context = None,
dim_head = 64,
heads = 8,
causal = False,
num_null_kv = 0,
norm_context = True,
dropout = 0.,
scale = 8
):
super().__init__()
self.heads = heads
self.causal = causal
self.scale = scale
inner_dim = dim_head * heads
dim_context = default(dim_context, dim)
if causal:
self.rel_pos_bias = AlibiPositionalBias(heads = heads)
self.attn_dropout = nn.Dropout(dropout)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(dim_context) if norm_context else nn.Identity()
self.num_null_kv = num_null_kv
self.null_kv = nn.Parameter(torch.randn(heads, 2 * num_null_kv, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim_context, inner_dim * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(
self,
x,
mask = None,
context = None,
attn_bias = None
):
batch, device, dtype = x.shape[0], x.device, x.dtype
if exists(context):
context = self.context_norm(context)
kv_input = default(context, x)
x = self.norm(x)
q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
nk, nv = repeat(self.null_kv, 'h (n r) d -> b h n r d', b = batch, r = 2).unbind(dim = -2)
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
i, j = sim.shape[-2:]
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (self.num_null_kv, 0), value = 0.)
sim = sim + attn_bias
if exists(mask):
mask = F.pad(mask, (self.num_null_kv, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
if self.causal:
sim = sim + self.rel_pos_bias(sim)
causal_mask = torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# alibi positional bias for extrapolation
class AlibiPositionalBias(nn.Module):
def __init__(self, heads):
super().__init__()
self.heads = heads
slopes = torch.Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
def forward(self, sim):
h, i, j, device = *sim.shape[-3:], sim.device
if exists(self.bias) and self.bias.shape[-1] >= j:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = F.pad(bias, (0, 0, 0, 0, 0, num_heads_unalibied))
self.register_buffer('bias', bias, persistent = False)
return self.bias
class ContinuousPositionBias(nn.Module):
""" from https://arxiv.org/abs/2111.09883 """
def __init__(
self,
*,
dim,
heads,
num_dims = 2, # 2 for images, 3 for video
layers = 2,
log_dist = True,
cache_rel_pos = False
):
super().__init__()
self.num_dims = num_dims
self.log_dist = log_dist
self.net = nn.ModuleList([])
self.net.append(nn.Sequential(nn.Linear(self.num_dims, dim), leaky_relu()))
for _ in range(layers - 1):
self.net.append(nn.Sequential(nn.Linear(dim, dim), leaky_relu()))
self.net.append(nn.Linear(dim, heads))
self.cache_rel_pos = cache_rel_pos
self.register_buffer('rel_pos', None, persistent = False)
def forward(self, *dimensions, device = torch.device('cpu')):
if not exists(self.rel_pos) or not self.cache_rel_pos:
positions = [torch.arange(d, device = device) for d in dimensions]
grid = torch.stack(torch.meshgrid(*positions, indexing = 'ij'))
grid = rearrange(grid, 'c ... -> (...) c')
rel_pos = rearrange(grid, 'i c -> i 1 c') - rearrange(grid, 'j c -> 1 j c')
if self.log_dist:
rel_pos = torch.sign(rel_pos) * torch.log(rel_pos.abs() + 1)
self.register_buffer('rel_pos', rel_pos, persistent = False)
rel_pos = self.rel_pos.float()
for layer in self.net:
rel_pos = layer(rel_pos)
return rearrange(rel_pos, 'i j h -> h i j')
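# illustrative usage sketch (not part of the original file): the MLP above maps relative
# grid coordinates to per-head biases, yielding a (heads, h * w, h * w) tensor that is
# added to the attention logits
def _example_continuous_pos_bias():
    rel_pos_bias = ContinuousPositionBias(dim = 64, heads = 8, num_dims = 2)
    return rel_pos_bias(16, 16)  # -> shape (8, 256, 256)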
# transformer
class Transformer(nn.Module):
def __init__(
self,
dim,
*,
depth,
dim_context = None,
causal = False,
dim_head = 64,
heads = 8,
ff_mult = 4,
peg = False,
peg_causal = False,
attn_num_null_kv = 2,
has_cross_attn = False,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PEG(dim = dim, causal = peg_causal) if peg else None,
Attention(dim = dim, dim_head = dim_head, heads = heads, causal = causal, dropout = attn_dropout),
Attention(dim = dim, dim_head = dim_head, dim_context = dim_context, heads = heads, causal = False, num_null_kv = attn_num_null_kv, dropout = attn_dropout) if has_cross_attn else None,
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
]))
self.norm_out = LayerNorm(dim)
@beartype
def forward(
self,
x,
video_shape: Tuple[int, int, int, int] = None,
attn_bias = None,
context = None,
self_attn_mask = None,
cross_attn_context_mask = None
):
for peg, self_attn, cross_attn, ff in self.layers:
if exists(peg):
x = peg(x, shape = video_shape) + x
x = self_attn(x, attn_bias = attn_bias, mask = self_attn_mask) + x
if exists(cross_attn) and exists(context):
x = cross_attn(x, context = context, mask = cross_attn_context_mask) + x
x = ff(x) + x
return self.norm_out(x)
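# illustrative usage sketch (not part of the original file): a minimal unconditional
# transformer over a token sequence - cross attention and the PEG convolution are both
# left disabled here, so no context or video shape needs to be passed
def _example_transformer():
    model = Transformer(dim = 64, depth = 1, dim_head = 16, heads = 4)
    x = torch.randn(1, 32, 64)
    return model(x)  # -> shape (1, 32, 64)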
| phenaki-pytorch-main | phenaki_pytorch/attention.py |
import math
import copy
from pathlib import Path
from random import random, choices
from functools import partial
from collections import namedtuple
from multiprocessing import cpu_count
from beartype import beartype
from beartype.door import is_bearable
from beartype.vale import Is
from typing import Optional, List, Iterable, Tuple
from typing_extensions import Annotated
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.optim import Adam
from torchvision import transforms as T
from torchvision.utils import make_grid, save_image
from einops import rearrange, reduce
from einops.layers.torch import Rearrange
from PIL import Image
from tqdm.auto import tqdm
from phenaki_pytorch.optimizer import get_optimizer
from accelerate import Accelerator, DistributedType
from phenaki_pytorch.phenaki_pytorch import Phenaki
from phenaki_pytorch.data import ImageDataset, VideoDataset, video_tensor_to_gif, DataLoader
# constants
DATASET_FIELD_TYPE_CONFIG = dict(
videos = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.float and t.ndim in {4, 5}]
],
texts = List[str],
video_codebook_ids = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.long]
],
video_frame_mask = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.bool]
],
text_embeds = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.float and t.ndim == 3]
],
)
# helper functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def identity(t, *args, **kwargs):
return t
def cycle(dl):
while True:
for data in dl:
yield data
def has_int_squareroot(num):
return (math.sqrt(num) ** 2) == num
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
def elements_to_device_if_tensor(arr, device):
output = []
for el in arr:
if isinstance(el, torch.Tensor):
el = el.to(device)
output.append(el)
return output
def split_iterable(it, split_size):
accum = []
for ind in range(math.ceil(len(it) / split_size)):
start_index = ind * split_size
accum.append(it[start_index: (start_index + split_size)])
return accum
def split(t, split_size = None):
if not exists(split_size):
return t
if isinstance(t, torch.Tensor):
return t.split(split_size, dim = 0)
if isinstance(t, Iterable):
return split_iterable(t, split_size)
raise TypeError(f'unsupported type {type(t)} passed into split')
def find_first(cond, arr):
for el in arr:
if cond(el):
return el
return None
def split_args_and_kwargs(*args, batch_size = None, split_size = None, **kwargs):
all_args = (*args, *kwargs.values())
len_all_args = len(all_args)
if not exists(batch_size):
first_tensor = find_first(lambda t: isinstance(t, torch.Tensor), all_args)
assert exists(first_tensor)
batch_size = len(first_tensor)
split_size = default(split_size, batch_size)
num_chunks = math.ceil(batch_size / split_size)
dict_len = len(kwargs)
dict_keys = kwargs.keys()
split_kwargs_index = len_all_args - dict_len
split_all_args = [split(arg, split_size = split_size) if exists(arg) and isinstance(arg, (torch.Tensor, Iterable)) else ((arg,) * num_chunks) for arg in all_args]
chunk_sizes = tuple(map(len, split_all_args[0]))
for (chunk_size, *chunked_all_args) in tuple(zip(chunk_sizes, *split_all_args)):
chunked_args, chunked_kwargs_values = chunked_all_args[:split_kwargs_index], chunked_all_args[split_kwargs_index:]
chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))
chunk_size_frac = chunk_size / batch_size
yield chunk_size_frac, (chunked_args, chunked_kwargs)
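# illustrative sketch (not part of the original file): chunk keyword arguments into
# sub-batches, as used by the sampling loop in `train_step` below
def _example_split_kwargs():
    texts = ['a', 'b', 'c', 'd', 'e']
    chunks = [kwargs for _, (_, kwargs) in split_args_and_kwargs(batch_size = 5, split_size = 2, texts = texts)]
    return chunks  # [{'texts': ['a', 'b']}, {'texts': ['c', 'd']}, {'texts': ['e']}]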
def simple_slugify(text, max_length = 255):
return text.replace('-', '_').replace(',', '').replace(' ', '_').replace('|', '--').strip('-_')[:max_length]
def has_duplicates(tup):
counts = dict()
for el in tup:
if el not in counts:
counts[el] = 0
counts[el] += 1
return any(filter(lambda count: count > 1, counts.values()))
def determine_types(data, config):
output = []
for el in data:
for name, data_type in config.items():
if is_bearable(el, data_type):
output.append(name)
break
else:
raise TypeError(f'unable to determine type of {data}')
return tuple(output)
# trainer class
@beartype
class PhenakiTrainer(object):
def __init__(
self,
phenaki: Phenaki,
*,
folder = None,
train_on_images = False,
batch_size = 16,
grad_accum_every = 1,
num_frames = 17,
sample_num_frames = None,
train_lr = 1e-4,
train_num_steps = 100000,
max_grad_norm = None,
ema_update_every = 10,
ema_decay = 0.995,
adam_betas = (0.9, 0.99),
wd = 0,
save_and_sample_every = 1000,
num_samples = 25,
results_folder = './results',
amp = False,
fp16 = False,
split_batches = True,
convert_image_to = None,
sample_texts_file_path = None, # path to a text file with video captions, delimited by newline
sample_texts: Optional[List[str]] = None,
dataset: Optional[Dataset] = None,
dataset_fields: Optional[Tuple[str, ...]] = None
):
super().__init__()
maskgit = phenaki.maskgit
cvivit = phenaki.cvivit
assert exists(cvivit), 'cvivit must be present on phenaki'
# define accelerator
self.accelerator = Accelerator(
split_batches = split_batches,
mixed_precision = 'fp16' if fp16 else 'no'
)
self.accelerator.native_amp = amp
self.model = phenaki
assert has_int_squareroot(num_samples), 'number of samples must have an integer square root'
self.unconditional = maskgit.unconditional
# training related variables
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
self.max_grad_norm = max_grad_norm
self.train_num_steps = train_num_steps
self.image_size = cvivit.image_size
# sampling related variables
self.num_samples = num_samples
self.sample_texts = None
if exists(sample_texts_file_path):
sample_texts_file_path = Path(sample_texts_file_path)
assert sample_texts_file_path.exists()
captions = sample_texts_file_path.read_text().split('\n')
self.sample_texts = list(filter(len, captions))
elif exists(sample_texts):
self.sample_texts = sample_texts
assert maskgit.unconditional or exists(self.sample_texts), 'if maskgit is to be trained text conditioned, `sample_texts` List[str] or `sample_texts_file_path` must be given'
self.save_and_sample_every = save_and_sample_every
# dataset and dataloader
dataset_klass = ImageDataset if train_on_images else VideoDataset
self.sample_num_frames = default(sample_num_frames, num_frames)
self.train_on_images = train_on_images
if dataset:
self.ds = dataset
elif train_on_images:
assert exists(folder)
self.ds = ImageDataset(folder, self.image_size)
else:
assert exists(folder)
self.ds = VideoDataset(folder, self.image_size, num_frames = num_frames)
dl = DataLoader(self.ds, batch_size = batch_size, shuffle = True, pin_memory = True, num_workers = cpu_count())
dl = self.accelerator.prepare(dl)
self.dl = cycle(dl)
if exists(dataset_fields):
assert not has_duplicates(dataset_fields), 'dataset fields must not have duplicate field names'
valid_dataset_fields = set(DATASET_FIELD_TYPE_CONFIG.keys())
assert len(set(dataset_fields) - valid_dataset_fields) == 0, f'dataset fields must be one of {valid_dataset_fields}'
self.dataset_fields = dataset_fields
# optimizer
self.opt = get_optimizer(maskgit.parameters(), lr = train_lr, wd = wd, betas = adam_betas)
# step counter state
self.step = 0
# prepare model, dataloader, optimizer with accelerator
self.model, self.opt = self.accelerator.prepare(self.model, self.opt)
self.results_folder = Path(results_folder)
self.results_folder.mkdir(parents = True, exist_ok = True)
def data_tuple_to_kwargs(self, data):
if not exists(self.dataset_fields):
self.dataset_fields = determine_types(data, DATASET_FIELD_TYPE_CONFIG)
assert not has_duplicates(self.dataset_fields), 'dataset fields must not have duplicate field names'
return dict(zip(self.dataset_fields, data))
def print(self, msg):
self.accelerator.print(msg)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def save(self, milestone):
if not self.accelerator.is_local_main_process:
return
data = {
'step': self.step,
'model': self.accelerator.get_state_dict(self.model),
'opt': self.opt.state_dict(),
'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None
}
torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))
def load(self, milestone):
accelerator = self.accelerator
device = accelerator.device
data = torch.load(str(self.results_folder / f'model-{milestone}.pt'), map_location=device)
model = self.accelerator.unwrap_model(self.model)
model.load_state_dict(data['model'])
self.step = data['step']
self.opt.load_state_dict(data['opt'])
if exists(self.accelerator.scaler) and exists(data['scaler']):
self.accelerator.scaler.load_state_dict(data['scaler'])
def train_step(
self,
only_train_generator = False,
only_train_critic = False
):
accelerator = self.accelerator
device = self.device
total_loss = 0.
for _ in range(self.grad_accum_every):
data = next(self.dl)
data = elements_to_device_if_tensor(data, device)
data_kwargs = self.data_tuple_to_kwargs(data)
assert not (self.train_on_images and data_kwargs['videos'].ndim != 4), 'you have it set to train on images, but the dataset is not returning tensors of 4 dimensions (batch, channels, height, width)'
with self.accelerator.autocast():
loss = self.model(**{
**data_kwargs,
'only_train_generator': only_train_generator,
'only_train_critic': only_train_critic
})
loss = loss / self.grad_accum_every
total_loss += loss.item()
self.accelerator.backward(loss)
if exists(self.max_grad_norm):
accelerator.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)
accelerator.wait_for_everyone()
self.opt.step()
self.opt.zero_grad()
accelerator.wait_for_everyone()
if self.is_main and self.step % self.save_and_sample_every == 0:
self.model.eval()
milestone = self.step // self.save_and_sample_every
# whether to pass in texts or not
sample_kwargs = dict()
if not self.unconditional:
texts = choices(self.sample_texts, k = self.num_samples)
else:
texts = (None,) * self.num_samples
sample_kwargs = {'texts': texts}
# method to call
if self.train_on_images:
sample_method = self.model.sample_images
else:
sample_method = partial(self.model.sample, num_frames = self.sample_num_frames)
# evaluate in groups, splitting the kwargs appropriately
with torch.no_grad():
groups = num_to_groups(self.num_samples, self.batch_size)
args_kwargs_iter = split_args_and_kwargs(batch_size = self.num_samples, split_size = self.batch_size, **sample_kwargs)
all_sampled = []
for group_batch_size, (_, (_, kwargs)) in zip(groups, args_kwargs_iter):
_kwargs = kwargs if not self.unconditional else dict()
sampled = sample_method(batch_size = group_batch_size, **_kwargs)
all_sampled.append(sampled)
# save video and images differently
if not self.train_on_images:
sampled_videos = torch.cat(all_sampled, dim = 0)
milestone_folder = self.results_folder / f'videos.{milestone}'
milestone_folder.mkdir(parents = True, exist_ok = True)
for ind, (video_tensor, video_caption) in enumerate(zip(sampled_videos.unbind(dim = 0), texts)):
slugged_video_caption = simple_slugify(video_caption) if exists(video_caption) else str(ind)
video_tensor_to_gif(video_tensor, str(milestone_folder / f'{slugged_video_caption}.gif'))
else:
nrows = int(math.sqrt(self.num_samples))
sampled_images = torch.cat(all_sampled, dim = 0).detach().cpu().float().clamp(0., 1.)
grid = make_grid(sampled_images, nrow = nrows, normalize = True, value_range = (0, 1))
save_image(grid, str(self.results_folder / f'{milestone}.png'))
# save checkpoints
self.save(milestone)
self.step += 1
return total_loss
def train(
self,
only_train_generator = False,
only_train_critic = False
):
with tqdm(
initial = self.step,
total = self.train_num_steps,
disable = not self.is_main
) as pbar:
while self.step < self.train_num_steps:
loss = self.train_step(
only_train_generator = only_train_generator,
only_train_critic = only_train_critic
)
pbar.set_description(f'loss: {loss:.4f}')
pbar.update(1)
self.print('training complete')
| phenaki-pytorch-main | phenaki_pytorch/phenaki_trainer.py |
from pathlib import Path
import copy
import math
from functools import wraps
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.autograd import grad as torch_grad
import torchvision
from einops import rearrange, repeat, pack, unpack
from einops.layers.torch import Rearrange
from vector_quantize_pytorch import VectorQuantize
from phenaki_pytorch.attention import Attention, Transformer, ContinuousPositionBias
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
def leaky_relu(p = 0.1):
return nn.LeakyReLU(p)
def remove_vgg(fn):
@wraps(fn)
def inner(self, *args, **kwargs):
has_vgg = hasattr(self, 'vgg')
if has_vgg:
vgg = self.vgg
delattr(self, 'vgg')
out = fn(self, *args, **kwargs)
if has_vgg:
self.vgg = vgg
return out
return inner
def pair(val):
ret = (val, val) if not isinstance(val, tuple) else val
assert len(ret) == 2
return ret
def cast_tuple(val, l = 1):
return val if isinstance(val, tuple) else (val,) * l
def gradient_penalty(images, output, weight = 10):
batch_size = images.shape[0]
gradients = torch_grad(
outputs = output,
inputs = images,
grad_outputs = torch.ones(output.size(), device = images.device),
create_graph = True,
retain_graph = True,
only_inputs = True
)[0]
gradients = rearrange(gradients, 'b ... -> b (...)')
return weight * ((gradients.norm(2, dim = 1) - 1) ** 2).mean()
def l2norm(t):
return F.normalize(t, dim = -1)
def leaky_relu(p = 0.1):
return nn.LeakyReLU(p)
def safe_div(numer, denom, eps = 1e-8):
return numer / (denom + eps)
def log(t, eps = 1e-10):
    return torch.log(t + eps)
# gan losses
def hinge_discr_loss(fake, real):
return (F.relu(1 + fake) + F.relu(1 - real)).mean()
def hinge_gen_loss(fake):
return -fake.mean()
def bce_discr_loss(fake, real):
return (-log(1 - torch.sigmoid(fake)) - log(torch.sigmoid(real))).mean()
def bce_gen_loss(fake):
return -log(torch.sigmoid(fake)).mean()
def grad_layer_wrt_loss(loss, layer):
return torch_grad(
outputs = loss,
inputs = layer,
grad_outputs = torch.ones_like(loss),
retain_graph = True
)[0].detach()
# discriminator
class DiscriminatorBlock(nn.Module):
def __init__(
self,
input_channels,
filters,
downsample = True
):
super().__init__()
self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = (2 if downsample else 1))
self.net = nn.Sequential(
nn.Conv2d(input_channels, filters, 3, padding=1),
leaky_relu(),
nn.Conv2d(filters, filters, 3, padding=1),
leaky_relu()
)
self.downsample = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = 2, p2 = 2),
nn.Conv2d(filters * 4, filters, 1)
) if downsample else None
def forward(self, x):
res = self.conv_res(x)
x = self.net(x)
if exists(self.downsample):
x = self.downsample(x)
x = (x + res) * (1 / math.sqrt(2))
return x
class Discriminator(nn.Module):
def __init__(
self,
*,
dim,
image_size,
channels = 3,
attn_res_layers = (16,),
max_dim = 512
):
super().__init__()
image_size = pair(image_size)
min_image_resolution = min(image_size)
num_layers = int(math.log2(min_image_resolution) - 2)
attn_res_layers = cast_tuple(attn_res_layers, num_layers)
blocks = []
layer_dims = [channels] + [(dim * 4) * (2 ** i) for i in range(num_layers + 1)]
layer_dims = [min(layer_dim, max_dim) for layer_dim in layer_dims]
layer_dims_in_out = tuple(zip(layer_dims[:-1], layer_dims[1:]))
blocks = []
attn_blocks = []
image_resolution = min_image_resolution
for ind, (in_chan, out_chan) in enumerate(layer_dims_in_out):
num_layer = ind + 1
is_not_last = ind != (len(layer_dims_in_out) - 1)
block = DiscriminatorBlock(in_chan, out_chan, downsample = is_not_last)
blocks.append(block)
attn_block = None
if image_resolution in attn_res_layers:
attn_block = Attention(dim = out_chan)
attn_blocks.append(attn_block)
image_resolution //= 2
self.blocks = nn.ModuleList(blocks)
self.attn_blocks = nn.ModuleList(attn_blocks)
dim_last = layer_dims[-1]
downsample_factor = 2 ** num_layers
last_fmap_size = tuple(map(lambda n: n // downsample_factor, image_size))
latent_dim = last_fmap_size[0] * last_fmap_size[1] * dim_last
self.to_logits = nn.Sequential(
nn.Conv2d(dim_last, dim_last, 3, padding = 1),
leaky_relu(),
Rearrange('b ... -> b (...)'),
nn.Linear(latent_dim, 1),
Rearrange('b 1 -> b')
)
def forward(self, x):
for block, attn_block in zip(self.blocks, self.attn_blocks):
x = block(x)
if exists(attn_block):
x, ps = pack([x], 'b c *')
x = rearrange(x, 'b c n -> b n c')
x = attn_block(x) + x
x = rearrange(x, 'b n c -> b c n')
x, = unpack(x, ps, 'b c *')
return self.to_logits(x)
# c-vivit - 3d ViT with factorized spatial and temporal attention made into a vqgan-vae autoencoder
def pick_video_frame(video, frame_indices):
batch, device = video.shape[0], video.device
video = rearrange(video, 'b c f ... -> b f c ...')
batch_indices = torch.arange(batch, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
images = video[batch_indices, frame_indices]
images = rearrange(images, 'b 1 c ... -> b c ...')
return images
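# illustrative sketch (not part of the original file): gather one frame per video in the
# batch, given per-sample frame indices of shape (batch, 1)
def _example_pick_video_frame():
    video = torch.randn(2, 3, 5, 32, 32)           # (batch, channels, frames, height, width)
    frame_indices = torch.tensor([[0], [4]])
    return pick_video_frame(video, frame_indices)  # -> shape (2, 3, 32, 32)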
class CViViT(nn.Module):
def __init__(
self,
*,
dim,
codebook_size,
image_size,
patch_size,
temporal_patch_size,
spatial_depth,
temporal_depth,
discr_base_dim = 16,
dim_head = 64,
heads = 8,
channels = 3,
use_vgg_and_gan = True,
vgg = None,
discr_attn_res_layers = (16,),
use_hinge_loss = True,
attn_dropout = 0.,
ff_dropout = 0.
):
"""
einstein notations:
b - batch
c - channels
t - time
d - feature dimension
p1, p2, pt - image patch sizes and then temporal patch size
"""
super().__init__()
self.image_size = pair(image_size)
self.patch_size = pair(patch_size)
patch_height, patch_width = self.patch_size
self.temporal_patch_size = temporal_patch_size
self.spatial_rel_pos_bias = ContinuousPositionBias(dim = dim, heads = heads)
image_height, image_width = self.image_size
assert (image_height % patch_height) == 0 and (image_width % patch_width) == 0
self.to_patch_emb_first_frame = nn.Sequential(
Rearrange('b c 1 (h p1) (w p2) -> b 1 h w (c p1 p2)', p1 = patch_height, p2 = patch_width),
nn.LayerNorm(channels * patch_width * patch_height),
nn.Linear(channels * patch_width * patch_height, dim),
nn.LayerNorm(dim)
)
self.to_patch_emb = nn.Sequential(
Rearrange('b c (t pt) (h p1) (w p2) -> b t h w (c pt p1 p2)', p1 = patch_height, p2 = patch_width, pt = temporal_patch_size),
nn.LayerNorm(channels * patch_width * patch_height * temporal_patch_size),
nn.Linear(channels * patch_width * patch_height * temporal_patch_size, dim),
nn.LayerNorm(dim)
)
transformer_kwargs = dict(
dim = dim,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
peg = True,
peg_causal = True,
)
self.enc_spatial_transformer = Transformer(depth = spatial_depth, **transformer_kwargs)
self.enc_temporal_transformer = Transformer(depth = temporal_depth, **transformer_kwargs)
self.vq = VectorQuantize(dim = dim, codebook_size = codebook_size, use_cosine_sim = True)
self.dec_spatial_transformer = Transformer(depth = spatial_depth, **transformer_kwargs)
self.dec_temporal_transformer = Transformer(depth = temporal_depth, **transformer_kwargs)
self.to_pixels_first_frame = nn.Sequential(
nn.Linear(dim, channels * patch_width * patch_height),
Rearrange('b 1 h w (c p1 p2) -> b c 1 (h p1) (w p2)', p1 = patch_height, p2 = patch_width)
)
self.to_pixels = nn.Sequential(
nn.Linear(dim, channels * patch_width * patch_height * temporal_patch_size),
Rearrange('b t h w (c pt p1 p2) -> b c (t pt) (h p1) (w p2)', p1 = patch_height, p2 = patch_width, pt = temporal_patch_size),
)
# turn off GAN and perceptual loss if grayscale
self.vgg = None
self.discr = None
self.use_vgg_and_gan = use_vgg_and_gan
if not use_vgg_and_gan:
return
# perceptual loss
if exists(vgg):
self.vgg = vgg
else:
self.vgg = torchvision.models.vgg16(pretrained = True)
self.vgg.classifier = nn.Sequential(*self.vgg.classifier[:-2])
# gan related losses
self.discr = Discriminator(
image_size = self.image_size,
dim = discr_base_dim,
channels = channels,
attn_res_layers = discr_attn_res_layers
)
self.discr_loss = hinge_discr_loss if use_hinge_loss else bce_discr_loss
self.gen_loss = hinge_gen_loss if use_hinge_loss else bce_gen_loss
def calculate_video_token_mask(self, videos, video_frame_mask):
*_, h, w = videos.shape
ph, pw = self.patch_size
assert torch.all(((video_frame_mask.sum(dim = -1) - 1) % self.temporal_patch_size) == 0), 'number of frames must be divisible by temporal patch size, subtracting off the first frame'
first_frame_mask, rest_frame_mask = video_frame_mask[:, :1], video_frame_mask[:, 1:]
rest_vq_mask = rearrange(rest_frame_mask, 'b (f p) -> b f p', p = self.temporal_patch_size)
video_mask = torch.cat((first_frame_mask, rest_vq_mask.any(dim = -1)), dim = -1)
return repeat(video_mask, 'b f -> b (f hw)', hw = (h // ph) * (w // pw))
def get_video_patch_shape(self, num_frames, include_first_frame = True):
patch_frames = 0
if include_first_frame:
num_frames -= 1
patch_frames += 1
patch_frames += (num_frames // self.temporal_patch_size)
return (patch_frames, *self.patch_height_width)
@property
def image_num_tokens(self):
return int(self.image_size[0] / self.patch_size[0]) * int(self.image_size[1] / self.patch_size[1])
def frames_per_num_tokens(self, num_tokens):
tokens_per_frame = self.image_num_tokens
assert (num_tokens % tokens_per_frame) == 0, f'number of tokens must be divisible by number of tokens per frame {tokens_per_frame}'
assert (num_tokens > 0)
pseudo_frames = num_tokens // tokens_per_frame
return (pseudo_frames - 1) * self.temporal_patch_size + 1
def num_tokens_per_frames(self, num_frames, include_first_frame = True):
image_num_tokens = self.image_num_tokens
total_tokens = 0
if include_first_frame:
num_frames -= 1
total_tokens += image_num_tokens
assert (num_frames % self.temporal_patch_size) == 0
return total_tokens + int(num_frames / self.temporal_patch_size) * image_num_tokens
def copy_for_eval(self):
device = next(self.parameters()).device
vae_copy = copy.deepcopy(self.cpu())
if vae_copy.use_vgg_and_gan:
del vae_copy.discr
del vae_copy.vgg
vae_copy.eval()
return vae_copy.to(device)
@remove_vgg
def state_dict(self, *args, **kwargs):
return super().state_dict(*args, **kwargs)
@remove_vgg
def load_state_dict(self, *args, **kwargs):
return super().load_state_dict(*args, **kwargs)
def load(self, path):
path = Path(path)
assert path.exists()
pt = torch.load(str(path))
self.load_state_dict(pt)
def decode_from_codebook_indices(self, indices):
codes = self.vq.codebook[indices]
return self.decode(codes)
@property
def patch_height_width(self):
return self.image_size[0] // self.patch_size[0], self.image_size[1] // self.patch_size[1]
def encode(
self,
tokens
):
b = tokens.shape[0]
h, w = self.patch_height_width
video_shape = tuple(tokens.shape[:-1])
tokens = rearrange(tokens, 'b t h w d -> (b t) (h w) d')
attn_bias = self.spatial_rel_pos_bias(h, w, device = tokens.device)
tokens = self.enc_spatial_transformer(tokens, attn_bias = attn_bias, video_shape = video_shape)
tokens = rearrange(tokens, '(b t) (h w) d -> b t h w d', b = b, h = h , w = w)
# encode - temporal
tokens = rearrange(tokens, 'b t h w d -> (b h w) t d')
tokens = self.enc_temporal_transformer(tokens, video_shape = video_shape)
tokens = rearrange(tokens, '(b h w) t d -> b t h w d', b = b, h = h, w = w)
return tokens
def decode(
self,
tokens
):
b = tokens.shape[0]
h, w = self.patch_height_width
if tokens.ndim == 3:
tokens = rearrange(tokens, 'b (t h w) d -> b t h w d', h = h, w = w)
video_shape = tuple(tokens.shape[:-1])
# decode - temporal
tokens = rearrange(tokens, 'b t h w d -> (b h w) t d')
tokens = self.dec_temporal_transformer(tokens, video_shape = video_shape)
tokens = rearrange(tokens, '(b h w) t d -> b t h w d', b = b, h = h, w = w)
# decode - spatial
tokens = rearrange(tokens, 'b t h w d -> (b t) (h w) d')
attn_bias = self.spatial_rel_pos_bias(h, w, device = tokens.device)
tokens = self.dec_spatial_transformer(tokens, attn_bias = attn_bias, video_shape = video_shape)
tokens = rearrange(tokens, '(b t) (h w) d -> b t h w d', b = b, h = h , w = w)
# to pixels
first_frame_token, rest_frames_tokens = tokens[:, :1], tokens[:, 1:]
first_frame = self.to_pixels_first_frame(first_frame_token)
rest_frames = self.to_pixels(rest_frames_tokens)
recon_video = torch.cat((first_frame, rest_frames), dim = 2)
return recon_video
def forward(
self,
video,
mask = None,
return_recons = False,
return_recons_only = False,
return_discr_loss = False,
apply_grad_penalty = True,
return_only_codebook_ids = False
):
assert video.ndim in {4, 5}
is_image = video.ndim == 4
if is_image:
video = rearrange(video, 'b c h w -> b c 1 h w')
assert not exists(mask)
b, c, f, *image_dims, device = *video.shape, video.device
assert tuple(image_dims) == self.image_size
assert not exists(mask) or mask.shape[-1] == f
assert divisible_by(f - 1, self.temporal_patch_size), f'number of frames ({f}) minus one ({f - 1}) must be divisible by temporal patch size ({self.temporal_patch_size})'
first_frame, rest_frames = video[:, :, :1], video[:, :, 1:]
# derive patches
first_frame_tokens = self.to_patch_emb_first_frame(first_frame)
rest_frames_tokens = self.to_patch_emb(rest_frames)
tokens = torch.cat((first_frame_tokens, rest_frames_tokens), dim = 1)
# save height and width, for reshaping the tokens back to a video patch grid after quantization
shape = tokens.shape
*_, h, w, _ = shape
# encode - spatial
tokens = self.encode(tokens)
# quantize
tokens, packed_fhw_shape = pack([tokens], 'b * d')
vq_mask = None
if exists(mask):
vq_mask = self.calculate_video_token_mask(video, mask)
tokens, indices, commit_loss = self.vq(tokens, mask = vq_mask)
if return_only_codebook_ids:
indices, = unpack(indices, packed_fhw_shape, 'b *')
return indices
tokens = rearrange(tokens, 'b (t h w) d -> b t h w d', h = h, w = w)
recon_video = self.decode(tokens)
returned_recon = rearrange(recon_video, 'b c 1 h w -> b c h w') if is_image else recon_video.clone()
if return_recons_only:
return returned_recon
if exists(mask):
# variable lengthed video / images training
recon_loss = F.mse_loss(video, recon_video, reduction = 'none')
recon_loss = recon_loss[repeat(mask, 'b t -> b c t', c = c)]
recon_loss = recon_loss.mean()
else:
recon_loss = F.mse_loss(video, recon_video)
# prepare a random frame index to be chosen for discriminator and perceptual loss
pick_frame_logits = torch.randn(b, f)
if exists(mask):
mask_value = -torch.finfo(pick_frame_logits.dtype).max
pick_frame_logits = pick_frame_logits.masked_fill(~mask, mask_value)
frame_indices = pick_frame_logits.topk(1, dim = -1).indices
# whether to return discriminator loss
if return_discr_loss:
assert exists(self.discr), 'discriminator must exist to train it'
video = pick_video_frame(video, frame_indices)
recon_video = pick_video_frame(recon_video, frame_indices)
recon_video = recon_video.detach()
video.requires_grad_()
recon_video_discr_logits, video_discr_logits = map(self.discr, (recon_video, video))
discr_loss = self.discr_loss(recon_video_discr_logits, video_discr_logits)
if apply_grad_penalty:
gp = gradient_penalty(video, video_discr_logits)
loss = discr_loss + gp
if return_recons:
return loss, returned_recon
return loss
# early return if training on grayscale
if not self.use_vgg_and_gan:
if return_recons:
return recon_loss, returned_recon
return recon_loss
# perceptual loss
input_vgg_input = pick_video_frame(video, frame_indices)
recon_vgg_input = pick_video_frame(recon_video, frame_indices)
# handle grayscale for vgg
if video.shape[1] == 1:
input_vgg_input, recon_vgg_input = map(lambda t: repeat(t, 'b 1 ... -> b c ...', c = 3), (input_vgg_input, recon_vgg_input))
input_vgg_feats = self.vgg(input_vgg_input)
recon_vgg_feats = self.vgg(recon_vgg_input)
perceptual_loss = F.mse_loss(input_vgg_feats, recon_vgg_feats)
# generator loss
gen_loss = self.gen_loss(self.discr(recon_vgg_input))
# calculate adaptive weight
last_dec_layer = self.to_pixels[0].weight
norm_grad_wrt_gen_loss = grad_layer_wrt_loss(gen_loss, last_dec_layer).norm(p = 2)
norm_grad_wrt_perceptual_loss = grad_layer_wrt_loss(perceptual_loss, last_dec_layer).norm(p = 2)
adaptive_weight = safe_div(norm_grad_wrt_perceptual_loss, norm_grad_wrt_gen_loss)
adaptive_weight.clamp_(max = 1e4)
# combine losses
loss = recon_loss + perceptual_loss + commit_loss + adaptive_weight * gen_loss
if return_recons:
return loss, returned_recon
return loss
| phenaki-pytorch-main | phenaki_pytorch/cvivit.py |
from math import sqrt
from random import choice
from pathlib import Path
from shutil import rmtree
from beartype import beartype
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader, random_split
import torchvision.transforms as T
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
from einops import rearrange
from phenaki_pytorch.optimizer import get_optimizer
from ema_pytorch import EMA
from phenaki_pytorch.cvivit import CViViT
from phenaki_pytorch.data import ImageDataset, VideoDataset, video_tensor_to_gif
from accelerate import Accelerator, DistributedType
# helpers
def exists(val):
return val is not None
def noop(*args, **kwargs):
pass
def cycle(dl):
while True:
for data in dl:
yield data
def cast_tuple(t):
return t if isinstance(t, (tuple, list)) else (t,)
def yes_or_no(question):
answer = input(f'{question} (y/n) ')
return answer.lower() in ('yes', 'y')
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
# main trainer class
@beartype
class CViViTTrainer(nn.Module):
def __init__(
self,
vae: CViViT,
*,
num_train_steps,
batch_size,
folder,
train_on_images = False,
num_frames = 17,
lr = 3e-4,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
discr_max_grad_norm = None,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
valid_frac = 0.05,
random_split_seed = 42,
use_ema = True,
ema_beta = 0.995,
ema_update_after_step = 0,
ema_update_every = 1,
apply_grad_penalty_every = 4,
accelerate_kwargs: dict = dict()
):
super().__init__()
image_size = vae.image_size
self.accelerator = Accelerator(**accelerate_kwargs)
self.vae = vae
self.use_ema = use_ema
if self.is_main and use_ema:
self.ema_vae = EMA(vae, update_after_step = ema_update_after_step, update_every = ema_update_every)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
all_parameters = set(vae.parameters())
discr_parameters = set(vae.discr.parameters())
vae_parameters = all_parameters - discr_parameters
self.vae_parameters = vae_parameters
self.optim = get_optimizer(vae_parameters, lr = lr, wd = wd)
self.discr_optim = get_optimizer(discr_parameters, lr = lr, wd = wd)
self.max_grad_norm = max_grad_norm
self.discr_max_grad_norm = discr_max_grad_norm
# create dataset
dataset_klass = ImageDataset if train_on_images else VideoDataset
if train_on_images:
self.ds = ImageDataset(folder, image_size)
else:
self.ds = VideoDataset(folder, image_size, num_frames = num_frames)
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with {len(self.valid_ds)} randomly split samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
# dataloader
self.dl = DataLoader(
self.ds,
batch_size = batch_size,
shuffle = True
)
self.valid_dl = DataLoader(
self.valid_ds,
batch_size = batch_size,
shuffle = True
)
# prepare with accelerator
(
self.vae,
self.optim,
self.discr_optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.vae,
self.optim,
self.discr_optim,
self.dl,
self.valid_dl
)
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.apply_grad_penalty_every = apply_grad_penalty_every
self.results_folder = Path(results_folder)
if len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
def save(self, path):
if not self.accelerator.is_local_main_process:
return
pkg = dict(
model = self.accelerator.get_state_dict(self.vae),
optim = self.optim.state_dict(),
discr_optim = self.discr_optim.state_dict()
)
torch.save(pkg, path)
def load(self, path):
path = Path(path)
assert path.exists()
pkg = torch.load(path)
vae = self.accelerator.unwrap_model(self.vae)
vae.load_state_dict(pkg['model'])
self.optim.load_state_dict(pkg['optim'])
self.discr_optim.load_state_dict(pkg['discr_optim'])
def print(self, msg):
self.accelerator.print(msg)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def train_step(self):
device = self.device
steps = int(self.steps.item())
apply_grad_penalty = not (steps % self.apply_grad_penalty_every)
self.vae.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
img = next(self.dl_iter)
img = img.to(device)
with self.accelerator.autocast():
loss = self.vae(
img,
apply_grad_penalty = apply_grad_penalty
)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.vae.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# update discriminator
if exists(self.vae.discr):
self.discr_optim.zero_grad()
for _ in range(self.grad_accum_every):
img = next(self.dl_iter)
img = img.to(device)
loss = self.vae(img, return_discr_loss = True)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'discr_loss': loss.item() / self.grad_accum_every})
if exists(self.discr_max_grad_norm):
self.accelerator.clip_grad_norm_(self.vae.discr.parameters(), self.discr_max_grad_norm)
self.discr_optim.step()
# log
self.print(f"{steps}: vae loss: {logs['loss']} - discr loss: {logs['discr_loss']}")
# update exponential moving averaged generator
if self.is_main and self.use_ema:
self.ema_vae.update()
# sample results every so often
if self.is_main and not (steps % self.save_results_every):
vaes_to_evaluate = ((self.vae, str(steps)),)
if self.use_ema:
vaes_to_evaluate = ((self.ema_vae.ema_model, f'{steps}.ema'),) + vaes_to_evaluate
for model, filename in vaes_to_evaluate:
model.eval()
valid_data = next(self.valid_dl_iter)
is_video = valid_data.ndim == 5
valid_data = valid_data.to(device)
recons = model(valid_data, return_recons_only = True)
# if is video, save gifs to folder
# else save a grid of images
if is_video:
sampled_videos_path = self.results_folder / f'samples.{filename}'
(sampled_videos_path).mkdir(parents = True, exist_ok = True)
for tensor in recons.unbind(dim = 0):
video_tensor_to_gif(tensor, str(sampled_videos_path / f'{filename}.gif'))
else:
imgs_and_recons = torch.stack((valid_data, recons), dim = 0)
imgs_and_recons = rearrange(imgs_and_recons, 'r b ... -> (b r) ...')
imgs_and_recons = imgs_and_recons.detach().cpu().float().clamp(0., 1.)
grid = make_grid(imgs_and_recons, nrow = 2, normalize = True, value_range = (0, 1))
logs['reconstructions'] = grid
save_image(grid, str(self.results_folder / f'{filename}.png'))
self.print(f'{steps}: saving to {str(self.results_folder)}')
# save model every so often
if self.is_main and not (steps % self.save_model_every):
state_dict = self.vae.state_dict()
model_path = str(self.results_folder / f'vae.{steps}.pt')
torch.save(state_dict, model_path)
if self.use_ema:
ema_state_dict = self.ema_vae.state_dict()
model_path = str(self.results_folder / f'vae.{steps}.ema.pt')
torch.save(ema_state_dict, model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
device = next(self.vae.parameters()).device
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
| phenaki-pytorch-main | phenaki_pytorch/cvivit_trainer.py |
import torch
import transformers
from transformers import T5Tokenizer, T5EncoderModel, T5Config
# fewer warning messages, since only the encoder is used
transformers.logging.set_verbosity_error()
# helper functions
def exists(val):
return val is not None
# config
MAX_LENGTH = 256
DEFAULT_T5_NAME = 'google/t5-v1_1-base'
T5_CONFIGS = {}
# singleton globals
def get_tokenizer(name):
tokenizer = T5Tokenizer.from_pretrained(name)
return tokenizer
def get_model(name):
model = T5EncoderModel.from_pretrained(name)
return model
def get_model_and_tokenizer(name):
global T5_CONFIGS
if name not in T5_CONFIGS:
T5_CONFIGS[name] = dict()
if "model" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["model"] = get_model(name)
if "tokenizer" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["tokenizer"] = get_tokenizer(name)
return T5_CONFIGS[name]['model'], T5_CONFIGS[name]['tokenizer']
def get_encoded_dim(name):
if name not in T5_CONFIGS:
config = T5Config.from_pretrained(name)
T5_CONFIGS[name] = dict(config = config)
elif "config" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["config"]
elif "model" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["model"].config
else:
raise ValueError(f'unknown t5 name {name}')
return config.d_model
# encoding text
def t5_encode_text(
texts,
name = DEFAULT_T5_NAME,
output_device = None
):
t5, tokenizer = get_model_and_tokenizer(name)
if torch.cuda.is_available():
t5 = t5.cuda()
device = next(t5.parameters()).device
encoded = tokenizer.batch_encode_plus(
texts,
return_tensors = 'pt',
padding = 'longest',
max_length = MAX_LENGTH,
truncation = True
)
input_ids = encoded.input_ids.to(device)
attn_mask = encoded.attention_mask.to(device)
t5.eval()
with torch.no_grad():
output = t5(input_ids = input_ids, attention_mask = attn_mask)
encoded_text = output.last_hidden_state.detach()
attn_mask = attn_mask[..., None].bool()
if not exists(output_device):
encoded_text = encoded_text.masked_fill(~attn_mask, 0.)
return encoded_text
encoded_text = encoded_text.to(output_device)
attn_mask = attn_mask.to(output_device)
encoded_text = encoded_text.masked_fill(~attn_mask, 0.)
return encoded_text
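# illustrative usage sketch (not part of the original file): encode a list of captions into
# padded T5 embeddings of shape (batch, seq_len, d_model), where d_model equals
# get_encoded_dim(name) - 768 for the default google/t5-v1_1-base encoder
def _example_t5_encode():
    return t5_encode_text(['a cat chasing a red laser pointer'])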
| phenaki-pytorch-main | phenaki_pytorch/t5.py |
import math
import functools
from contextlib import nullcontext
from functools import partial, wraps
from typing import Optional, List, Union
from beartype import beartype
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, pack, unpack
from einops.layers.torch import Rearrange
from phenaki_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME
from phenaki_pytorch.cvivit import CViViT
from phenaki_pytorch.attention import Attention, Transformer, ContinuousPositionBias
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, length = 1):
return val if isinstance(val, tuple) else (val,) * length
def reduce_mult(arr):
return functools.reduce(lambda x, y: x * y, arr)
def divisible_by(numer, denom):
return (numer % denom) == 0
# tensor helpers
def get_mask_subset_with_prob(mask, prob):
batch, seq_len, device = *mask.shape, mask.device
num_tokens = mask.sum(dim = -1)
num_pads = seq_len - num_tokens
num_masked = (prob * num_tokens).round().clamp(min = 1)
randperm_indices = torch.rand((batch, seq_len), device = device).argsort(dim = -1)
randperm_indices -= rearrange(num_pads, 'b -> b 1')
randperm_indices.masked_fill_(randperm_indices < 0, seq_len) # set to max out of bounds, so never chosen
mask_subset = randperm_indices < rearrange(num_masked, 'b -> b 1')
return mask_subset
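# illustrative sketch (not part of the original file): sample a random subset of the
# non-padded positions of each sequence - here roughly 25% of them, with at least one
# position selected per row
def _example_mask_subset():
    mask = torch.ones(2, 8, dtype = torch.bool)
    return get_mask_subset_with_prob(mask, 0.25)  # bool tensor with 2 True values per row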
# decorators
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# classifier free guidance functions
def uniform(shape, device):
return torch.zeros(shape, device = device).float().uniform_(0, 1)
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
# tensor helper functions
def log(t, eps = 1e-10):
return torch.log(t + eps)
# sampling helpers
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
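# illustrative sketch (not part of the original file): the helpers above implement top-k
# filtering (keep roughly the top (1 - thres) fraction of logits, set the rest to -inf)
# followed by gumbel sampling, which draws from the tempered softmax of the surviving logits
def _example_filtered_sample():
    logits = torch.randn(2, 1024)
    filtered = top_k(logits, thres = 0.9)
    return gumbel_sample(filtered, temperature = 1.)  # -> (2,) sampled token ids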
# mask git
class MaskGit(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
max_seq_len,
gradient_shrink_alpha = 0.1,
heads = 8,
dim_head = 64,
unconditional = False,
attn_dropout = 0.,
ff_dropout = 0.,
**kwargs
):
super().__init__()
self.dim = dim
self.mask_id = num_tokens
self.unconditional = unconditional
self.token_emb = nn.Embedding(num_tokens + 1, dim) # last token is used as mask_id
self.max_seq_len = max_seq_len
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.gradient_shrink_alpha = gradient_shrink_alpha # used with great success in cogview and GLM 130B attention net
self.continuous_pos_bias = ContinuousPositionBias(dim = dim_head, heads = heads, num_dims = 3)
self.transformer = Transformer(
dim = dim,
attn_num_null_kv = 2,
has_cross_attn = not self.unconditional,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
peg = True,
**kwargs
)
self.to_logits = nn.Linear(dim, num_tokens)
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(
self,
x,
cond_drop_prob = 0.,
text_mask = None,
video_mask = None,
video_patch_shape = None,
return_embeds = False,
**kwargs
):
assert x.ndim in {2, 4}, 'video token ids must be of shape (batch, seq) or (batch, frame, height, width)'
if x.ndim == 4:
video_patch_shape = x.shape[1:]
x = rearrange(x, 'b ... -> b (...)')
b, n, device = *x.shape, x.device
if not exists(text_mask):
text_mask = torch.ones((b, n), device = device, dtype = torch.bool)
assert exists(video_patch_shape), 'video patch shape must be given'
rel_pos_bias = self.continuous_pos_bias(*video_patch_shape, device = device)
if cond_drop_prob > 0:
keep_mask = prob_mask_like((b,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
video_shape = (b, *video_patch_shape)
x = self.token_emb(x)
assert n <= self.max_seq_len, f'the video token sequence length you are passing in ({n}) is greater than the `max_seq_len` ({self.max_seq_len}) set on your `MaskGit`'
x = self.pos_emb(torch.arange(n, device = device)) + x
x = x * self.gradient_shrink_alpha + x.detach() * (1 - self.gradient_shrink_alpha)
x = self.transformer(
x,
video_shape = video_shape,
attn_bias = rel_pos_bias,
self_attn_mask = video_mask,
cross_attn_context_mask = text_mask,
**kwargs
)
if return_embeds:
return x
return self.to_logits(x)
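# note on forward_with_cond_scale above: this is standard classifier free guidance. the logits are
# computed once with conditioning kept (cond_drop_prob = 0) and once fully dropped (cond_drop_prob = 1),
# then combined as null_logits + (logits - null_logits) * cond_scale, so cond_scale = 1 recovers the
# purely conditional logits and larger values extrapolate away from the unconditional prediction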
# token critic
class TokenCritic(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
max_seq_len,
has_cross_attn = False,
attn_dropout = 0.,
ff_dropout = 0.,
**kwargs
):
super().__init__()
self.has_cross_attn = has_cross_attn
self.mask_id = num_tokens
self.token_emb = nn.Embedding(num_tokens + 1, dim) # last token is used as mask_id
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.transformer = Transformer(
dim = dim,
peg = True,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
has_cross_attn = has_cross_attn,
**kwargs
)
self.to_logits = nn.Sequential(
nn.Linear(dim, 1),
Rearrange('... 1 -> ...')
)
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(
self,
x,
text_mask = None,
cond_drop_prob = None,
context = None,
video_mask = None,
video_patch_shape = None,
**kwargs
):
if exists(video_patch_shape):
video_shape = (x.shape[0], *video_patch_shape)
else:
video_shape = x.shape
x = rearrange(x, 'b ... -> b (...)')
b, n, device = *x.shape, x.device
if not exists(text_mask):
text_mask = torch.ones((b, n), device = device, dtype = torch.bool)
if exists(context) and cond_drop_prob > 0:
keep_mask = prob_mask_like((b,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
x = self.token_emb(x)
x = self.pos_emb(torch.arange(n, device = device)) + x
x = self.transformer(
x,
video_shape = video_shape,
context = context,
self_attn_mask = video_mask,
cross_attn_context_mask = text_mask,
**kwargs
)
return self.to_logits(x)
# self critic - inspired by Nijkamp et al. (https://aclanthology.org/2021.naacl-main.409/)
@beartype
class SelfCritic(nn.Module):
def __init__(
self,
maskgit: MaskGit
):
super().__init__()
self.maskgit = maskgit
self.to_pred = nn.Sequential(
nn.Linear(maskgit.dim, 1),
Rearrange('... 1 -> ...')
)
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(self, x, *args, **kwargs):
embeds = self.maskgit(x, *args, return_embeds = True, **kwargs)
return self.to_pred(embeds)
# main class
@beartype
class Phenaki(nn.Module):
def __init__(
self,
*,
maskgit: MaskGit,
cvivit: CViViT,
critic: Optional[Union[TokenCritic, SelfCritic]] = None,
steps = 18, # 18 is the ideal steps with token critic
t5_name = DEFAULT_T5_NAME,
sample_temperature = 0.,
text_embed_dim = None,
cond_drop_prob = 0.25,
max_text_len = 128,
self_token_critic = False,
critic_loss_weight = 1.,
critic_noise_anneal_schedule = 'decay',
critic_train_sample_temperature = 1.
):
super().__init__()
self.cvivit = cvivit.copy_for_eval()
self.maskgit = maskgit
self.unconditional = maskgit.unconditional
self.mask_id = maskgit.mask_id
assert not (self_token_critic and exists(critic))
# sampling
if self_token_critic:
critic = SelfCritic(maskgit)
if exists(critic):
critic = critic.eval()
assert not exists(critic) or self_token_critic or (not maskgit.unconditional) == critic.has_cross_attn
self.critic = critic
self.critic_noise_anneal_schedule = critic_noise_anneal_schedule
self.critic_loss_weight = critic_loss_weight
self.critic_train_sample_temperature = critic_train_sample_temperature
self.steps = steps
self.sample_temperature = sample_temperature
# text conditioning
text_embed_dim = default(text_embed_dim, get_encoded_dim(t5_name))
self.encode_texts = partial(t5_encode_text, name = t5_name)
self.text_embed_dim = text_embed_dim
self.max_text_len = max_text_len
assert cond_drop_prob > 0.
self.cond_drop_prob = cond_drop_prob # classifier free guidance for transformers - @crowsonkb
def sample_images(
self,
*,
texts: Union[List[str], str] = None,
batch_size = 1,
cond_scale = 3.,
starting_temperature = 0.9,
noise_K = 1.
):
single_framed_video = self.sample(
texts = texts,
num_frames = 1,
cond_scale = cond_scale,
starting_temperature = starting_temperature,
noise_K = noise_K
)
return rearrange(single_framed_video, '... c 1 h w -> ... c h w')
@eval_decorator
@torch.no_grad()
def sample(
self,
*,
num_frames,
texts: Union[List[str], str] = None,
prime_frames = None,
batch_size = 1,
cond_scale = 3.,
starting_temperature = 0.9,
noise_K = 1. # hyperparameter for noising of critic score in section 3.2 of token-critic paper, need to find correct value
):
device = next(self.parameters()).device
# derive the priming token ids, to be prepended to the input being demasked by mask-git at each round
has_prime = exists(prime_frames)
prime_token_ids = None
prime_token_length = 0
prime_num_frames = 0
if has_prime:
with torch.no_grad():
prime_token_ids = self.cvivit(prime_frames, return_only_codebook_ids = True)
patch_shape = prime_token_ids.shape[1:]
prime_token_ids = rearrange(prime_token_ids, 'b ... -> b (...)')
prime_token_length = prime_token_ids.shape[-1]
prime_num_frames = prime_frames.shape[2]
num_tokens = self.cvivit.num_tokens_per_frames(num_frames, include_first_frame = not exists(prime_frames))
# get text embeds and mask
text_embeds = text_mask = None
if exists(texts):
if isinstance(texts, str):
texts = [texts]
with torch.no_grad():
text_embeds = self.encode_texts(texts, output_device = device)
text_mask = torch.any(text_embeds != 0, dim = -1)
batch_size = len(texts)
# derive video patch shape
patch_shape = self.cvivit.get_video_patch_shape(num_frames + prime_num_frames, include_first_frame = True)
# get video token ids
shape = (batch_size, num_tokens)
video_token_ids = torch.full(shape, self.mask_id, device = device)
mask = torch.ones(shape, device = device, dtype = torch.bool)
scores = None # keeping track of the confidence or critic scores, determining what should be masked at the next iteration
for step in range(self.steps):
is_first_step = step == 0
is_last_step = step == (self.steps - 1)
steps_til_x0 = self.steps - (step + 1)
if not is_first_step and exists(scores):
time = torch.full((1,), step / self.steps, device = device)
num_tokens_mask = (num_tokens * torch.cos(time * math.pi * 0.5)).round().long().clamp(min = 1)
_, indices = scores.topk(num_tokens_mask.item(), dim = -1)
mask = torch.zeros(shape, device = device).scatter(1, indices, 1).bool()
video_token_ids = torch.where(mask, self.mask_id, video_token_ids)
input_token_ids = video_token_ids if not has_prime else torch.cat((prime_token_ids, video_token_ids), dim = -1)
logits = self.maskgit.forward_with_cond_scale(
input_token_ids,
video_patch_shape = patch_shape,
context = text_embeds,
text_mask = text_mask,
cond_scale = cond_scale
)
if has_prime:
logits = logits[:, prime_token_length:]
temperature = starting_temperature * (steps_til_x0 / self.steps)
pred_video_ids = gumbel_sample(logits, temperature = temperature)
video_token_ids = torch.where(mask, pred_video_ids, video_token_ids)
if not is_last_step:
if exists(self.critic):
critic_kwargs = dict(
video_patch_shape = patch_shape,
context = text_embeds,
text_mask = text_mask,
cond_scale = cond_scale
)
with torch.no_grad():
critic_input_token_ids = video_token_ids if not has_prime else torch.cat((prime_token_ids, video_token_ids), dim = -1)
scores = self.critic.forward_with_cond_scale(
critic_input_token_ids,
**critic_kwargs
)
if has_prime:
scores = scores[:, prime_token_length:]
# different types of annealing
if self.critic_noise_anneal_schedule == 'fixed':
noise_multiplier = 1.
elif self.critic_noise_anneal_schedule == 'decay':
noise_multiplier = steps_til_x0 / self.steps
elif self.critic_noise_anneal_schedule == 'increase':
noise_multiplier = (step + 1) / self.steps
else:
raise ValueError(f'invalid critic noise anneal schedule name {self.critic_noise_anneal_schedule}')
# noise to add to critic scores
noise = noise_K * (uniform(scores.shape, device) - 0.5) * noise_multiplier
scores = scores + noise
else:
probs = logits.softmax(dim = -1)
scores = probs.gather(2, rearrange(pred_video_ids, '... -> ... 1'))
scores = 1 - rearrange(scores, '... 1 -> ...')
scores = torch.where(mask, scores, -1e4)
if has_prime:
video_token_ids = torch.cat((prime_token_ids, video_token_ids), dim = -1)
video = self.cvivit.decode_from_codebook_indices(video_token_ids)
if has_prime:
video = video[:, :, prime_num_frames:]
return video
def forward(
self,
videos = None,
*,
texts: Optional[List[str]] = None,
video_codebook_ids = None,
video_frame_mask = None,
text_embeds = None,
cond_drop_prob = None,
only_train_generator = False,
only_train_critic = False
):
assert not (only_train_generator and only_train_critic)
assert exists(videos) ^ exists(video_codebook_ids), 'either raw videos or precomputed video codebook ids must be given, but not both'
assert not (exists(videos) and not exists(self.cvivit)), 'cvivit must be provided if one wants to encode the videos live during training'
assert (exists(text_embeds) ^ exists(texts)) ^ self.unconditional, 'either raw text or text embeds must be given, and if unconditional, neither should be given'
assert not (exists(text_embeds) and text_embeds.shape[-1] != self.text_embed_dim), 'text embedding dimension is not correct'
if not exists(video_codebook_ids):
assert videos.ndim in {4, 5}
if videos.ndim == 4:
videos = rearrange(videos, 'b c h w -> b c 1 h w')
with torch.no_grad():
self.cvivit.eval()
video_codebook_ids = self.cvivit(videos, return_only_codebook_ids = True)
# derive text embeddings, mask, conditional dropout
text_mask = None
cond_drop_prob = 0. if self.unconditional else default(cond_drop_prob, self.cond_drop_prob) # use the configured conditional dropout unless explicitly overridden; no dropout when unconditional
if not self.unconditional:
if not exists(text_embeds):
with torch.no_grad():
text_embeds = self.encode_texts(texts, output_device = video_codebook_ids.device)
text_mask = torch.any(text_embeds != 0, dim = -1) # save the researcher from having to think about mask, by assuming if all of the feature dimension is 0, it is masked out
# condition dropout for Katherine's (@crowsonkb) version of classifier free guidance for transformers
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
# calculate video frame mask
video_mask = None
if exists(video_frame_mask):
video_mask = self.cvivit.calculate_video_token_mask(
videos,
video_frame_mask = video_frame_mask
)
# train maskgit with text condition
video_codebook_ids, packed_shape = pack([video_codebook_ids], 'b *')
batch, seq, device = *video_codebook_ids.shape, video_codebook_ids.device
rand_step = torch.randint(0, self.steps, (batch,), device = device)
mask_token_prob = torch.cos(rand_step * math.pi * 0.5 / self.steps) # cosine schedule was best
if not exists(video_mask):
video_mask = torch.ones((batch, seq), device = device).bool()
mask_token_mask = get_mask_subset_with_prob(video_mask, mask_token_prob)
masked_input = torch.where(mask_token_mask, self.mask_id, video_codebook_ids)
masked_input, = unpack(masked_input, packed_shape, 'b *')
maskgit_forward_context = torch.no_grad if only_train_critic else nullcontext
with maskgit_forward_context():
logits = self.maskgit(
masked_input,
video_mask = video_mask,
cond_drop_prob = cond_drop_prob,
text_mask = text_mask,
context = text_embeds
)
if not only_train_critic:
loss = F.cross_entropy(
logits[mask_token_mask],
video_codebook_ids[mask_token_mask]
)
if not exists(self.critic) or only_train_generator:
return loss
# sample the predicted masked tokens
pred_video_ids = gumbel_sample(logits, temperature = self.critic_train_sample_temperature)
# derive critic input
critic_input = torch.where(mask_token_mask, pred_video_ids, video_codebook_ids)
# critic may or may not need text conditioning
critic_input, = unpack(critic_input, packed_shape, 'b *')
pred_fake_or_real_logits = self.critic(
critic_input,
video_mask = video_mask,
cond_drop_prob = cond_drop_prob,
text_mask = text_mask,
context = text_embeds
)
critic_labels = (video_codebook_ids != pred_video_ids).float()
critic_loss = F.binary_cross_entropy_with_logits(
pred_fake_or_real_logits,
critic_labels
)
critic_loss_weight = self.critic_loss_weight
if only_train_critic:
loss = 0
critic_loss_weight = 1.
return loss + critic_loss * critic_loss_weight
# make video function
@beartype
def make_video(
phenaki: Phenaki,
texts: List[str],
num_frames,
prime_lengths
):
num_scenes = len(texts)
num_frames = cast_tuple(num_frames, num_scenes)
prime_lengths = cast_tuple(prime_lengths, num_scenes - 1)
prime_lengths = (*prime_lengths, 0) # last scene needs no priming
entire_video = []
video_prime = None
scenes = []
for text, scene_num_frames, next_scene_prime_length in zip(texts, num_frames, prime_lengths):
video = phenaki.sample(texts = text, prime_frames = video_prime, num_frames = scene_num_frames)
scenes.append(video)
video_prime = video[:, :, -next_scene_prime_length:]
return torch.cat(scenes, dim = 2), scenes
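# a minimal usage sketch for make_video (illustrative only), assuming `phenaki` is an already
# constructed Phenaki instance; each scene after the first is primed on the last `prime_lengths`
# frames of the previous scene
#
# video, scenes = make_video(
#     phenaki,
#     texts = ['a puppy runs across the grass', 'the puppy chases after a ball'],
#     num_frames = (17, 17),
#     prime_lengths = 5
# )
# video.shape # (batch, channels, total frames, height, width)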
| phenaki-pytorch-main | phenaki_pytorch/phenaki_pytorch.py |
from phenaki_pytorch.phenaki_pytorch import Phenaki, CViViT, MaskGit, TokenCritic, make_video
from phenaki_pytorch.cvivit_trainer import CViViTTrainer
from phenaki_pytorch.phenaki_trainer import PhenakiTrainer
| phenaki-pytorch-main | phenaki_pytorch/__init__.py |
from torch.optim import AdamW, Adam
def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
def get_optimizer(
params,
lr = 1e-4,
wd = 1e-2,
betas = (0.9, 0.99),
eps = 1e-8,
filter_by_requires_grad = False,
group_wd_params = True,
**kwargs
):
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
if wd == 0:
return Adam(params, lr = lr, betas = betas, eps = eps)
if group_wd_params:
wd_params, no_wd_params = separate_weight_decayable_params(params)
params = [
{'params': wd_params},
{'params': no_wd_params, 'weight_decay': 0},
]
return AdamW(params, lr = lr, weight_decay = wd, betas = betas, eps = eps)
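# minimal usage sketch (not part of the original module): parameters with fewer than 2 dimensions
# (biases, norm scales) are placed in a group with weight decay disabled
if __name__ == '__main__':
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 4))
    optimizer = get_optimizer(model.parameters(), lr = 3e-4, wd = 1e-2)
    # expect two param groups: decayed weight matrices, and undecayed biases / norm parameters
    print([len(group['params']) for group in optimizer.param_groups])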
| phenaki-pytorch-main | phenaki_pytorch/optimizer.py |
from pathlib import Path
import cv2
from PIL import Image
from functools import partial
from typing import Tuple, List
from beartype.door import is_bearable
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader as PytorchDataLoader
from torchvision import transforms as T, utils
from einops import rearrange
# helper functions
def exists(val):
return val is not None
def identity(t, *args, **kwargs):
return t
def pair(val):
return val if isinstance(val, tuple) else (val, val)
def cast_num_frames(t, *, frames):
f = t.shape[1]
if f == frames:
return t
if f > frames:
return t[:, :frames]
return F.pad(t, (0, 0, 0, 0, 0, frames - f))
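# note on the F.pad call above: the pad tuple is ordered from the last dimension backwards, so
# (0, 0, 0, 0, 0, frames - f) leaves width and height untouched and zero-pads the frame dimension
# (dim 1 of a (channels, frames, height, width) tensor) up to the requested number of frames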
def convert_image_to_fn(img_type, image):
if image.mode != img_type:
return image.convert(img_type)
return image
# image related helper functions and dataset
class ImageDataset(Dataset):
def __init__(
self,
folder,
image_size,
exts = ['jpg', 'jpeg', 'png']
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
print(f'{len(self.paths)} training samples found at {folder}')
self.transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.Resize(image_size),
T.RandomHorizontalFlip(),
T.CenterCrop(image_size),
T.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# tensor of shape (channels, frames, height, width) -> gif
# handle reading and writing gif
CHANNELS_TO_MODE = {
1 : 'L',
3 : 'RGB',
4 : 'RGBA'
}
def seek_all_images(img, channels = 3):
assert channels in CHANNELS_TO_MODE, f'channels {channels} invalid'
mode = CHANNELS_TO_MODE[channels]
i = 0
while True:
try:
img.seek(i)
yield img.convert(mode)
except EOFError:
break
i += 1
# tensor of shape (channels, frames, height, width) -> gif
def video_tensor_to_gif(
tensor,
path,
duration = 120,
loop = 0,
optimize = True
):
images = map(T.ToPILImage(), tensor.unbind(dim = 1))
first_img, *rest_imgs = images
first_img.save(path, save_all = True, append_images = rest_imgs, duration = duration, loop = loop, optimize = optimize)
return images
# gif -> (channels, frame, height, width) tensor
def gif_to_tensor(
path,
channels = 3,
transform = T.ToTensor()
):
img = Image.open(path)
tensors = tuple(map(transform, seek_all_images(img, channels = channels)))
return torch.stack(tensors, dim = 1)
# handle reading and writing mp4
def video_to_tensor(
path: str, # Path of the video to be imported
num_frames = -1, # Number of frames to be stored in the output tensor
crop_size = None
) -> torch.Tensor: # shape (1, channels, frames, height, width)
video = cv2.VideoCapture(path)
frames = []
check = True
while check:
check, frame = video.read()
if not check:
continue
if exists(crop_size):
frame = crop_center(frame, *pair(crop_size))
frames.append(rearrange(frame, '... -> 1 ...'))
frames = np.array(np.concatenate(frames[:-1], axis = 0)) # convert list of frames to numpy array
frames = rearrange(frames, 'f h w c -> c f h w')
frames_torch = torch.tensor(frames).float()
return frames_torch[:, :num_frames, :, :]
def tensor_to_video(
tensor, # Pytorch video tensor
path: str, # Path of the video to be saved
fps = 25, # Frames per second for the saved video
video_format = 'MP4V'
):
# Write the video tensor out frame by frame with OpenCV.
tensor = tensor.cpu()
num_frames, height, width = tensor.shape[-3:]
fourcc = cv2.VideoWriter_fourcc(*video_format) # Changes in this line can allow for different video formats.
video = cv2.VideoWriter(path, fourcc, fps, (width, height))
frames = []
for idx in range(num_frames):
numpy_frame = tensor[:, idx, :, :].numpy()
numpy_frame = np.uint8(rearrange(numpy_frame, 'c h w -> h w c'))
video.write(numpy_frame)
video.release()
cv2.destroyAllWindows()
return video
def crop_center(
img, # tensor
cropx, # Length of the final image in the x direction.
cropy # Length of the final image in the y direction.
) -> torch.Tensor:
y, x, c = img.shape
startx = x // 2 - cropx // 2
starty = y // 2 - cropy // 2
return img[starty:(starty + cropy), startx:(startx + cropx), :]
# video dataset
class VideoDataset(Dataset):
def __init__(
self,
folder,
image_size,
channels = 3,
num_frames = 17,
horizontal_flip = False,
force_num_frames = True,
exts = ['gif', 'mp4']
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.channels = channels
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
self.transform = T.Compose([
T.Resize(image_size),
T.RandomHorizontalFlip() if horizontal_flip else T.Lambda(identity),
T.CenterCrop(image_size),
T.ToTensor()
])
# functions to transform video path to tensor
self.gif_to_tensor = partial(gif_to_tensor, channels = self.channels, transform = self.transform)
self.mp4_to_tensor = partial(video_to_tensor, crop_size = self.image_size)
self.cast_num_frames_fn = partial(cast_num_frames, frames = num_frames) if force_num_frames else identity
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
ext = path.suffix
if ext == '.gif':
tensor = self.gif_to_tensor(path)
elif ext == '.mp4':
tensor = self.mp4_to_tensor(str(path))
else:
raise ValueError(f'unknown extension {ext}')
return self.cast_num_frames_fn(tensor)
# override dataloader to be able to collate strings
def collate_tensors_and_strings(data):
if is_bearable(data, List[torch.Tensor]):
return (torch.stack(data, dim = 0),)
data = zip(*data)
output = []
for datum in data:
if is_bearable(datum, Tuple[torch.Tensor, ...]):
datum = torch.stack(datum, dim = 0)
elif is_bearable(datum, Tuple[str, ...]):
datum = list(datum)
else:
raise ValueError('detected invalid type being passed from dataset')
output.append(datum)
return tuple(output)
def DataLoader(*args, **kwargs):
return PytorchDataLoader(*args, collate_fn = collate_tensors_and_strings, **kwargs)
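# minimal usage sketch (illustrative; the folder path is hypothetical): the custom collate function
# stacks video tensors and keeps any string fields as lists, so each batch comes back as a tuple
if __name__ == '__main__':
    dataset = VideoDataset('./path/to/videos', image_size = 128, num_frames = 17)
    loader = DataLoader(dataset, batch_size = 4, shuffle = True)
    for (videos,) in loader:
        print(videos.shape) # (batch, channels, num_frames, image_size, image_size)
        break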
| phenaki-pytorch-main | phenaki_pytorch/data.py |
from setuptools import setup, find_packages
setup(
name = 'adjacent-attention-pytorch',
packages = find_packages(),
version = '0.0.12',
license='MIT',
description = 'Adjacent Attention Network - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/adjacent-attention-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'graph neural network',
'transformers'
],
install_requires=[
'einops>=0.3',
'torch>=1.6',
'isab-pytorch<0.2'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| adjacent-attention-network-main | setup.py |
from adjacent_attention_network.adjacent_attention_network import AdjacentAttentionNetwork
| adjacent-attention-network-main | adjacent_attention_network/__init__.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from isab_pytorch import ISAB
# helpers
def exists(val):
return val is not None
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x, **kwargs):
return self.net(x)
# adjacent attention class
class AdjacentAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 4,
dropout = 0.
):
super().__init__()
inner_dim = dim_head * heads
self.scale = dim_head ** -0.5
self.heads = heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.null_k = nn.Parameter(torch.randn(heads, dim_head))
self.null_v = nn.Parameter(torch.randn(heads, dim_head))
self.dropout = nn.Dropout(dropout)
def forward(
self,
x,
adj_kv_indices,
mask
):
b, n, d, h = *x.shape, self.heads
flat_indices = repeat(adj_kv_indices, 'b n a -> (b h) (n a)', h = h)
# derive query, key, value
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# gather keys and values according to adjacency matrix
k, v = map(lambda t: rearrange(t, 'b h n d -> (b h) n d'), (k, v))
k = batched_index_select(k, flat_indices)
v = batched_index_select(v, flat_indices)
k, v = map(lambda t: rearrange(t, '(b h) (n a) d -> b h n a d', h = h, n = n), (k, v))
# add null key / value, so a node can attend to nothing
# this appears in the GNN literature under other names
nk, nv = map(lambda t: rearrange(t, 'h d -> () h () () d').expand(b, -1, n, 1, -1), (self.null_k, self.null_v))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
mask = F.pad(mask, (1, 0), value = 1)
# similarity of each node to its neighbors
sim = einsum('b h n d, b h n a d -> b h n a', q, k) * self.scale
# mask out neighbors that are just padding
mask_value = -torch.finfo(sim.dtype).max
mask = rearrange(mask.bool(), 'b n a -> b () n a')
sim.masked_fill_(~mask.bool(), mask_value)
# attention
attn = sim.softmax(dim = -1)
# dropout
attn = self.dropout(attn)
# get weighted average of the values of all neighbors
out = einsum('b h n a, b h n a d -> b h n d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
# combine output
return self.to_out(out)
# adjacent network (layers of adjacent attention)
class AdjacentAttentionNetwork(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 4,
num_neighbors_cutoff = None,
num_global_nodes = 0,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
self.num_neighbors_cutoff = num_neighbors_cutoff
self.layers = nn.ModuleList([])
for _ in range(depth):
global_attn = PreNorm(dim, ISAB(
dim = dim,
heads = heads,
num_induced_points = num_global_nodes
)) if num_global_nodes > 0 else None
self.layers.append(nn.ModuleList([
Residual(PreNorm(dim, AdjacentAttention(
dim = dim,
dim_head = dim_head,
heads = heads,
dropout = attn_dropout
))),
global_attn,
Residual(PreNorm(dim, FeedForward(
dim = dim,
dropout = ff_dropout
)))
]))
def forward(self, x, adjacency_mat, mask = None):
device, n = x.device, x.shape[1]
diag = torch.eye(adjacency_mat.shape[-1], device = device).bool()
adjacency_mat |= diag # nodes should attend to themselves (add self loops)
# zero out points on adjacency matrix
# where the nodes are just padding
if exists(mask):
adjacency_mat &= (mask[:, :, None] * mask[:, None, :])
adj_mat = adjacency_mat.float()
# if we don't set a hard limit to the number of neighbors:
# - get the maximum number of neighbors and pad the rest of the nodes with less than that number of neighbors
# else:
# - randomly sample the cutoff number of neighbors for any node that exceeds the max
# - this would be similar to random sparse attention (bigbird)
# get the maximum number of neighbors
max_neighbors = int(adj_mat.sum(dim = -1).max())
if exists(self.num_neighbors_cutoff) and max_neighbors > self.num_neighbors_cutoff:
# to randomly sample the neighbors, add a small uniform noise to the mask and topk
noise = torch.empty((n, n), device = device).uniform_(-0.01, 0.01)
adj_mat = adj_mat + noise
adj_mask, adj_kv_indices = adj_mat.topk(dim = -1, k = self.num_neighbors_cutoff)
# cast the mask back to 0s and 1s
adj_mask = (adj_mask > 0.5).float()
else:
# todo - get distribution of number of neighbors, and strategically break up attention (message passing) to multiple steps
# - start with a bimodal num neighbors test case, then generalize
# use topk to get all the neighbors
# also pass the mask into the attention, as some neighbors will be just padding and not actually neighbors
adj_mask, adj_kv_indices = adj_mat.topk(dim = -1, k = max_neighbors)
for attn, global_attn, ff in self.layers:
x = attn(
x,
adj_kv_indices = adj_kv_indices,
mask = adj_mask
)
if exists(global_attn):
out, _ = global_attn(x, mask = mask)
x = x + out
x = ff(x)
return x
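# minimal usage sketch (illustrative only): random node features, a random boolean adjacency
# matrix, and a node padding mask
if __name__ == '__main__':
    model = AdjacentAttentionNetwork(dim = 64, depth = 2, heads = 4)
    nodes = torch.randn(1, 128, 64)
    adjacency_mat = torch.rand(1, 128, 128) < 0.1
    mask = torch.ones(1, 128).bool()
    out = model(nodes, adjacency_mat, mask = mask)
    print(out.shape) # (1, 128, 64)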
| adjacent-attention-network-main | adjacent_attention_network/adjacent_attention_network.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from pathlib import Path
from setuptools import setup, find_packages
import subprocess
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, We cross-compile for Volta (compute capability 7.0), "
"Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
# Check if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
raise_if_cuda_home_none("flash_attn")
# Check if CUDA 11 is installed for compute capability 8.0
cc_flag = []
_, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) < 11:
raise RuntimeError("FlashAttention is only supported on CUDA 11")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_75,code=sm_75")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
subprocess.run(["git", "submodule", "update", "--init", "csrc/flash_attn/cutlass"])
ext_modules.append(
CUDAExtension(
name="flash_attn_cuda",
sources=[
"csrc/flash_attn/fmha_api.cpp",
"csrc/flash_attn/src/fmha_fprop_fp16_kernel.sm80.cu",
"csrc/flash_attn/src/fmha_dgrad_fp16_kernel_loop.sm80.cu",
"csrc/flash_attn/src/fmha_block_fprop_fp16_kernel.sm80.cu",
"csrc/flash_attn/src/fmha_block_dgrad_fp16_kernel_loop.sm80.cu",
],
extra_compile_args={
"cxx": ["-O3", "-std=c++17"] + generator_flag,
"nvcc": append_nvcc_threads(
[
"-O3",
"-std=c++17",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
"--ptxas-options=-v",
"-lineinfo"
]
+ generator_flag
+ cc_flag
),
},
include_dirs=[
Path(this_dir) / 'csrc' / 'flash_attn',
Path(this_dir) / 'csrc' / 'flash_attn' / 'src',
Path(this_dir) / 'csrc' / 'flash_attn' / 'cutlass' / 'include',
],
)
)
setup(
name="flash_attn",
version="0.1",
packages=find_packages(
exclude=("build", "csrc", "include", "tests", "dist", "docs", "benchmarks", "flash_attn.egg-info",)
),
author="Tri Dao",
author_email="[email protected]",
description="Flash Attention: Fast and Memory-Efficient Exact Attention",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/HazyResearch/flash-attention",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: Unix",
],
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
python_requires=">=3.7",
install_requires=[
"torch",
"einops",
],
)
| flash-attention-main | setup.py |
# Copied from https://github.com/NVIDIA/apex/tree/master/csrc/megatron
# We add the case where seqlen = 4k and seqlen = 8k
import os
import subprocess
import torch
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
setup(
name='fused_softmax_lib',
ext_modules=[
CUDAExtension(
name='fused_softmax_lib',
sources=['fused_softmax.cpp', 'scaled_masked_softmax_cuda.cu', 'scaled_upper_triang_masked_softmax_cuda.cu'],
extra_compile_args={
'cxx': ['-O3',],
'nvcc': append_nvcc_threads(['-O3', '--use_fast_math'] + cc_flag)
}
)
],
cmdclass={
'build_ext': BuildExtension
})
| flash-attention-main | csrc/fused_softmax/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
# Check if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
raise_if_cuda_home_none("--xentropy")
# Check if CUDA 11 is installed for compute capability 8.0
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
ext_modules.append(
CUDAExtension(
name="xentropy_cuda_lib",
sources=[
"interface.cpp",
"xentropy_kernel.cu"
],
extra_compile_args={
"cxx": ["-O3"] + generator_flag,
"nvcc": append_nvcc_threads(
["-O3"]
+ generator_flag
+ cc_flag
),
},
include_dirs=[this_dir],
)
)
setup(
name="xentropy_cuda_lib",
version="0.1",
description="Cross-entropy loss",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| flash-attention-main | csrc/xentropy/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
# Check if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
raise_if_cuda_home_none("--fast_layer_norm")
# Check if CUDA 11 is installed for compute capability 8.0
cc_flag = []
# cc_flag.append("-gencode")
# cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
ext_modules.append(
CUDAExtension(
name="dropout_layer_norm",
sources=[
"ln_api.cpp",
"ln_fwd_cuda_kernel.cu",
"ln_bwd_semi_cuda_kernel.cu",
],
extra_compile_args={
"cxx": ["-O3"] + generator_flag,
"nvcc": append_nvcc_threads(
[
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT16_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT162_OPERATORS__",
"-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
]
+ generator_flag
+ cc_flag
),
},
include_dirs=[this_dir],
)
)
setup(
name="dropout_layer_norm",
version="0.1",
description="Fused dropout + add + layer norm",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| flash-attention-main | csrc/layer_norm/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
raise_if_cuda_home_none("rotary_emb")
# Check if CUDA 11 is installed for compute capability 8.0
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
ext_modules.append(
CUDAExtension(
'rotary_emb', [
'rotary.cpp',
'rotary_cuda.cu',
],
extra_compile_args={'cxx': ['-g', '-march=native', '-funroll-loops'],
'nvcc': append_nvcc_threads([
'-O3', '--use_fast_math', '--expt-extended-lambda'
] + cc_flag)
}
)
)
setup(
name="rotary_emb",
version="0.1",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| flash-attention-main | csrc/rotary/setup.py |
import os
import subprocess
import torch
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
setup(
name='fused_dense_lib',
ext_modules=[
CUDAExtension(
name='fused_dense_lib',
sources=['fused_dense.cpp', 'fused_dense_cuda.cu'],
extra_compile_args={
'cxx': ['-O3',],
'nvcc': append_nvcc_threads(['-O3'])
}
)
],
cmdclass={
'build_ext': BuildExtension
})
| flash-attention-main | csrc/fused_dense_lib/setup.py |
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from flash_attn.layers.rotary import apply_rotary_emb_func, apply_rotary_emb_torch
is_sm8x = torch.cuda.get_device_capability('cuda') >= (8, 0)
@pytest.mark.parametrize('dtype', ([torch.float16] if not is_sm8x else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', ([torch.float16]))
@pytest.mark.parametrize('rotary_fraction', [1.0, 0.5])
# @pytest.mark.parametrize('rotary_fraction', [0.5])
@pytest.mark.parametrize('inplace', [False, True])
# @pytest.mark.parametrize('inplace', [False])
def test_rotary_single_tensor(inplace, rotary_fraction, dtype):
rtol = 1e-3
batch_size = 32
nheads = 4
seqlen = 217
headdim = 128
x = torch.randn(batch_size, seqlen, nheads, headdim, dtype=dtype, device='cuda',
requires_grad=True)
x_pt = x.detach().clone().requires_grad_()
rotary_dim = int(rotary_fraction * headdim)
assert rotary_dim % 2 == 0
angle = torch.randn(seqlen, rotary_dim // 2, device='cuda')
cos = torch.cos(angle).to(dtype=dtype)
sin = torch.sin(angle).to(dtype=dtype)
out = apply_rotary_emb_func(x, cos, sin, inplace)
out_pt = apply_rotary_emb_torch(x_pt, cos, sin)
# Estimate the intrinsic numerical error of doing any arithmetic at this precision, and use it to set the tolerance
atol = ((out + 0.3 - 0.3) - out).abs().max().item()
assert torch.allclose(out, out_pt, rtol=rtol, atol=2 * atol)
g = torch.randn_like(out)
g_pt = g.clone() # If inplace=True, we might modify the gradient inplace
out.backward(g)
out_pt.backward(g_pt)
atol = ((x_pt.grad + 0.3 - 0.3) - x_pt.grad).abs().max().item()
assert torch.allclose(x.grad, x_pt.grad, rtol=rtol, atol=2 * atol)
| flash-attention-main | tests/test_rotary.py |
import math
from functools import partial
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange, repeat
from flash_attn.flash_attn_interface import flash_attn_func, flash_attn_unpadded_qkvpacked_func, _get_block_size, flash_attn_unpadded_kvpacked_func, flash_attn_unpadded_func
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_split_func
from flash_attn.bert_padding import unpad_input, pad_input, index_first_axis
try:
from flash_attn.flash_attn_triton import flash_attn_func
except (ImportError, AttributeError): # Older version of Triton doesn't have tl.constexpr
flash_attn_func = None
is_sm75 = torch.cuda.get_device_capability('cuda') == (7, 5)
is_sm80 = torch.cuda.get_device_capability('cuda') == (8, 0)
def generate_random_padding_mask(max_seqlen, batch_size, device, mode='random'):
assert mode in ['full', 'random', 'third', 'split']
if mode == 'full':
lengths = torch.full((batch_size, 1), max_seqlen, device=device, dtype=torch.int32)
elif mode == 'random':
lengths = torch.randint(max(1, max_seqlen - 20), max_seqlen + 1, (batch_size, 1), device=device)
elif mode == 'third':
lengths = torch.randint(max_seqlen // 3, max_seqlen + 1, (batch_size, 1), device=device)
elif mode == 'split':
lengths0 = torch.randint(min(128, max_seqlen), max_seqlen + 1,
(batch_size // 4 * 3, 1), device=device)
lengths1 = torch.randint(min(max(1, max_seqlen - 20), 128), min(max_seqlen, 128) + 1,
(batch_size - batch_size // 4 * 3, 1), device=device)
lengths = torch.cat([lengths0, lengths1], dim=0)
padding_mask = repeat(torch.arange(max_seqlen, device=device), 's -> b s', b=batch_size) < lengths
return padding_mask
def generate_qkv(x, Wqkv, nheads, query_padding_mask=None, key_padding_mask=None,
kvpacked=False, qkvpacked=False):
"""
Arguments:
x: (batch_size, seqlen, nheads * d)
Wqkv: nn.Linear(nheads * d, 3 * nheads * d)
query_padding_mask: (batch_size, seqlen), bool
key_padding_mask: (batch_size, seqlen), bool
"""
assert not (kvpacked and qkvpacked)
batch_size, seqlen, dim = x.shape
q, k, v = Wqkv(x).chunk(3, dim=-1)
if query_padding_mask is not None:
q_unpad, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(q, query_padding_mask)
q_unpad = rearrange(q_unpad, 'nnz (h d) -> nnz h d', h=nheads)
output_pad_fn = lambda output_unpad: rearrange(
pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen),
'b s (h d) -> b s h d', h=nheads
)
else:
q_unpad = rearrange(q, 'b s (h d) -> (b s) h d', h=nheads)
cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
device=q_unpad.device)
max_seqlen_q = seqlen
output_pad_fn = lambda output_unpad: rearrange(output_unpad, '(b s) h d -> b s h d', b=batch_size)
if key_padding_mask is not None:
k_unpad, indices_k, cu_seqlens_k, max_seqlen_k = unpad_input(k, key_padding_mask)
k_unpad = rearrange(k_unpad, 'nnz (h d) -> nnz h d', h=nheads)
v_unpad, _, _, _ = unpad_input(v, key_padding_mask)
v_unpad = rearrange(v_unpad, 'nnz (h d) -> nnz h d', h=nheads)
else:
k_unpad = rearrange(k, 'b s (h d) -> (b s) h d', h=nheads)
v_unpad = rearrange(v, 'b s (h d) -> (b s) h d', h=nheads)
cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
device=q_unpad.device)
max_seqlen_k = seqlen
if qkvpacked:
assert (query_padding_mask == key_padding_mask).all()
qkv_unpad = torch.stack([q_unpad, k_unpad, v_unpad], dim=1)
qkv = rearrange(torch.stack([q, k, v], dim=2), 'b s t (h d) -> b s t h d', h=nheads)
if query_padding_mask is not None:
dqkv_pad_fn = lambda dqkv_unpad: rearrange(
pad_input(rearrange(dqkv_unpad, 'nnz t h d -> nnz (t h d)'), indices_q, batch_size, seqlen),
'b s (t h d) -> b s t h d', t=3, h=nheads
)
else:
dqkv_pad_fn = lambda dqkv_unpad: rearrange(dqkv_unpad, '(b s) t h d -> b s t h d', b=batch_size)
return (qkv_unpad.detach().requires_grad_(), cu_seqlens_q, max_seqlen_q,
qkv.detach().requires_grad_(), output_pad_fn, dqkv_pad_fn)
elif kvpacked:
kv_unpad = torch.stack([k_unpad, v_unpad], dim=1)
q = rearrange(q, 'b s (h d) -> b s h d', h=nheads)
kv = rearrange(torch.stack([k, v], dim=2), 'b s t (h d) -> b s t h d', h=nheads)
dq_pad_fn = output_pad_fn
if key_padding_mask is not None:
dkv_pad_fn = lambda dkv_unpad: rearrange(
pad_input(rearrange(dkv_unpad, 'nnz t h d -> nnz (t h d)'), indices_k, batch_size, seqlen),
'b s (t h d) -> b s t h d', t=2, h=nheads
)
else:
dkv_pad_fn = lambda dkv_unpad: rearrange(dkv_unpad, '(b s) t h d -> b s t h d', b=batch_size)
return (q_unpad.detach().requires_grad_(), kv_unpad.detach().requires_grad_(),
cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
q.detach().requires_grad_(), kv.detach().requires_grad_(),
output_pad_fn, dq_pad_fn, dkv_pad_fn)
else:
q, k, v = [rearrange(z, 'b s (h d) -> b s h d', h=nheads).detach().requires_grad_()
for z in [q, k, v]]
dq_pad_fn = output_pad_fn
if key_padding_mask is not None:
dk_pad_fn = lambda dk_unpad: rearrange(
pad_input(rearrange(dk_unpad, 'nnz h d -> nnz (h d)'), indices_k, batch_size, seqlen),
'b s (h d) -> b s h d', h=nheads
)
else:
dk_pad_fn = lambda dk_unpad: rearrange(dk_unpad, '(b s) h d -> b s h d', b=batch_size)
return (q_unpad.detach().requires_grad_(), k_unpad.detach().requires_grad_(),
v_unpad.detach().requires_grad_(),
cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
q, k, v,
output_pad_fn, dq_pad_fn, dk_pad_fn)
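# Illustrative summary of generate_qkv's return values (added commentary; mirrors the code above):
#   qkvpacked=True -> (qkv_unpad, cu_seqlens_q, max_seqlen_q, qkv, output_pad_fn, dqkv_pad_fn)
#   kvpacked=True  -> (q_unpad, kv_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
#                      q, kv, output_pad_fn, dq_pad_fn, dkv_pad_fn)
#   otherwise      -> (q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q,
#                      max_seqlen_k, q, k, v, output_pad_fn, dq_pad_fn, dk_pad_fn)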
def attention_ref(q, k, v, query_padding_mask=None, key_padding_mask=None, dropout_p=0.0,
dropout_mask=None, causal=False, bias=None, upcast=True, reorder_ops=False):
"""
Arguments:
q: (batch_size, seqlen_q, nheads, head_dim)
k: (batch_size, seqlen_k, nheads, head_dim)
v: (batch_size, seqlen_k, nheads, head_dim)
query_padding_mask: (batch_size, seqlen_q)
key_padding_mask: (batch_size, seqlen_k)
dropout_p: float
dropout_mask: (batch_size, nheads, seqlen_q, seqlen_k)
bias: (batch_size, nheads, seqlen_q, seqlen_k)
upcast: whether to cast all inputs to fp32, do all computation in fp32, then cast
output back to fp16/bf16.
        reorder_ops: whether to change the order of operations (scaling k instead of scaling q, etc.)
without changing the math. This is to estimate the numerical error from operation
reordering.
Output:
output: (batch_size, seqlen_q, nheads, head_dim)
        attention: (batch_size, nheads, seqlen_q, seqlen_k), softmax without dropout applied
"""
dtype_og = q.dtype
if upcast:
q, k, v = q.float(), k.float(), v.float()
seqlen_q, seqlen_k = q.shape[1], k.shape[1]
d = q.shape[-1]
if not reorder_ops:
scores = torch.einsum('bthd,bshd->bhts', q / math.sqrt(d), k)
else:
scores = torch.einsum('bthd,bshd->bhts', q, k / math.sqrt(d))
if bias is not None:
scores = (scores + bias).to(dtype=scores.dtype)
if key_padding_mask is not None:
scores.masked_fill_(rearrange(~key_padding_mask, 'b s -> b 1 1 s'), float('-inf'))
if causal:
causal_mask = torch.triu(torch.ones(seqlen_q, seqlen_k, dtype=torch.bool, device=q.device), 1)
scores.masked_fill_(causal_mask, float('-inf'))
attention = torch.softmax(scores, dim=-1)
dropout_scaling = 1.0 / (1 - dropout_p)
# attention_drop = attention.masked_fill(~dropout_mask, 0.0) * dropout_scaling
# output = torch.einsum('bhts,bshd->bthd', attention_drop , v)
if dropout_mask is not None:
attention_drop = attention.masked_fill(~dropout_mask, 0.0)
else:
attention_drop = attention
output = torch.einsum('bhts,bshd->bthd', attention_drop, v * dropout_scaling)
if query_padding_mask is not None:
output.masked_fill_(rearrange(~query_padding_mask, 'b s -> b s 1 1'), 0.0)
attention = attention.masked_fill(rearrange(~query_padding_mask, 'b s -> b 1 s 1'), 0.0)
return output.to(dtype=dtype_og), attention.to(dtype=dtype_og)
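# Hedged usage sketch (added for illustration, not part of the original test suite): a minimal
# shape check for attention_ref with no padding masks and no dropout. The tensor sizes below
# are arbitrary assumptions.
def _attention_ref_shape_sketch():
    q = torch.randn(2, 16, 4, 32, device='cuda', dtype=torch.float16)
    k = torch.randn(2, 24, 4, 32, device='cuda', dtype=torch.float16)
    v = torch.randn(2, 24, 4, 32, device='cuda', dtype=torch.float16)
    out, attn = attention_ref(q, k, v, causal=True)
    assert out.shape == (2, 16, 4, 32)   # (batch_size, seqlen_q, nheads, head_dim)
    assert attn.shape == (2, 4, 16, 24)  # (batch_size, nheads, seqlen_q, seqlen_k)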
def attention_kvpacked_ref(q, kv, query_padding_mask=None, key_padding_mask=None, dropout_p=0.0,
dropout_mask=None, causal=False, upcast=True, reorder_ops=False):
return attention_ref(q, kv[:, :, 0], kv[:, :, 1], query_padding_mask,
key_padding_mask, dropout_p, dropout_mask, upcast=upcast, causal=causal,
reorder_ops=reorder_ops)
def attention_qkvpacked_ref(qkv, key_padding_mask=None, dropout_p=0.0,
dropout_mask=None, causal=False, upcast=True, reorder_ops=False):
return attention_ref(qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], key_padding_mask,
key_padding_mask, dropout_p, dropout_mask, upcast=upcast, causal=causal,
reorder_ops=reorder_ops)
def generate_sparsity_mask(seqlen, sparsity=0.3):
repeats = seqlen // 16 // 2
# mask = torch.stack([torch.tensor([1, 0] * repeats, dtype=torch.bool, device='cuda'),
# torch.tensor([0, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
# mask = torch.stack([torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda'),
# torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
# mask = torch.stack([torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
# mask = torch.stack([torch.tensor([1, 0] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
nrow, ncol = seqlen // 16, seqlen // 256
mask = torch.rand(nrow, ncol, device='cuda') < sparsity
return mask
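# Illustrative note: each True entry of the (seqlen / 16, seqlen / 256) blockmask gates a
# 16 x 256 tile of the full seqlen x seqlen attention matrix; attention_blocksparse_ref below
# expands it with einops.repeat before applying it to the scores.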
def attention_blocksparse_ref(qkv, blockmask, attn_mask, dropout_p, dropout_mask):
"""
Arguments:
qkv: (batch_size, seqlen, 3, nheads, head_dim)
blockmask: (seqlen / 16, seqlen / 256)
attn_mask: (batch_size, seqlen)
dropout_p: float
dropout_mask: (batch_size, nheads, seqlen, seqlen)
Output:
output: (batch_size, seqlen, nheads, head_dim)
        attention: softmax without dropout applied
"""
q, k, v = qkv.float().unbind(dim=2)
d = qkv.shape[-1]
seqlen = qkv.shape[1]
scores = torch.einsum('bthd,bshd->bhts', q / math.sqrt(d), k)
scores.masked_fill_(rearrange(~attn_mask, 'b s -> b 1 1 s'), float('-inf'))
blockmask = repeat(blockmask, 's_16 s_256 -> (s_16 16) (s_256 256)')
blockmask = blockmask[:seqlen, :seqlen]
scores.masked_fill_(rearrange(~blockmask, 't s -> 1 1 t s'), float('-inf'))
attention = torch.softmax(scores, dim=-1)
attention = attention.masked_fill(rearrange(~attn_mask, 'b s -> b 1 s 1'), 0.0)
attention = attention.masked_fill_(rearrange(~blockmask, 't s -> 1 1 t s'), 0.0)
attention_drop = attention.masked_fill(~dropout_mask, 0.0) / (1 - dropout_p)
    output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
output.masked_fill_(rearrange(~attn_mask, 'b s -> b s 1 1'), 0)
return output.to(dtype=qkv.dtype), attention.to(dtype=qkv.dtype)
def convert_flash_attn_S_to_softmax(S, query_padding_mask, key_padding_mask, head_dim, is_dropout,
causal=False):
"""FlashAttention stores the S matrix in a different way.
Arguments:
S: (batch_size, nheads, seqlen_q, seqlen_k)
query_padding_mask: (batch_size, seqlen_q)
key_padding_mask: (batch_size, seqlen_k)
"""
S_flat = rearrange(S, 'b h t s -> b h (t s)')
seqlen_q, seqlen_k = S.shape[-2:]
block_size = _get_block_size(S.device, head_dim, is_dropout)
loop_steps = (seqlen_k + block_size - 1) // block_size
warps_n = 4
mmas_n = (seqlen_k // warps_n // 16) if seqlen_k <= block_size else (block_size // warps_n // 16)
S_converted = rearrange(S_flat, 'b h (loop nsteps mmas_n warps_n eight t r c0 c1) -> b h (nsteps r eight) (loop mmas_n warps_n c0 t c1)',
loop=loop_steps, nsteps=seqlen_q // 16, mmas_n=mmas_n, warps_n=warps_n, eight=8, t=4,
r=2, c0=2, c1=2)
# Need to zero out things not in attention_mask in case S was initialized with random values
# and some of those values aren't overwritten.
seqlen_q_og = query_padding_mask.shape[-1]
if seqlen_q_og < seqlen_q:
query_padding_mask = F.pad(query_padding_mask, (0, seqlen_q - seqlen_q_og))
else:
query_padding_mask = query_padding_mask[:, :seqlen_q]
S_converted = S_converted.masked_fill(rearrange(~query_padding_mask, 'b s -> b 1 s 1'), 0.0)
seqlen_k_og = key_padding_mask.shape[-1]
if seqlen_k_og < seqlen_k:
key_padding_mask = F.pad(key_padding_mask, (0, seqlen_k - seqlen_k_og))
else:
key_padding_mask = key_padding_mask[:, :seqlen_k]
S_converted = S_converted.masked_fill(rearrange(~key_padding_mask, 'b s -> b 1 1 s'), 0.0)
if causal:
causal_mask = torch.triu(torch.ones(seqlen_q, seqlen_k, dtype=torch.bool, device=S.device), 1)
S_converted.masked_fill_(causal_mask, 0.0)
if seqlen_q_og < seqlen_q:
S_converted = S_converted[:, :, :seqlen_q_og, :]
else:
S_converted = F.pad(S_converted, (0, 0, 0, seqlen_q_og - seqlen_q))
if seqlen_k_og < seqlen_k:
S_converted = S_converted[:, :, :, :seqlen_k_og]
else:
S_converted = F.pad(S_converted, (0, seqlen_k_og - seqlen_k))
return S_converted
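# Illustrative recap of how the tests below consume the converted S matrix: the sign encodes the
# dropout decision and the magnitude carries the unnormalized attention probability.
def _split_S_dmask_sketch(S_dmask_converted):
    dropout_mask = S_dmask_converted >= 0  # True means the entry was kept by dropout
    attn_unnorm = S_dmask_converted.abs()  # unnormalized attention probabilities
    return dropout_mask, attn_unnorm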
def normalize_flash_attn_S(attn_unnorm, q, k, v, query_padding_mask=None, key_padding_mask=None,
is_dropout=False, causal=False):
"""
Arguments:
q: (batch_size, seqlen_q, nheads, head_dim)
k, v: (batch_size, seqlen_k, nheads, head_dim)
        query_padding_mask: (batch_size, seqlen_q)
        key_padding_mask: (batch_size, seqlen_k)
    Output:
        attn_norm: (batch_size, nheads, seqlen_q, seqlen_k), the block-wise renormalized attention
"""
q, k, v = q.float(), k.float(), v.float()
_, seqlen_q, _, head_dim = q.shape
seqlen_k = k.shape[1]
scores = torch.einsum('bthd,bshd->bhts', q / math.sqrt(head_dim), k)
if key_padding_mask is not None:
scores.masked_fill_(rearrange(~key_padding_mask, 'b s -> b 1 1 s'), float('-inf'))
if causal:
causal_mask = torch.triu(torch.ones(seqlen_q, seqlen_k, dtype=torch.bool, device=q.device), 1)
scores.masked_fill_(causal_mask, float('-inf'))
block_size = _get_block_size(scores.device, head_dim, is_dropout)
scores_block = scores.split(block_size, dim=-1)
lse_block = torch.stack([torch.logsumexp(s, dim=-1) for s in scores_block], dim=-1)
lcse_block = torch.logcumsumexp(lse_block, dim=-1).unbind(dim=-1)
scores_max_block = ([torch.amax(scores_block[0], dim=-1)]
+ [torch.maximum(torch.amax(s, dim=-1), lcse)
for s, lcse in zip(scores_block[1:], lcse_block[:-1])])
attn_unnorm_block = attn_unnorm.split(block_size, dim=-1)
attn_norm = torch.cat([a / rearrange(torch.exp(lcse_block[-1] - m), 'b h s -> b h s 1')
for a, m in zip(attn_unnorm_block, scores_max_block)], dim=-1)
if query_padding_mask is not None:
attn_norm.masked_fill_(rearrange(~query_padding_mask, 'b s -> b 1 s 1'), 0.0)
return attn_norm.to(dtype=attn_unnorm.dtype)
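# Illustrative note: the tests below feed the unnormalized |S| values recovered from the kernel
# into normalize_flash_attn_S and compare the result against the reference softmax returned by
# attention_ref.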
def get_dropout_fraction(dropout_mask, query_padding_mask=None, key_padding_mask=None, causal=False):
"""
dropout_mask: (batch_size, nheads, seqlen_q, seqlen_k), bool. True means keep, False means drop.
query_padding_mask: (batch_size, seqlen_q)
key_padding_mask: (batch_size, seqlen_k)
"""
batch_size, nheads, seqlen_q, seqlen_k = dropout_mask.shape
dropped = ~dropout_mask
if query_padding_mask is not None:
dropped.masked_fill_(rearrange(~query_padding_mask, 'b s -> b 1 s 1'), False)
if key_padding_mask is not None:
dropped.masked_fill_(rearrange(~key_padding_mask, 'b s -> b 1 1 s'), False)
if causal:
causal_mask = torch.triu(torch.ones(seqlen_q, seqlen_k, dtype=torch.bool,
device=dropout_mask.device), 1)
dropped.masked_fill_(causal_mask, False)
dropped_total = dropped.sum()
query_lengths = (query_padding_mask.sum(dim=-1) if query_padding_mask is not None
else torch.full((batch_size,), seqlen_q, device=dropout_mask.device))
key_lengths = (key_padding_mask.sum(dim=-1) if key_padding_mask is not None
else torch.full((batch_size,), seqlen_k, device=dropout_mask.device))
if not causal:
numel_per_batch = query_lengths * key_lengths
else:
numel_per_batch = torch.where(
query_lengths <= key_lengths,
query_lengths * (query_lengths + 1) / 2,
query_lengths * key_lengths - (key_lengths * (key_lengths - 1) / 2)
)
return dropped_total / (numel_per_batch.sum() * nheads)
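# Worked example for the causal element count above (illustrative): with query length 3 and key
# length 5 the causal mask keeps 1 + 2 + 3 = 3 * 4 / 2 = 6 entries; with query length 5 and key
# length 3 it keeps 1 + 2 + 3 + 3 + 3 = 5 * 3 - 3 * 2 / 2 = 12 entries.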
@pytest.mark.parametrize('dtype', ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize('causal', [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize('d', [128, 64, 80, 40, 32, 16])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize('seqlen', [97, 128, 200, 256, 257, 384, 512, 768, 1024, 1025, 2048])
# @pytest.mark.parametrize('seqlen', [128])
@pytest.mark.parametrize('dropout_p', [0.0, 0.17])
# @pytest.mark.parametrize('dropout_p', [0.0])
def test_flash_attn_unpadded_qkvpacked(seqlen, d, dropout_p, causal, dtype):
if seqlen >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30:
pytest.skip() # Reference implementation OOM
device = 'cuda'
# if dtype == torch.float16:
# rtol, atol = (1e-3, 3e-4) if not causal else (1e-3, 1e-3)
# else: # torch.bfloat16
# rtol, atol = (3e-3, 3e-3) if not causal else (1e-3, 1e-3)
# set seed
torch.random.manual_seed(0)
    # Use a smaller batch size so that num_splits > 1 gets triggered
batch_size = 8
nheads = 4
x = torch.randn(batch_size, seqlen, nheads * d, device=device, dtype=dtype, requires_grad=True)
Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype)
key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random')
# key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='full')
qkv_unpad, cu_seqlens, max_seqlen, qkv, output_pad_fn, dqkv_pad_fn = generate_qkv(
x, Wqkv, nheads, key_padding_mask, key_padding_mask, qkvpacked=True
)
output_unpad, sm_lse, S_dmask = flash_attn_unpadded_qkvpacked_func(
qkv_unpad, cu_seqlens, max_seqlen, dropout_p, return_attn_probs=True, causal=causal
)
output = output_pad_fn(output_unpad)
S_dmask_converted = convert_flash_attn_S_to_softmax(
S_dmask, key_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal
)
dropout_mask = S_dmask_converted >= 0
attn_unnorm = S_dmask_converted.abs()
attn = normalize_flash_attn_S(attn_unnorm, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2],
key_padding_mask, key_padding_mask, dropout_p > 0.0, causal=causal)
dropout_fraction = get_dropout_fraction(dropout_mask, key_padding_mask, key_padding_mask,
causal=causal).item()
output_ref, attn_ref = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask,
causal=causal)
output_pt, attn_pt = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask,
causal=causal, upcast=False, reorder_ops=True)
print(f'Actual dropout fraction: {dropout_fraction}')
print(f'Output max diff: {(output - output_ref).abs().max().item()}')
print(f'Output mean diff: {(output - output_ref).abs().mean().item()}')
print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}')
print(f'Pytorch mean diff: {(output_pt - output_ref).abs().mean().item()}')
print(f'Attention max diff: {(attn - attn_ref).abs().max().item()}')
print(f'Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}')
if is_sm80 or d <= 64: # Only run backward for d=128 on A100
g = torch.randn_like(output)
dqkv_unpad, = torch.autograd.grad(output, qkv_unpad, g)
dqkv = dqkv_pad_fn(dqkv_unpad)
dqkv_ref, = torch.autograd.grad(output_ref, qkv, g)
dqkv_pt, = torch.autograd.grad(output_pt, qkv, g)
print(f'dQ max diff: {(dqkv[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}')
print(f'dK max diff: {(dqkv[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}')
print(f'dV max diff: {(dqkv[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}')
print(f'dQKV mean diff: {(dqkv - dqkv_ref).abs().mean().item()}')
print(f'dQ Pytorch max diff: {(dqkv_pt[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}')
print(f'dK Pytorch max diff: {(dqkv_pt[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}')
print(f'dV Pytorch max diff: {(dqkv_pt[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}')
print(f'dQKV Pytorch mean diff: {(dqkv_pt - dqkv_ref).abs().mean().item()}')
# Check that FlashAttention's numerical error is at most twice the numerical error
# of a Pytorch implementation.
assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item()
# assert torch.allclose(output, output_ref, rtol=rtol, atol=atol)
assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
# assert torch.allclose(attn, attn_ref, rtol=rtol, atol=atol)
if dropout_p == 0.0:
assert dropout_mask.all()
else:
assert 0.98 <= dropout_fraction / dropout_p <= 1.02
if is_sm80 or d <= 64: # Only run backward for d=128 on A100
# Error for dK and dV could be a bit higher if we're splitting along seqlen_q dimension
assert (dqkv - dqkv_ref).abs().max().item() <= 4 * (dqkv_pt - dqkv_ref).abs().max().item()
# assert torch.allclose(dqkv, dqkv_ref, rtol=rtol, atol=atol)
@pytest.mark.parametrize('dtype', ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize('causal', [False, True])
@pytest.mark.parametrize('d', [128, 64, 80, 40, 32, 16])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize('seqlen', [97, 128, 200, 256, 257, 384, 512, 768, 1024, 1025, 2048])
# @pytest.mark.parametrize('seqlen', [128])
@pytest.mark.parametrize('dropout_p', [0.0, 0.17])
# @pytest.mark.parametrize('dropout_p', [0.0])
def test_flash_attn_unpadded_kvpacked(seqlen, d, dropout_p, causal, dtype):
if seqlen >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30:
pytest.skip() # Reference implementation OOM
device = 'cuda'
# if dtype == torch.float16:
# rtol, atol = (1e-3, 3e-4) if not causal else (1e-3, 1e-3)
# else: # torch.bfloat16
# rtol, atol = (3e-3, 3e-3) if not causal else (1e-3, 1e-3)
# set seed
torch.random.manual_seed(0)
batch_size = 32
nheads = 4
x = torch.randn(batch_size, seqlen, nheads * d, device=device, dtype=dtype, requires_grad=True)
Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype)
query_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random')
key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random')
(q_unpad, kv_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, q, kv,
output_pad_fn, dq_pad_fn, dkv_pad_fn) = generate_qkv(
x, Wqkv, nheads, query_padding_mask, key_padding_mask, kvpacked=True
)
output_unpad, sm_lse, S_dmask = flash_attn_unpadded_kvpacked_func(
q_unpad, kv_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, return_attn_probs=True, causal=causal
)
output = output_pad_fn(output_unpad)
S_dmask_converted = convert_flash_attn_S_to_softmax(
S_dmask, query_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal
)
dropout_mask = S_dmask_converted >= 0
attn_unnorm = S_dmask_converted.abs()
attn = normalize_flash_attn_S(attn_unnorm, q, kv[:, :, 0], kv[:, :, 1],
query_padding_mask, key_padding_mask, dropout_p > 0.0, causal=causal)
dropout_fraction = get_dropout_fraction(dropout_mask, query_padding_mask, key_padding_mask,
causal=causal)
output_ref, attn_ref = attention_kvpacked_ref(q, kv, query_padding_mask, key_padding_mask,
dropout_p, dropout_mask, causal=causal)
output_pt, attn_pt = attention_kvpacked_ref(q, kv, query_padding_mask, key_padding_mask,
dropout_p, dropout_mask, causal=causal,
upcast=False, reorder_ops=True)
print(f'Actual dropout fraction: {dropout_fraction}')
print(f'Output max diff: {(output - output_ref).abs().max().item()}')
print(f'Output mean diff: {(output - output_ref).abs().mean().item()}')
print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}')
print(f'Pytorch mean diff: {(output_pt - output_ref).abs().mean().item()}')
print(f'Attention max diff: {(attn - attn_ref).abs().max().item()}')
print(f'Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}')
if is_sm80 or d <= 64: # Only run backward for d=128 on A100
g = torch.randn_like(output)
dq_unpad, dkv_unpad, = torch.autograd.grad(output, (q_unpad, kv_unpad), g)
dq = dq_pad_fn(dq_unpad)
dkv = dkv_pad_fn(dkv_unpad)
dq_ref, dkv_ref, = torch.autograd.grad(output_ref, (q, kv), g)
dq_pt, dkv_pt = torch.autograd.grad(output_pt, (q, kv), g)
print(f'dQ max diff: {(dq - dq_ref).abs().max().item()}')
print(f'dK max diff: {(dkv[:, :, 0] - dkv_ref[:, :, 0]).abs().max().item()}')
print(f'dV max diff: {(dkv[:, :, 1] - dkv_ref[:, :, 1]).abs().max().item()}')
print(f'dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}')
print(f'dK Pytorch max diff: {(dkv_pt[:, :, 0] - dkv_ref[:, :, 0]).abs().max().item()}')
print(f'dV Pytorch max diff: {(dkv_pt[:, :, 1] - dkv_ref[:, :, 1]).abs().max().item()}')
# Check that FlashAttention's numerical error is at most twice the numerical error
# of a Pytorch implementation.
assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item()
# assert torch.allclose(output, output_ref, rtol=rtol, atol=atol)
assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
# assert torch.allclose(attn, attn_ref, rtol=rtol, atol=atol)
if dropout_p == 0.0:
assert dropout_mask.all()
else:
assert 0.99 <= dropout_fraction / dropout_p <= 1.01
if is_sm80 or d <= 64: # Only run backward for d=128 on A100
assert (dq - dq_ref).abs().max().item() <= 2 * (dq_pt - dq_ref).abs().max().item()
assert (dkv - dkv_ref).abs().max().item() <= 2 * (dkv_pt - dkv_ref).abs().max().item()
# assert torch.allclose(dq, dq_ref, rtol=rtol, atol=atol)
# assert torch.allclose(dkv, dkv_ref, rtol=rtol, atol=atol)
@pytest.mark.parametrize('dtype', ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize('causal', [False, True])
@pytest.mark.parametrize('d', [128, 64, 80, 40, 32, 16])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize('seqlen', [97, 128, 200, 256, 257, 384, 512, 768, 1024, 1025, 2048])
# @pytest.mark.parametrize('seqlen', [128])
@pytest.mark.parametrize('dropout_p', [0.0, 0.17])
# @pytest.mark.parametrize('dropout_p', [0.0])
def test_flash_attn_unpadded(seqlen, d, dropout_p, causal, dtype):
if seqlen >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30:
pytest.skip() # Reference implementation OOM
device = 'cuda'
# if dtype == torch.float16:
# rtol, atol = (1e-3, 3e-4) if not causal else (1e-3, 1e-3)
# else: # torch.bfloat16
# rtol, atol = (3e-3, 3e-3) if not causal else (1e-3, 1e-3)
# set seed
torch.random.manual_seed(0)
batch_size = 32
nheads = 4
x = torch.randn(batch_size, seqlen, nheads * d, device=device, dtype=dtype, requires_grad=True)
Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype)
query_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random')
key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random')
(q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, q, k, v,
output_pad_fn, dq_pad_fn, dk_pad_fn) = generate_qkv(
x, Wqkv, nheads, query_padding_mask, key_padding_mask
)
output_unpad, sm_lse, S_dmask = flash_attn_unpadded_func(
q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, return_attn_probs=True, causal=causal
)
output = output_pad_fn(output_unpad)
S_dmask_converted = convert_flash_attn_S_to_softmax(
S_dmask, query_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal
)
dropout_mask = S_dmask_converted >= 0
attn_unnorm = S_dmask_converted.abs()
attn = normalize_flash_attn_S(attn_unnorm, q, k, v, query_padding_mask, key_padding_mask,
dropout_p > 0.0, causal=causal)
dropout_fraction = get_dropout_fraction(dropout_mask, query_padding_mask, key_padding_mask,
causal=causal)
output_ref, attn_ref = attention_ref(q, k, v, query_padding_mask, key_padding_mask,
dropout_p, dropout_mask, causal=causal)
output_pt, attn_pt = attention_ref(q, k, v, query_padding_mask, key_padding_mask,
dropout_p, dropout_mask, causal=causal,
upcast=False, reorder_ops=True)
print(f'Actual dropout fraction: {dropout_fraction}')
print(f'Output max diff: {(output - output_ref).abs().max().item()}')
print(f'Output mean diff: {(output - output_ref).abs().mean().item()}')
print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}')
print(f'Pytorch mean diff: {(output_pt - output_ref).abs().mean().item()}')
print(f'Attention max diff: {(attn - attn_ref).abs().max().item()}')
print(f'Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}')
if is_sm80 or d <= 64: # Only run backward for d=128 on A100
g = torch.randn_like(output)
dq_unpad, dk_unpad, dv_unpad, = torch.autograd.grad(output, (q_unpad, k_unpad, v_unpad), g)
dq = dq_pad_fn(dq_unpad)
dk = dk_pad_fn(dk_unpad)
dv = dk_pad_fn(dv_unpad)
dq_ref, dk_ref, dv_ref, = torch.autograd.grad(output_ref, (q, k, v), g)
dq_pt, dk_pt, dv_pt, = torch.autograd.grad(output_pt, (q, k, v), g)
print(f'dQ max diff: {(dq - dq_ref).abs().max().item()}')
print(f'dK max diff: {(dk - dk_ref).abs().max().item()}')
print(f'dV max diff: {(dv - dv_ref).abs().max().item()}')
print(f'dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}')
print(f'dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}')
print(f'dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}')
# Check that FlashAttention's numerical error is at most twice the numerical error
# of a Pytorch implementation.
assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item()
# assert torch.allclose(output, output_ref, rtol=rtol, atol=atol)
assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
# assert torch.allclose(attn, attn_ref, rtol=rtol, atol=atol)
if dropout_p == 0.0:
assert dropout_mask.all()
else:
assert 0.99 <= dropout_fraction / dropout_p <= 1.01
if is_sm80 or d <= 64: # Only run backward for d=128 on A100
assert (dq - dq_ref).abs().max().item() <= 2 * (dq_pt - dq_ref).abs().max().item()
assert (dk - dk_ref).abs().max().item() <= 2 * (dk_pt - dk_ref).abs().max().item()
assert (dv - dv_ref).abs().max().item() <= 2 * (dv_pt - dv_ref).abs().max().item()
# assert torch.allclose(dq, dq_ref, rtol=rtol, atol=atol)
# assert torch.allclose(dk, dk_ref, rtol=rtol, atol=atol)
# assert torch.allclose(dv, dv_ref, rtol=rtol, atol=atol)
@pytest.mark.skipif(True, reason='Experimental, not being used')
@pytest.mark.parametrize('dtype', ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize('causal', [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize('d', [128, 64, 80, 40, 32, 16])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize('seqlen', [512])
@pytest.mark.parametrize('dropout_p', [0.0, 0.17])
# @pytest.mark.parametrize('dropout_p', [0.0])
def test_flash_attn_split(seqlen, d, dropout_p, causal, dtype):
if seqlen >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30:
pytest.skip() # Reference implementation OOM
device = 'cuda'
# if dtype == torch.float16:
# rtol, atol = (1e-3, 3e-4) if not causal else (1e-3, 1e-3)
# else: # torch.bfloat16
# rtol, atol = (3e-3, 3e-3) if not causal else (1e-3, 1e-3)
# set seed
torch.random.manual_seed(0)
batch_size = 32
nheads = 4
x = torch.randn(batch_size, seqlen, nheads * d, device=device, dtype=dtype, requires_grad=True)
Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype)
key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='split')
batch_size0 = batch_size // 4 * 3 # this must match what's in generate_random_padding_mask
# key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='full')
qkv_unpad, cu_seqlens, max_seqlen0, qkv, output_pad_fn, dqkv_pad_fn = generate_qkv(
x, Wqkv, nheads, key_padding_mask, key_padding_mask, qkvpacked=True
)
max_seqlen1 = 128
output_unpad, sm_lse, S_dmask0, S_dmask1 = flash_attn_unpadded_qkvpacked_split_func(
qkv_unpad, cu_seqlens, max_seqlen0, max_seqlen1, batch_size0, dropout_p,
return_attn_probs=True, causal=causal
)
output = output_pad_fn(output_unpad)
S_dmask0_converted = convert_flash_attn_S_to_softmax(
S_dmask0, key_padding_mask[:batch_size0], key_padding_mask[:batch_size0], d, dropout_p > 0.0, causal=causal
)
S_dmask1_converted = convert_flash_attn_S_to_softmax(
S_dmask1, key_padding_mask[batch_size0:, :max_seqlen1], key_padding_mask[batch_size0:, :max_seqlen1], d, dropout_p > 0.0, causal=causal
)
padding = (S_dmask0_converted.shape[-1] - S_dmask1_converted.shape[-1],
S_dmask0_converted.shape[-2] - S_dmask1_converted.shape[-2])
S_dmask_converted = torch.cat([S_dmask0_converted,
F.pad(S_dmask1_converted, (0, padding[0], 0, padding[1]))], dim=0)
dropout_mask = S_dmask_converted >= 0
attn_unnorm = S_dmask_converted.abs()
attn = normalize_flash_attn_S(attn_unnorm, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2],
key_padding_mask, key_padding_mask, dropout_p > 0.0, causal=causal)
dropout_fraction = get_dropout_fraction(dropout_mask, key_padding_mask, key_padding_mask,
causal=causal).item()
output_ref, attn_ref = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask,
causal=causal)
output_pt, attn_pt = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask,
causal=causal, upcast=False, reorder_ops=True)
print(f'Actual dropout fraction: {dropout_fraction}')
print(f'Output max diff: {(output - output_ref).abs().max().item()}')
print(f'Output mean diff: {(output - output_ref).abs().mean().item()}')
print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}')
print(f'Pytorch mean diff: {(output_pt - output_ref).abs().mean().item()}')
print(f'Attention max diff: {(attn - attn_ref).abs().max().item()}')
print(f'Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}')
if is_sm80 or d <= 64: # Only run backward for d=128 on A100
g = torch.randn_like(output)
dqkv_unpad, = torch.autograd.grad(output, qkv_unpad, g)
dqkv = dqkv_pad_fn(dqkv_unpad)
dqkv_ref, = torch.autograd.grad(output_ref, qkv, g)
dqkv_pt, = torch.autograd.grad(output_pt, qkv, g)
print(f'dQ max diff: {(dqkv[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}')
print(f'dK max diff: {(dqkv[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}')
print(f'dV max diff: {(dqkv[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}')
print(f'dQKV mean diff: {(dqkv - dqkv_ref).abs().mean().item()}')
print(f'dQ Pytorch max diff: {(dqkv_pt[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}')
print(f'dK Pytorch max diff: {(dqkv_pt[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}')
print(f'dV Pytorch max diff: {(dqkv_pt[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}')
print(f'dQKV Pytorch mean diff: {(dqkv_pt - dqkv_ref).abs().mean().item()}')
# Check that FlashAttention's numerical error is at most twice the numerical error
# of a Pytorch implementation.
assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item()
# assert torch.allclose(output, output_ref, rtol=rtol, atol=atol)
assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
# assert torch.allclose(attn, attn_ref, rtol=rtol, atol=atol)
if dropout_p == 0.0:
assert dropout_mask.all()
else:
assert 0.99 <= dropout_fraction / dropout_p <= 1.01
if is_sm80 or d <= 64: # Only run backward for d=128 on A100
assert (dqkv - dqkv_ref).abs().max().item() <= 2 * (dqkv_pt - dqkv_ref).abs().max().item()
# assert torch.allclose(dqkv, dqkv_ref, rtol=rtol, atol=atol)
@pytest.mark.parametrize('dtype', ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize('causal', [False, True])
@pytest.mark.parametrize('d', [128, 64, 80, 40, 32, 16])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize('seqlen', [97, 128, 200, 256, 257, 384, 512, 768, 1024, 1025, 2048])
# @pytest.mark.parametrize('seqlen', [128])
@pytest.mark.parametrize('dropout_p', [0.0, 0.17])
# @pytest.mark.parametrize('dropout_p', [0.0])
def test_flash_attn_race_condition(seqlen, d, dropout_p, causal, dtype):
if seqlen >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30:
pytest.skip() # Reference implementation OOM
device = 'cuda'
# set seed
torch.random.manual_seed(0)
batch_size = 32
nheads = 4
x = torch.randn(batch_size, seqlen, nheads * d, device=device, dtype=dtype, requires_grad=True)
Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype)
query_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random')
key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random')
(q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, q, k, v,
output_pad_fn, dq_pad_fn, dk_pad_fn) = generate_qkv(
x, Wqkv, nheads, query_padding_mask, key_padding_mask
)
torch.random.manual_seed(0)
output_unpad_0, sm_lse_0, S_dmask_0 = flash_attn_unpadded_func(
q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, return_attn_probs=True, causal=causal
)
S_dmask_converted_0 = convert_flash_attn_S_to_softmax(
S_dmask_0, query_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal
)
if is_sm80 or d <= 64: # Only run backward for d=128 on A100
g = torch.randn_like(output_unpad_0)
dq_unpad_0, dk_unpad_0, dv_unpad_0, = torch.autograd.grad(output_unpad_0,
(q_unpad, k_unpad, v_unpad), g)
# Parallelizing over seqlen_k makes dq non-deterministic
deterministic_dq = False
        # Estimate the numerical-error floor from doing trivial arithmetic on dq
dq_atol = ((dq_unpad_0 + 0.3 - 0.3) - dq_unpad_0).abs().max().item()
equal_fn = torch.equal if deterministic_dq else partial(torch.allclose, atol=dq_atol)
for _ in range(10):
torch.random.manual_seed(0)
output_unpad, sm_lse, S_dmask = flash_attn_unpadded_func(
q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, return_attn_probs=True, causal=causal
)
S_dmask_converted = convert_flash_attn_S_to_softmax(
S_dmask, query_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal
)
assert torch.equal(output_unpad, output_unpad_0)
        # sm_lse may contain uninitialized entries (it's allocated with torch.empty), so don't compare it
# assert torch.equal(sm_lse, sm_lse_0)
assert torch.equal(S_dmask_converted, S_dmask_converted_0)
if is_sm80 or d <= 64: # Only run backward for d=128 on A100
dq_unpad, dk_unpad, dv_unpad, = torch.autograd.grad(output_unpad,
(q_unpad, k_unpad, v_unpad), g)
assert equal_fn(dq_unpad, dq_unpad_0)
assert torch.equal(dk_unpad, dk_unpad_0)
assert torch.equal(dv_unpad, dv_unpad_0)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason='requires multiple GPUs')
def test_flash_attn_multigpu():
seqlen = 256
d = 64
dropout_p = 0.0
causal = False
dtype = torch.float16
device = 'cuda:1'
torch.random.manual_seed(0)
batch_size = 32
nheads = 4
x = torch.randn(batch_size, seqlen, nheads * d, device=device, dtype=dtype, requires_grad=True)
Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype)
key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='random')
# key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='full')
qkv_unpad, cu_seqlens, max_seqlen, qkv, output_pad_fn, dqkv_pad_fn = generate_qkv(
x, Wqkv, nheads, key_padding_mask, key_padding_mask, qkvpacked=True
)
output_unpad, sm_lse, S_dmask = flash_attn_unpadded_qkvpacked_func(
qkv_unpad, cu_seqlens, max_seqlen, dropout_p, return_attn_probs=True, causal=causal
)
output = output_pad_fn(output_unpad)
S_dmask_converted = convert_flash_attn_S_to_softmax(
S_dmask, key_padding_mask, key_padding_mask, d, dropout_p > 0.0, causal=causal
)
dropout_mask = S_dmask_converted >= 0
attn_unnorm = S_dmask_converted.abs()
attn = normalize_flash_attn_S(attn_unnorm, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2],
key_padding_mask, key_padding_mask, dropout_p > 0.0, causal=causal)
dropout_fraction = get_dropout_fraction(dropout_mask, key_padding_mask, key_padding_mask,
causal=causal).item()
output_ref, attn_ref = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask,
causal=causal)
output_pt, attn_pt = attention_qkvpacked_ref(qkv, key_padding_mask, dropout_p, dropout_mask,
causal=causal, upcast=False, reorder_ops=True)
print(f'Actual dropout fraction: {dropout_fraction}')
print(f'Output max diff: {(output - output_ref).abs().max().item()}')
print(f'Output mean diff: {(output - output_ref).abs().mean().item()}')
print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}')
print(f'Pytorch mean diff: {(output_pt - output_ref).abs().mean().item()}')
print(f'Attention max diff: {(attn - attn_ref).abs().max().item()}')
print(f'Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}')
g = torch.randn_like(output)
dqkv_unpad, = torch.autograd.grad(output, qkv_unpad, g)
dqkv = dqkv_pad_fn(dqkv_unpad)
dqkv_ref, = torch.autograd.grad(output_ref, qkv, g)
dqkv_pt, = torch.autograd.grad(output_pt, qkv, g)
print(f'dQ max diff: {(dqkv[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}')
print(f'dK max diff: {(dqkv[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}')
print(f'dV max diff: {(dqkv[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}')
print(f'dQKV mean diff: {(dqkv - dqkv_ref).abs().mean().item()}')
print(f'dQ Pytorch max diff: {(dqkv_pt[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}')
print(f'dK Pytorch max diff: {(dqkv_pt[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}')
print(f'dV Pytorch max diff: {(dqkv_pt[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}')
print(f'dQKV Pytorch mean diff: {(dqkv_pt - dqkv_ref).abs().mean().item()}')
# Check that FlashAttention's numerical error is at most twice the numerical error
# of a Pytorch implementation.
assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item()
# assert torch.allclose(output, output_ref, rtol=rtol, atol=atol)
assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
# assert torch.allclose(attn, attn_ref, rtol=rtol, atol=atol)
if dropout_p == 0.0:
assert dropout_mask.all()
else:
assert 0.99 <= dropout_fraction / dropout_p <= 1.01
assert (dqkv - dqkv_ref).abs().max().item() <= 2 * (dqkv_pt - dqkv_ref).abs().max().item()
@pytest.mark.skipif(flash_attn_func is None, reason='Triton is not installed or is too old')
@pytest.mark.skipif(not is_sm80, reason='Triton version is only tested on A100')
@pytest.mark.parametrize('dtype', ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.bfloat16])
@pytest.mark.parametrize('causal', [False, True])
# @pytest.mark.parametrize('causal', [True])
@pytest.mark.parametrize('d', [40, 48, 64, 128, 80, 88, 96])
# @pytest.mark.parametrize('d', [48])
@pytest.mark.parametrize('seqlen_q,seqlen_k', [(113, 203), (128, 217), (113, 211), (108, 256), (256, 512), (512, 256), (1024, 1024), (1023, 1024), (1024, 1023), (2048, 2048)])
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(1024, 1023)])
@pytest.mark.parametrize('bias_shape', ([None, '1h1k', '1hqk', 'b11k', 'b1qk']))
# @pytest.mark.parametrize('bias_shape', (['1hqk']))
def test_flash_attn_triton_output(seqlen_q, seqlen_k, d, causal, dtype, bias_shape):
if seqlen_q >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30:
pytest.skip() # Reference implementation OOM
device = 'cuda'
# set seed
torch.random.manual_seed(0)
batch_size = 32
nheads = 4
q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype)
k, v = torch.randn(batch_size, seqlen_k, 2, nheads, d, device=device, dtype=dtype).unbind(dim=2)
if bias_shape == '1h1k':
bias = torch.randn(1, nheads, 1, seqlen_k, dtype=torch.float, device=device)
elif bias_shape == '1hqk':
bias = torch.randn(1, nheads, seqlen_q, seqlen_k, dtype=torch.float, device=device)
elif bias_shape == 'b11k':
bias = torch.randn(batch_size, 1, 1, seqlen_k, dtype=torch.float, device=device)
elif bias_shape == 'b1qk':
bias = torch.randn(batch_size, 1, seqlen_q, seqlen_k, dtype=torch.float, device=device)
else:
bias = None
q, k, v = [x.detach().requires_grad_() for x in [q, k, v]]
output = flash_attn_func(q, k, v, bias, causal)
output_ref, attn_ref = attention_ref(q, k, v, bias=bias, causal=causal)
output_pt, attn_pt = attention_ref(q, k, v, bias=bias, causal=causal, upcast=False,
reorder_ops=True)
print(f'Output max diff: {(output - output_ref).abs().max().item()}')
print(f'Output mean diff: {(output - output_ref).abs().mean().item()}')
print(f'Pytorch max diff: {(output_pt - output_ref).abs().max().item()}')
print(f'Pytorch mean diff: {(output_pt - output_ref).abs().mean().item()}')
g = torch.randn_like(output)
dq, dk, dv = torch.autograd.grad(output, (q, k, v), g)
dq_ref, dk_ref, dv_ref, = torch.autograd.grad(output_ref, (q, k, v), g)
dq_pt, dk_pt, dv_pt, = torch.autograd.grad(output_pt, (q, k, v), g)
print(f'dQ max diff: {(dq - dq_ref).abs().max().item()}')
print(f'dK max diff: {(dk - dk_ref).abs().max().item()}')
print(f'dV max diff: {(dv - dv_ref).abs().max().item()}')
print(f'dQ mean diff: {(dq - dq_ref).abs().mean().item()}')
print(f'dK mean diff: {(dk - dk_ref).abs().mean().item()}')
print(f'dV mean diff: {(dv - dv_ref).abs().mean().item()}')
print(f'dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}')
print(f'dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}')
print(f'dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}')
print(f'dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}')
print(f'dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}')
print(f'dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}')
# Check that FlashAttention's numerical error is at most twice the numerical error
# of a Pytorch implementation.
assert (output - output_ref).abs().max().item() <= 2 * (output_pt - output_ref).abs().max().item()
# assert torch.allclose(output, output_ref, rtol=rtol, atol=atol)
assert (dq - dq_ref).abs().max().item() <= 2 * (dq_pt - dq_ref).abs().max().item()
assert (dk - dk_ref).abs().max().item() <= 2 * (dk_pt - dk_ref).abs().max().item()
assert (dv - dv_ref).abs().max().item() <= 2 * (dv_pt - dv_ref).abs().max().item()
@pytest.mark.skipif(flash_attn_func is None, reason='Triton is not installed or is too old')
@pytest.mark.skipif(not is_sm80, reason='Triton version is only tested on A100')
@pytest.mark.parametrize('dtype', ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.bfloat16])
@pytest.mark.parametrize('causal', [False, True])
# @pytest.mark.parametrize('causal', [True])
@pytest.mark.parametrize('d', [40, 48, 64, 128, 80, 88, 96])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize('seqlen_q,seqlen_k', [(113, 203), (128, 217), (91, 211), (108, 256), (256, 512), (512, 256), (1024, 1024), (1023, 1024), (1024, 1023), (2048, 2048)])
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(113, 203)])
@pytest.mark.parametrize('bias_shape', ([None, '1h1k', '1hqk', 'b11k', 'b1qk']))
# @pytest.mark.parametrize('bias_shape', (['b1qk']))
def test_flash_attn_triton_race_condition(seqlen_q, seqlen_k, d, causal, dtype, bias_shape):
if seqlen_q >= 2048 and torch.cuda.get_device_properties('cuda').total_memory <= 16 * 2**30:
pytest.skip() # Reference implementation OOM
device = 'cuda'
# set seed
torch.random.manual_seed(0)
batch_size = 32
nheads = 4
q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype)
k, v = torch.randn(batch_size, seqlen_k, 2, nheads, d, device=device, dtype=dtype).unbind(dim=2)
if bias_shape == '1h1k':
bias = torch.randn(1, nheads, 1, seqlen_k, dtype=torch.float, device=device)
elif bias_shape == '1hqk':
bias = torch.randn(1, nheads, seqlen_q, seqlen_k, dtype=torch.float, device=device)
elif bias_shape == 'b11k':
bias = torch.randn(batch_size, 1, 1, seqlen_k, dtype=torch.float, device=device)
elif bias_shape == 'b1qk':
bias = torch.randn(batch_size, 1, seqlen_q, seqlen_k, dtype=torch.float, device=device)
else:
bias = None
q, k, v = [x.detach().requires_grad_() for x in [q, k, v]]
output_0 = flash_attn_func(q, k, v, bias, causal)
g = torch.randn_like(output_0)
dq_0, dk_0, dv_0 = torch.autograd.grad(output_0, (q, k, v), g)
    # The SEQUENCE_PARALLEL option in the bwd makes dq non-deterministic
deterministic_dq = False
    # Estimate the numerical-error floor from doing trivial arithmetic on dq
dq_atol = ((dq_0 + 0.3 - 0.3) - dq_0).abs().max().item()
equal_fn = torch.equal if deterministic_dq else partial(torch.allclose, atol=dq_atol)
# Run 10000 times and check that the results don't change
for i in range(10000):
output = flash_attn_func(q, k, v, bias, causal)
output_equal = torch.equal(output, output_0)
if not output_equal: # Printing / computing diff sometimes makes the race condition disappear
print(f'{dtype = }, {causal = }, {d = }, {seqlen_q = }, {seqlen_k = }, {bias_shape = }, {i = }')
print(f'Output max diff: {(output - output_0).abs().max().item()}')
assert torch.equal(output, output_0)
dq, dk, dv = torch.autograd.grad(output, (q, k, v), g)
dq_equal = equal_fn(dq, dq_0)
dk_equal = torch.equal(dk, dk_0)
dv_equal = torch.equal(dv, dv_0)
if not (dq_equal and dk_equal and dv_equal):
print(f'{dtype = }, {causal = }, {d = }, {seqlen_q = }, {seqlen_k = }, {bias_shape = }, {i = }')
print(f'dQ max diff: {(dq - dq_0).abs().max().item()}')
print(f'dK max diff: {(dk - dk_0).abs().max().item()}')
print(f'dV max diff: {(dv - dv_0).abs().max().item()}')
assert equal_fn(dq, dq_0)
assert torch.equal(dk, dk_0)
assert torch.equal(dv, dv_0)
| flash-attention-main | tests/test_flash_attn.py |
# Run test with:
# torchrun --no_python --nproc_per_node=8 pytest -q -s tests/losses/test_cross_entropy_parallel.py
import math
import torch
import torch.nn.functional as F
import pytest
from apex.transformer import parallel_state
from apex.transformer import tensor_parallel
from flash_attn.losses.cross_entropy_parallel import CrossEntropyLossParallel
is_sm8x = torch.cuda.get_device_capability('cuda')[0] >= 8
@pytest.mark.parametrize('dtype', [torch.float16, torch.float32] + ([torch.bfloat16] if is_sm8x else []))
# @pytest.mark.parametrize('dtype', [torch.bfloat16])
@pytest.mark.parametrize('inplace_backward', [False, True])
# @pytest.mark.parametrize('inplace_backward', [False])
@pytest.mark.parametrize('vocab_size', [50264])
@pytest.mark.parametrize('world_size', [1, 2, 4, 8])
# @pytest.mark.parametrize('world_size', [2])
def test_cross_entropy_loss_parallel(vocab_size, world_size, inplace_backward, dtype):
assert vocab_size % world_size == 0
rtol, atol = ((1e-5, 1e-6) if dtype == torch.float32
else ((1e-3, 1e-4) if dtype == torch.float16 else (1e-2, 3e-3)))
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='nccl', init_method='env://')
partition_vocab_size = vocab_size // world_size
device = f'cuda:{torch.distributed.get_rank()}'
assert world_size <= torch.distributed.get_world_size()
parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
rank = parallel_state.get_tensor_model_parallel_rank()
# set seed
torch.random.manual_seed(0)
batch_size = 8
seqlen = 128
x_pt = (torch.randn(batch_size * seqlen, vocab_size, device=device,
dtype=dtype) * 10).requires_grad_()
x = tensor_parallel.scatter_to_tensor_model_parallel_region(x_pt).detach().clone().requires_grad_()
y = torch.randint(0, vocab_size, (batch_size * seqlen,), dtype=torch.long, device=device)
y[torch.randperm(batch_size * seqlen)[:10]] = -100
model_pt = torch.nn.CrossEntropyLoss(reduction='none')
model = CrossEntropyLossParallel(reduction='none', inplace_backward=inplace_backward)
out = model(x, y)
out_pt = model_pt(x_pt.float(), y)
assert torch.allclose(out, out_pt, rtol=1e-5, atol=1e-6)
g = torch.randn_like(out)
out_pt.backward(g)
out.backward(g)
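    # Each rank only holds its vocab partition of x, so compare its gradient against the
    # matching slice of the full (unpartitioned) gradient.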
assert torch.allclose(x.grad, x_pt.grad[:, (rank * partition_vocab_size):(rank + 1) * partition_vocab_size], rtol=rtol, atol=atol)
parallel_state.destroy_model_parallel()
| flash-attention-main | tests/losses/test_cross_entropy_parallel.py |
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from flash_attn.losses.cross_entropy_apex import CrossEntropyLossApex
is_sm8x = torch.cuda.get_device_capability('cuda')[0] >= 8
@pytest.mark.parametrize('dtype', [torch.float16, torch.float32] + ([torch.bfloat16] if is_sm8x else []))
# @pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize('inplace_backward', [False, True])
# @pytest.mark.parametrize('inplace_backward', [False])
@pytest.mark.parametrize('vocab_size', [50257])
def test_cross_entropy_loss_apex(vocab_size, inplace_backward, dtype):
device = 'cuda'
rtol, atol = (1e-5, 1e-6) if dtype == torch.float32 else (1e-3, 1e-4)
# set seed
torch.random.manual_seed(0)
batch_size = 8
seqlen = 128
x_pt = torch.randn(batch_size * seqlen, vocab_size, device=device, dtype=dtype, requires_grad=True)
x = x_pt.detach().clone().requires_grad_()
y = torch.randint(0, vocab_size, (batch_size * seqlen,), dtype=torch.long, device=device)
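    # Mark a few random positions with -100, the default ignore_index of torch.nn.CrossEntropyLoss,
    # so they are excluded from the loss.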
y[torch.randperm(batch_size * seqlen)[:10]] = -100
model_pt = torch.nn.CrossEntropyLoss()
model = CrossEntropyLossApex(inplace_backward=inplace_backward)
out = model(x, y)
out_pt = model_pt(x_pt.float(), y)
assert torch.allclose(out, out_pt, rtol=rtol, atol=atol)
g = torch.randn_like(out)
out_pt.backward(g)
out.backward(g)
assert torch.allclose(x.grad, x_pt.grad, rtol=rtol, atol=atol)
| flash-attention-main | tests/losses/test_cross_entropy_apex.py |
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from flash_attn.ops.layer_norm import DropoutAddLayerNorm, dropout_add_layer_norm
is_sm8x = torch.cuda.get_device_capability('cuda')[0] >= 8
@pytest.mark.parametrize('has_rowscale', [True, False])
# @pytest.mark.parametrize('has_rowscale', [True])
@pytest.mark.parametrize('has_residual', [True, False])
# @pytest.mark.parametrize('has_residual', [False])
@pytest.mark.parametrize('dropout_p', [0.37, 0.0])
# @pytest.mark.parametrize('dropout_p', [0.0])
@pytest.mark.parametrize('weight_dtype', [torch.float32, torch.float16])
# @pytest.mark.parametrize('weight_dtype', [torch.float32])
@pytest.mark.parametrize('input_dtype,residual_dtype',
[(torch.float16, torch.float16), (torch.float16, torch.float32),
(torch.float32, torch.float32)]
+ ([(torch.bfloat16, torch.bfloat16), (torch.bfloat16, torch.float32)] if is_sm8x else []))
# @pytest.mark.parametrize('input_dtype,residual_dtype', [(torch.float16, torch.float32)])
@pytest.mark.parametrize('hidden_size', [768, 1024, 1280, 1536, 1600, 2048, 2560, 3072, 4096, 5120])
# @pytest.mark.parametrize('hidden_size', [768])
def test_dropout_layer_norm_training(hidden_size, input_dtype, residual_dtype, weight_dtype,
dropout_p, has_residual, has_rowscale):
if weight_dtype == torch.float16 and input_dtype == torch.bfloat16:
pytest.skip() # Not supported
# Backward numerical error is high, and this case isn't used
if has_rowscale and not has_residual:
pytest.skip()
device = 'cuda'
# rtol, atol = (1e-5, 1e-6) if input_dtype == torch.float32 else (1e-3, 1e-4)
rtol, atol = (1e-3, 1e-4)
# set seed
torch.random.manual_seed(0)
batch_size = 8
seqlen = 512
x0_pt = torch.randn(batch_size, seqlen, hidden_size, device=device, dtype=input_dtype,
requires_grad=True)
x0 = x0_pt.detach().clone().requires_grad_()
x0_ref = x0_pt.detach().clone().float().requires_grad_()
if has_residual:
x1_pt = torch.randn_like(x0, dtype=residual_dtype, requires_grad=True)
x1 = x1_pt.detach().clone().requires_grad_()
x1_ref = x1_pt.detach().clone().float().requires_grad_()
else:
x1 = None
if has_rowscale:
rowscale = torch.empty(batch_size, seqlen, device=device, dtype=input_dtype)
survival_rate = 0.87
rowscale = rowscale.bernoulli_(survival_rate) / survival_rate
x0_scaled_pt = x0_pt * rearrange(rowscale, '... -> ... 1')
x0_scaled_ref = x0_ref * rearrange(rowscale, '... -> ... 1')
else:
rowscale = None
x0_scaled_pt = x0_pt
x0_scaled_ref = x0_ref
model_pt = torch.nn.LayerNorm(hidden_size, device=device, dtype=weight_dtype)
torch.nn.init.normal_(model_pt.weight)
torch.nn.init.normal_(model_pt.bias)
model_ref = torch.nn.LayerNorm(hidden_size, device=device, dtype=torch.float32)
model = DropoutAddLayerNorm(hidden_size, p=dropout_p, device=device, dtype=weight_dtype)
with torch.no_grad():
model.weight.copy_(model_pt.weight)
model.bias.copy_(model_pt.bias)
model_ref.weight.copy_(model_pt.weight)
model_ref.bias.copy_(model_pt.bias)
residual_in_fp32 = (not has_residual) and residual_dtype == torch.float32
out, dmask = dropout_add_layer_norm(x0, x1, model.weight, model.bias, model.p,
model.epsilon, rowscale=rowscale,
residual_in_fp32=residual_in_fp32, return_dropout_mask=True)
assert out.dtype == input_dtype
print(f'Actual dropout fraction: {1 - dmask.float().mean().item()}')
if has_residual:
residual_pt = ((x0_scaled_pt.float() * dmask.float()) / (1 - dropout_p) + x1_pt.float()).to(dtype=residual_dtype)
residual_ref = (x0_scaled_ref * dmask.float()) / (1 - dropout_p) + x1_ref
else:
residual_pt = ((x0_scaled_pt.float() * dmask.float()) / (1 - dropout_p)).to(dtype=residual_dtype)
residual_ref = (x0_scaled_ref * dmask.float()) / (1 - dropout_p)
out_pt = model_pt(residual_pt.to(dtype=weight_dtype)).to(dtype=input_dtype)
out_ref = model_ref(residual_ref)
assert (out - out_ref).abs().max() <= 4 * (out_pt - out_ref).abs().max() + 1e-4
g = torch.randn_like(out) / batch_size
out_pt.backward(g)
out.backward(g)
out_ref.backward(g)
assert (x0.grad - x0_ref.grad).abs().max() <= 4 * (x0_pt.grad - x0_ref.grad).abs().max() + 1e-4
if has_residual:
assert (x1.grad - x1_ref.grad).abs().max() <= 4 * (x1_pt.grad - x1_ref.grad).abs().max() + 1e-4
assert (model.weight.grad - model_ref.weight.grad).abs().max() <= 2 * (model_pt.weight.grad - model_ref.weight.grad).abs().max() + 3e-5
assert (model.bias.grad - model_ref.bias.grad).abs().max() <= 2 * (model_pt.bias.grad - model_ref.bias.grad).abs().max() + 3e-5
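# Illustrative recap of the semantics checked above (with dropout's 1 / (1 - p) scaling folded in):
#     out = LayerNorm(dropout(rowscale * x0) + x1)
# The prenorm variants tested further below additionally return the pre-LayerNorm residual.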
@pytest.mark.parametrize('weight_dtype', [torch.float32, torch.float16])
@pytest.mark.parametrize('input_dtype,residual_dtype',
[(torch.float16, torch.float16), (torch.float16, torch.float32),
(torch.float32, torch.float32)]
+ ([(torch.bfloat16, torch.bfloat16), (torch.bfloat16, torch.float32)] if is_sm8x else []))
@pytest.mark.parametrize('hidden_size', [768, 1024, 1280, 1536, 1600, 2048, 2560, 3072, 4096, 5120])
def test_dropout_layer_norm_eval(hidden_size, input_dtype, residual_dtype, weight_dtype):
if weight_dtype == torch.float16 and input_dtype == torch.bfloat16:
pytest.skip() # Not supported
device = 'cuda'
# rtol, atol = (1e-5, 1e-6) if dtype == torch.float32 else (1e-3, 1e-4)
rtol, atol = (1e-3, 1e-4)
dropout_p = 0.37
# set seed
torch.random.manual_seed(0)
batch_size = 32
seqlen = 512
x0_pt = torch.randn(batch_size, seqlen, hidden_size, device=device, dtype=input_dtype,
requires_grad=True)
x0 = x0_pt.detach().clone().requires_grad_()
x0_ref = x0_pt.detach().clone().float().requires_grad_()
x1_pt = torch.randn_like(x0, dtype=residual_dtype, requires_grad=True)
x1 = x1_pt.detach().clone().requires_grad_()
x1_ref = x1_pt.detach().clone().float().requires_grad_()
model_pt = torch.nn.LayerNorm(hidden_size, device=device, dtype=weight_dtype)
model = DropoutAddLayerNorm(hidden_size, p=dropout_p, device=device, dtype=weight_dtype)
model_ref = torch.nn.LayerNorm(hidden_size, device=device, dtype=torch.float32)
with torch.no_grad():
model.weight.copy_(model_pt.weight)
model.bias.copy_(model_pt.bias)
model_ref.weight.copy_(model_pt.weight)
model_ref.bias.copy_(model_pt.bias)
model_pt.eval()
model.eval()
model_ref.eval()
out = model(x0, x1)
residual_pt = (x0_pt.float() + x1_pt.float()).to(dtype=residual_dtype)
residual_ref = x0_ref + x1_ref
out_pt = model_pt(residual_pt.to(dtype=weight_dtype)).to(input_dtype)
out_ref = model_ref(residual_ref)
assert (out - out_ref).abs().max() <= 4 * (out_pt - out_ref).abs().max() + 1e-4
@pytest.mark.parametrize('has_rowscale', [True, False])
@pytest.mark.parametrize('has_residual', [True, False])
@pytest.mark.parametrize('dropout_p', [0.37, 0.0])
@pytest.mark.parametrize('weight_dtype', [torch.float32, torch.float16])
@pytest.mark.parametrize('input_dtype,residual_dtype',
[(torch.float16, torch.float16), (torch.float16, torch.float32),
(torch.float32, torch.float32)]
+ ([(torch.bfloat16, torch.bfloat16), (torch.bfloat16, torch.float32)] if is_sm8x else []))
@pytest.mark.parametrize('hidden_size', [768, 1024, 1280, 1536, 1600, 2048, 2560, 3072, 4096, 5120])
def test_dropout_layer_norm_prenorm_training(hidden_size, input_dtype, residual_dtype, weight_dtype,
dropout_p, has_residual, has_rowscale):
if weight_dtype == torch.float16 and input_dtype == torch.bfloat16:
pytest.skip() # Not supported
# Backward numerical error is high, and this case isn't used
if has_rowscale and not has_residual:
pytest.skip()
device = 'cuda'
# rtol, atol = (1e-5, 1e-6) if input_dtype == torch.float32 else (1e-3, 1e-4)
rtol, atol = (1e-3, 2e-4)
# set seed
torch.random.manual_seed(0)
batch_size = 8
seqlen = 512
x0_pt = torch.randn(batch_size, seqlen, hidden_size, device=device, dtype=input_dtype,
requires_grad=True)
x0 = x0_pt.detach().clone().requires_grad_()
x0_ref = x0_pt.detach().clone().float().requires_grad_()
if has_residual:
x1_pt = torch.randn_like(x0, dtype=residual_dtype, requires_grad=True)
x1 = x1_pt.detach().clone().requires_grad_()
x1_ref = x1_pt.detach().clone().float().requires_grad_()
else:
x1 = None
if has_rowscale:
rowscale = torch.empty(batch_size, seqlen, device=device, dtype=input_dtype)
survival_rate = 0.87
rowscale = rowscale.bernoulli_(survival_rate) / survival_rate
x0_scaled_pt = x0_pt * rearrange(rowscale, '... -> ... 1')
x0_scaled_ref = x0_ref * rearrange(rowscale, '... -> ... 1')
else:
rowscale = None
x0_scaled_pt = x0_pt
x0_scaled_ref = x0_ref
model_pt = torch.nn.LayerNorm(hidden_size, device=device, dtype=weight_dtype)
model_ref = torch.nn.LayerNorm(hidden_size, device=device, dtype=torch.float32)
model = DropoutAddLayerNorm(hidden_size, prenorm=True, p=dropout_p, device=device,
dtype=weight_dtype)
with torch.no_grad():
model.weight.copy_(model_pt.weight)
model.bias.copy_(model_pt.bias)
model_ref.weight.copy_(model_pt.weight)
model_ref.bias.copy_(model_pt.bias)
residual_in_fp32 = (not has_residual) and residual_dtype == torch.float32
out, residual, dmask = dropout_add_layer_norm(x0, x1, model.weight, model.bias, model.p,
model.epsilon, rowscale=rowscale, prenorm=True,
residual_in_fp32=residual_in_fp32,
return_dropout_mask=True)
print(f'Actual dropout fraction: {1 - dmask.float().mean().item()}')
if has_residual:
residual_pt = ((x0_scaled_pt.float() * dmask.float()) / (1 - dropout_p) + x1_pt.float()).to(dtype=residual_dtype)
residual_ref = (x0_scaled_ref * dmask.float()) / (1 - dropout_p) + x1_ref
else:
residual_pt = ((x0_scaled_pt.float() * dmask.float()) / (1 - dropout_p)).to(dtype=residual_dtype)
residual_ref = (x0_scaled_ref * dmask.float()) / (1 - dropout_p)
out_pt = model_pt(residual_pt.to(dtype=weight_dtype)).to(dtype=input_dtype)
out_ref = model_ref(residual_ref)
assert out.dtype == input_dtype
assert residual.dtype == residual_dtype
assert (out - out_ref).abs().max() <= 4 * (out_pt - out_ref).abs().max() + 1e-4
assert (residual - residual_ref).abs().max() <= 4 * (residual_pt - residual_ref).abs().max() + 1e-4
g = torch.randn_like(out) / batch_size
(out_pt * F.sigmoid(residual_pt)).backward(g)
(out * F.sigmoid(residual)).backward(g)
(out_ref * F.sigmoid(residual_ref.to(dtype=residual_dtype))).backward(g)
assert (x0.grad - x0_ref.grad).abs().max() <= 4 * (x0_pt.grad - x0_ref.grad).abs().max() + 1e-4
if has_residual:
assert (x1.grad - x1_ref.grad).abs().max() <= 4 * (x1_pt.grad - x1_ref.grad).abs().max() + 1e-4
assert (model.weight.grad - model_ref.weight.grad).abs().max() <= 2 * (model_pt.weight.grad - model_ref.weight.grad).abs().max() + 2e-4
assert (model.bias.grad - model_ref.bias.grad).abs().max() <= 2 * (model_pt.bias.grad - model_ref.bias.grad).abs().max() + 2e-4
@pytest.mark.parametrize('weight_dtype', [torch.float32, torch.float16])
@pytest.mark.parametrize('input_dtype,residual_dtype',
[(torch.float16, torch.float16), (torch.float16, torch.float32),
(torch.float32, torch.float32)]
+ ([(torch.bfloat16, torch.bfloat16), (torch.bfloat16, torch.float32)] if is_sm8x else []))
@pytest.mark.parametrize('hidden_size', [768, 1024, 1280, 1536, 1600, 2048, 2560, 3072, 4096, 5120])
def test_dropout_layer_norm_prenorm_eval(hidden_size, input_dtype, residual_dtype, weight_dtype):
if weight_dtype == torch.float16 and input_dtype == torch.bfloat16:
pytest.skip() # Not supported
device = 'cuda'
# rtol, atol = (1e-5, 1e-6) if dtype == torch.float32 else (1e-3, 1e-4)
rtol, atol = (1e-3, 1e-4)
dropout_p = 0.37
# set seed
torch.random.manual_seed(0)
batch_size = 32
seqlen = 512
x0_pt = torch.randn(batch_size, seqlen, hidden_size, device=device, dtype=input_dtype,
requires_grad=True)
x0 = x0_pt.detach().clone().requires_grad_()
x0_ref = x0_pt.detach().clone().float().requires_grad_()
x1_pt = torch.randn_like(x0, dtype=residual_dtype, requires_grad=True)
x1 = x1_pt.detach().clone().requires_grad_()
x1_ref = x1_pt.detach().clone().float().requires_grad_()
model_pt = torch.nn.LayerNorm(hidden_size, device=device, dtype=weight_dtype)
model = DropoutAddLayerNorm(hidden_size, prenorm=True, p=dropout_p, device=device,
dtype=weight_dtype)
model_ref = torch.nn.LayerNorm(hidden_size, device=device, dtype=torch.float32)
with torch.no_grad():
model.weight.copy_(model_pt.weight)
model.bias.copy_(model_pt.bias)
model_ref.weight.copy_(model_pt.weight)
model_ref.bias.copy_(model_pt.bias)
model_pt.eval()
model.eval()
model_ref.eval()
out, residual = model(x0, x1)
residual_pt = (x0_pt.float() + x1_pt.float()).to(dtype=residual_dtype)
residual_ref = x0_ref + x1_ref
out_pt = model_pt(residual_pt.to(dtype=weight_dtype)).to(input_dtype)
out_ref = model_ref(residual_ref)
assert (out - out_ref).abs().max() <= 4 * (out_pt - out_ref).abs().max() + 1e-4
assert (residual - residual_ref).abs().max() <= 4 * (residual_pt - residual_ref).abs().max() + 1e-4
| flash-attention-main | tests/ops/test_dropout_layer_norm.py |
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from flash_attn.ops.fused_dense import FusedDenseTD, FusedDenseGeluDenseTD
from flash_attn.ops.fused_dense import FusedDenseResidual, FusedDenseResGeluDense
@pytest.mark.parametrize('dtype', [torch.float16, torch.bfloat16])
@pytest.mark.parametrize('out_features', [1024, 4096])
@pytest.mark.parametrize('in_features', [1024, 4096])
def test_fused_linear_bias(in_features, out_features, dtype):
device = 'cuda'
rtol, atol = (3e-3, 1e-2) if dtype == torch.bfloat16 else (3e-3, 1e-3)
# set seed
torch.random.manual_seed(0)
batch_size = 8
seqlen = 512
x_pt = torch.randn(batch_size, seqlen, in_features, device=device, dtype=dtype, requires_grad=True)
x = x_pt.detach().clone().requires_grad_()
model_pt = torch.nn.Linear(in_features, out_features, device=device, dtype=dtype)
model = FusedDenseTD(in_features, out_features, device=device, dtype=dtype)
with torch.no_grad():
model.weight.copy_(model_pt.weight)
model.bias.copy_(model_pt.bias)
out_pt = model_pt(x_pt)
out = model(x)
# with torch.no_grad():
# out_fl = F.linear(x_pt.float(), model.weight.float(), model.bias.float()).half()
assert torch.allclose(out, out_pt, rtol=rtol, atol=atol)
# If we don't divide by batch_size, the gradient gets a bit too large.
g = torch.randn_like(out) / 32
out_pt.backward(g)
out.backward(g)
assert torch.allclose(x.grad, x_pt.grad, rtol=rtol, atol=atol)
# The error for d_weight and d_bias is quite a bit higher
assert torch.allclose(model.weight.grad, model_pt.weight.grad, rtol=rtol, atol=atol * 10)
assert torch.allclose(model.bias.grad, model_pt.bias.grad, rtol=rtol, atol=atol * 5)
@pytest.mark.parametrize('dtype', [torch.float16, torch.bfloat16])
@pytest.mark.parametrize('out_features,in_features', [(1024, 1024), (4096, 4096)])
def test_fused_linear_bias_residual(in_features, out_features, dtype):
device = 'cuda'
rtol, atol = (3e-3, 1e-2) if dtype == torch.bfloat16 else (3e-3, 1e-3)
# set seed
torch.random.manual_seed(0)
batch_size = 8
seqlen = 512
x_pt = torch.randn(batch_size, seqlen, in_features, device=device, dtype=dtype, requires_grad=True)
x = x_pt.detach().clone().requires_grad_()
model_pt = torch.nn.Linear(in_features, out_features, device=device, dtype=dtype)
model = FusedDenseResidual(in_features, out_features, device=device, dtype=dtype)
with torch.no_grad():
model.weight.copy_(model_pt.weight)
model.bias.copy_(model_pt.bias)
out_pt = model_pt(x_pt) + F.gelu(x_pt) # Just add some random function of the residual x_pt
out, x_copy = model(x)
out = out + F.gelu(x_copy)
assert torch.allclose(out, out_pt, rtol=rtol, atol=atol * 2)
# If we don't divide by batch_size, the gradient gets a bit too large.
g = torch.randn_like(out) / 32
out_pt.backward(g)
out.backward(g)
assert torch.allclose(x.grad, x_pt.grad, rtol=rtol, atol=atol)
# The error for d_weight and d_bias is quite a bit higher
assert torch.allclose(model.weight.grad, model_pt.weight.grad, rtol=rtol, atol=atol * 10)
assert torch.allclose(model.bias.grad, model_pt.bias.grad, rtol=rtol, atol=atol * 5)
@pytest.mark.parametrize('dtype', [torch.float16, torch.bfloat16])
@pytest.mark.parametrize('heuristic', [1, -1])
@pytest.mark.parametrize('checkpoint_lvl', [0, 1, 2])
@pytest.mark.parametrize('out_features', [1024, 4096])
@pytest.mark.parametrize('in_features', [1024, 4096])
def test_fused_dense_gelu_dense(in_features, out_features, checkpoint_lvl, heuristic, dtype):
device = 'cuda'
rtol, atol = (3e-3, 1e-2) if dtype == torch.bfloat16 else (3e-3, 1e-3)
# set seed
torch.random.manual_seed(0)
batch_size = 8
seqlen = 512
x_pt = torch.randn(batch_size, seqlen, in_features, device=device, dtype=dtype, requires_grad=True)
x = x_pt.detach().clone().requires_grad_()
model_pt_fc1 = torch.nn.Linear(in_features, out_features, device=device, dtype=dtype)
model_pt_fc2 = torch.nn.Linear(out_features, in_features, device=device, dtype=dtype)
model = FusedDenseGeluDenseTD(in_features, out_features, in_features,
checkpoint_lvl=checkpoint_lvl, heuristic=heuristic,
device=device, dtype=dtype)
with torch.no_grad():
model.fc1.weight.copy_(model_pt_fc1.weight)
model.fc1.bias.copy_(model_pt_fc1.bias)
model.fc2.weight.copy_(model_pt_fc2.weight)
model.fc2.bias.copy_(model_pt_fc2.bias)
out_pt = model_pt_fc2(F.gelu(model_pt_fc1(x_pt), approximate='tanh'))
out = model(x)
assert torch.allclose(out, out_pt, rtol=rtol, atol=atol)
# If we don't divide by batch_size, the gradient gets a bit too large.
g = torch.randn_like(out) / 32
out_pt.backward(g)
out.backward(g)
assert torch.allclose(x.grad, x_pt.grad, rtol=rtol, atol=atol)
# The error for d_weight and d_bias is quite a bit higher
assert torch.allclose(model.fc1.weight.grad, model_pt_fc1.weight.grad, rtol=rtol, atol=atol * 10)
assert torch.allclose(model.fc1.bias.grad, model_pt_fc1.bias.grad, rtol=rtol, atol=atol * 5)
assert torch.allclose(model.fc2.weight.grad, model_pt_fc2.weight.grad, rtol=rtol, atol=atol * 10)
assert torch.allclose(model.fc2.bias.grad, model_pt_fc2.bias.grad, rtol=rtol, atol=atol * 5)
@pytest.mark.parametrize('dtype', [torch.float16, torch.bfloat16])
@pytest.mark.parametrize('checkpoint_lvl', [0, 1, 2])
@pytest.mark.parametrize('out_features', [1024, 4096])
@pytest.mark.parametrize('in_features', [1024, 4096])
def test_fused_dense_residual_gelu_dense(in_features, out_features, checkpoint_lvl, dtype):
device = 'cuda'
rtol, atol = (3e-3, 1e-2) if dtype == torch.bfloat16 else (3e-3, 1e-3)
# set seed
torch.random.manual_seed(0)
batch_size = 8
seqlen = 512
x_pt = torch.randn(batch_size, seqlen, in_features, device=device, dtype=dtype, requires_grad=True)
x = x_pt.detach().clone().requires_grad_()
model_pt_fc1 = torch.nn.Linear(in_features, out_features, device=device, dtype=dtype)
model_pt_fc2 = torch.nn.Linear(out_features, in_features, device=device, dtype=dtype)
model = FusedDenseResGeluDense(in_features, out_features, in_features,
checkpoint_lvl=checkpoint_lvl,
device=device, dtype=dtype)
with torch.no_grad():
model.fc1.weight.copy_(model_pt_fc1.weight)
model.fc1.bias.copy_(model_pt_fc1.bias)
model.fc2.weight.copy_(model_pt_fc2.weight)
model.fc2.bias.copy_(model_pt_fc2.bias)
out_pt = model_pt_fc2(F.gelu(model_pt_fc1(x_pt), approximate='tanh')) + F.gelu(x_pt)
out, x_copy = model(x)
out = out + F.gelu(x_copy)
assert torch.allclose(out, out_pt, rtol=rtol, atol=atol * 2)
# If we don't divide by batch_size, the gradient gets a bit too large.
g = torch.randn_like(out) / 32
out_pt.backward(g)
out.backward(g)
assert torch.allclose(x.grad, x_pt.grad, rtol=rtol, atol=atol)
# The error for d_weight and d_bias is quite a bit higher
assert torch.allclose(model.fc1.weight.grad, model_pt_fc1.weight.grad, rtol=rtol, atol=atol * 10)
assert torch.allclose(model.fc1.bias.grad, model_pt_fc1.bias.grad, rtol=rtol, atol=atol * 5)
assert torch.allclose(model.fc2.weight.grad, model_pt_fc2.weight.grad, rtol=rtol, atol=atol * 10)
assert torch.allclose(model.fc2.bias.grad, model_pt_fc2.bias.grad, rtol=rtol, atol=atol * 5)
| flash-attention-main | tests/ops/test_fused_dense.py |
from functools import partial
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from flash_attn.utils.benchmark import benchmark_forward, benchmark_all, pytorch_profiler
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
# from flash_attn.triton.fused_attention import attention as attention
from flash_attn.flash_attn_triton import flash_attn_qkvpacked_func
from flash_attn.flash_attn_triton_og import attention as attention_og
try:
from flash_attn.fused_softmax import scaled_upper_triang_masked_softmax
except ImportError:
scaled_upper_triang_masked_softmax = None
def attention_pytorch(qkv, dropout_p=0.0, causal=True):
"""
Arguments:
qkv: (batch_size, seqlen, 3, nheads, head_dim)
dropout_p: float
Output:
output: (batch_size, seqlen, nheads, head_dim)
"""
batch_size, seqlen, _, nheads, d = qkv.shape
q, k, v = qkv.unbind(dim=2)
q = rearrange(q, 'b t h d -> (b h) t d')
k = rearrange(k, 'b s h d -> (b h) d s')
softmax_scale = 1.0 / math.sqrt(d)
# Preallocate attn_weights for `baddbmm`
scores = torch.empty(batch_size * nheads, seqlen, seqlen, dtype=qkv.dtype, device=qkv.device)
scores = rearrange(torch.baddbmm(scores, q, k, beta=0, alpha=softmax_scale),
'(b h) t s -> b h t s', h=nheads)
if causal:
# "triu_tril_cuda_template" not implemented for 'BFloat16'
# So we have to construct the mask in float
causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + causal_mask.to(dtype=scores.dtype)
attention = torch.softmax(scores, dim=-1)
attention_drop = F.dropout(attention, dropout_p)
    output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
return output.to(dtype=qkv.dtype)
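# Hedged illustration (added; not part of the original benchmark): a tiny CPU-sized
# call that exercises the shape contract documented in the docstring above. The helper
# name below is ours.
def _check_attention_pytorch_shapes():
    b, s, h, d = 2, 16, 3, 8
    qkv_small = torch.randn(b, s, 3, h, d)
    out = attention_pytorch(qkv_small, dropout_p=0.0, causal=True)
    assert out.shape == (b, s, h, d)
    return out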
def attention_megatron(qkv):
"""
Arguments:
qkv: (batch_size, seqlen, 3, nheads, head_dim)
Output:
output: (batch_size, seqlen, nheads, head_dim)
"""
batch_size, seqlen, _, nheads, d = qkv.shape
q, k, v = qkv.unbind(dim=2)
q = rearrange(q, 'b t h d -> (b h) t d')
k = rearrange(k, 'b s h d -> (b h) d s')
softmax_scale = 1.0 / math.sqrt(d)
# Preallocate attn_weights for `baddbmm`
scores = torch.empty(batch_size * nheads, seqlen, seqlen, dtype=qkv.dtype, device=qkv.device)
scores = rearrange(torch.baddbmm(scores, q, k, beta=0, alpha=softmax_scale),
'(b h) t s -> b h t s', h=nheads)
attention = scaled_upper_triang_masked_softmax(scores, None, scale=1.0)
output = torch.einsum('bhts,bshd->bthd', attention, v)
return output.to(dtype=qkv.dtype)
torch.manual_seed(0)
repeats = 30
batch_size = 2
seqlen = 4096
nheads = 12
headdim = 128
# batch_size = 64
# seqlen = 512
# nheads = 8
# headdim = 128
dropout_p = 0.0
causal = True
dtype = torch.bfloat16
device = 'cuda'
qkv = torch.randn(batch_size, seqlen, 3, nheads, headdim, device=device, dtype=dtype,
requires_grad=True)
cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
device=qkv.device)
benchmark_all(flash_attn_unpadded_qkvpacked_func, rearrange(qkv, 'b s ... -> (b s) ...'),
cu_seqlens, seqlen, dropout_p, causal=causal, repeats=repeats, desc='FlashAttention')
benchmark_all(attention_pytorch, qkv, dropout_p, causal=causal,
repeats=repeats, desc='PyTorch Attention')
benchmark_all(flash_attn_qkvpacked_func, qkv, causal, repeats=repeats, desc='FlashAttention Triton')
pytorch_profiler(flash_attn_qkvpacked_func, qkv, causal, backward=True)
q, k, v = [torch.randn(batch_size, nheads, seqlen, headdim, device=device, dtype=dtype,
requires_grad=True) for _ in range(3)]
benchmark_all(attention_og, q, k, v, 1.0, repeats=repeats, desc='FlashAttention Triton OG')
# pytorch_profiler(attention, q, k, v, 1.0, backward=True)
if scaled_upper_triang_masked_softmax is not None:
benchmark_all(attention_megatron, qkv, repeats=repeats, desc='Megatron Attention')
| flash-attention-main | benchmarks/benchmark_causal.py |
from functools import partial
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from flash_attn.utils.benchmark import benchmark_all, benchmark_forward, benchmark_backward, benchmark_combined
from flash_attn.bert_padding import unpad_input, pad_input
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
def attention_ref(qkv, attn_mask, dropout_p, upcast=False, causal=False):
"""
Arguments:
qkv: (batch_size, seqlen, 3, nheads, head_dim)
attn_mask: (batch_size, seqlen)
dropout_p: float
Output:
output: (batch_size, seqlen, nheads, head_dim)
attention: softmax after dropout
"""
q, k, v = (qkv.float() if upcast else qkv).unbind(dim=2)
seqlen = qkv.shape[1]
d = qkv.shape[-1]
scores = torch.einsum('bthd,bshd->bhts', q, k / math.sqrt(d))
scores.masked_fill_(rearrange(~attn_mask, 'b s -> b 1 1 s'), float('-inf'))
if causal:
causal_mask = torch.triu(torch.ones(seqlen, seqlen, dtype=torch.bool, device=qkv.device), 1)
scores.masked_fill_(causal_mask, float('-inf'))
attention = torch.softmax(scores, dim=-1)
attention_drop = F.dropout(attention, dropout_p)
    output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
# return output.to(dtype=qkv.dtype), attention.to(dtype=qkv.dtype)
return output.to(dtype=qkv.dtype)
torch.manual_seed(0)
repeats = 30
batch_size = 64
nheads = 16
seqlen = 1024
n = 1024
d = n // nheads
dropout_p = 0.1
causal = False
dtype = torch.float16
device = 'cuda'
x = torch.randn(batch_size, seqlen, n, device='cuda', dtype=dtype, requires_grad=True)
Wqkv = torch.nn.Linear(nheads * d, 3 * nheads * d, device=device, dtype=dtype)
lengths = torch.randint(seqlen - 20, seqlen, (batch_size, 1), device='cuda')
attention_mask_bool = repeat(torch.arange(seqlen, device='cuda'), 's -> b s', b=batch_size) < lengths
attention_mask = torch.zeros(batch_size, seqlen, device='cuda', dtype=dtype)
attention_mask[~attention_mask_bool] = -10000.0
attention_mask = rearrange(attention_mask, 'b s -> b 1 1 s')
x_unpad, indices, cu_seqlens, max_seqlen_in_batch = unpad_input(x, attention_mask_bool)
qkv_unpad = rearrange(Wqkv(x_unpad), 'nnz (t h d) -> nnz t h d', t=3,
h=nheads).detach().requires_grad_()
qkv = rearrange(Wqkv(x), 'b s (t h d) -> b s t h d', t=3, h=nheads).detach().requires_grad_()
fn = lambda qkv_unpad: flash_attn_unpadded_qkvpacked_func(
qkv_unpad, cu_seqlens, max_seqlen_in_batch, dropout_p, causal=causal
)
benchmark_all(fn, qkv_unpad, repeats=repeats, desc='FlashAttention')
fn = lambda qkv: attention_ref(qkv, attention_mask_bool, dropout_p, causal=causal)
benchmark_all(fn, qkv, repeats=repeats, desc='PyTorch Standard Attention')
| flash-attention-main | benchmarks/benchmark_flash_attention.py |
# [2022-10-23] Copied from https://github.com/NVIDIA/apex/blob/master/apex/transformer/functional/fused_softmax.py
# for benchmarking.
# We added support for seqlen=2k and seqlen=4k
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from apex._autocast_utils import _cast_if_autocast_enabled
from apex.transformer.enums import AttnMaskType
from fused_softmax_lib import scaled_masked_softmax_forward, scaled_masked_softmax_backward
from fused_softmax_lib import scaled_masked_softmax_get_batch_per_block
from fused_softmax_lib import scaled_upper_triang_masked_softmax_forward, scaled_upper_triang_masked_softmax_backward
class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
"""
Fused operation which performs following three operations in sequence
1. Scale the tensor.
2. Apply upper triangular mask (typically used in gpt models).
3. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs, scale):
scale_t = torch.tensor([scale])
softmax_results = scaled_upper_triang_masked_softmax_forward(
inputs, scale_t[0]
)
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_upper_triang_masked_softmax_backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None
def scaled_upper_triang_masked_softmax(inputs, _, scale):
b, np, sq, sk = inputs.size()
assert sq == sk, "causal mask is only for self attention"
# Reshaping input to 3D tensor (attn_batches, sq, sk)
inputs = inputs.view(-1, sq, sk)
args = _cast_if_autocast_enabled(inputs, scale)
with torch.cuda.amp.autocast(enabled=False):
probs = ScaledUpperTriangMaskedSoftmax.apply(*args)
return probs.view(b, np, sq, sk)
# NOTE (mkozuki): `ScaledMaskedSoftmax` somehow doesn't work well with `torch.cuda.amp.custom_fwd`.
# Without `cast_inputs` kwarg, somehow inputs are not cast to dtype used in the autocast context.
# So I needed to manually write two `torch.autograd.Function` inheritances.
# Fused operation which performs following three operations in sequence
# 1. Scale the tensor.
# 2. Apply the mask.
# 3. Perform softmax.
class ScaledMaskedSoftmax(torch.autograd.Function):
@staticmethod
def forward(ctx, inputs, mask, scale):
scale_t = torch.tensor([scale])
softmax_results = scaled_masked_softmax_forward(inputs, mask, scale_t[0])
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_masked_softmax_backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None, None
def scaled_masked_softmax(inputs, mask, scale):
# input is 4D tensor (b, np, sq, sk)
args = _cast_if_autocast_enabled(inputs, mask, scale)
with torch.cuda.amp.autocast(enabled=False):
return ScaledMaskedSoftmax.apply(*args)
class FusedScaleMaskSoftmax(torch.nn.Module):
"""
fused operation: scaling + mask + softmax
Arguments:
input_in_fp16: flag to indicate if input in fp16 data format.
input_in_bf16: flag to indicate if input in bf16 data format.
attn_mask_type: attention mask type (pad or causal)
        scaled_masked_softmax_fusion: flag to indicate whether the user wants to use softmax fusion
        mask_func: mask function to be applied.
        softmax_in_fp32: if true, softmax is performed at fp32 precision.
scale: scaling factor used in input tensor scaling.
"""
def __init__(
self,
input_in_fp16,
input_in_bf16,
attn_mask_type,
scaled_masked_softmax_fusion,
mask_func,
softmax_in_fp32,
scale,
):
super().__init__()
self.input_in_fp16 = input_in_fp16
self.input_in_bf16 = input_in_bf16
if self.input_in_fp16 and self.input_in_bf16:
raise RuntimeError(
"both fp16 and bf16 flags cannot be active at the same time."
)
self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
self.attn_mask_type = attn_mask_type
self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion
self.mask_func = mask_func
self.softmax_in_fp32 = softmax_in_fp32
self.scale = scale
if not (self.scale is None or softmax_in_fp32):
raise RuntimeError("softmax should be in fp32 when scaled")
if self.scaled_masked_softmax_fusion:
if self.attn_mask_type == AttnMaskType.causal:
self.fused_softmax_func = scaled_upper_triang_masked_softmax
elif self.attn_mask_type == AttnMaskType.padding:
self.fused_softmax_func = scaled_masked_softmax
else:
raise ValueError("Invalid attn_mask_type.")
def forward(self, input, mask):
# [b, np, sq, sk]
assert input.dim() == 4
if self.is_kernel_available(mask, *input.size()):
return self.forward_fused_softmax(input, mask)
else:
return self.forward_torch_softmax(input, mask)
def is_kernel_available(self, mask, b, np, sq, sk):
attn_batches = b * np
if (
self.scaled_masked_softmax_fusion # user want to fuse
and self.input_in_float16 # input must be fp16
and (
self.attn_mask_type == AttnMaskType.causal
or (self.attn_mask_type == AttnMaskType.padding and mask is not None)
)
and 16 < sk <= 8192 # sk must be 16 ~ 8192
            and sq % 4 == 0  # sq must be divisible by 4
            and sk % 4 == 0  # sk must be divisible by 4
            and attn_batches % 4 == 0  # np * b must be divisible by 4
):
if 0 <= sk <= 8192:
batch_per_block = self.get_batch_per_block(sq, sk, b, np)
if self.attn_mask_type == AttnMaskType.causal:
if attn_batches % batch_per_block == 0:
return True
else:
if sq % batch_per_block == 0:
return True
return False
def forward_fused_softmax(self, input, mask):
# input.shape = [b, np, sq, sk]
scale = self.scale if self.scale is not None else 1.0
return self.fused_softmax_func(input, mask, scale)
def forward_torch_softmax(self, input, mask):
if self.input_in_float16 and self.softmax_in_fp32:
input = input.float()
if self.scale is not None:
input = input * self.scale
mask_output = self.mask_func(input, mask) if mask is not None else input
probs = torch.nn.Softmax(dim=-1)(mask_output)
if self.input_in_float16 and self.softmax_in_fp32:
if self.input_in_fp16:
probs = probs.half()
else:
probs = probs.bfloat16()
return probs
@staticmethod
def get_batch_per_block(sq, sk, b, np):
return scaled_masked_softmax_get_batch_per_block(sq, sk, b, np)
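# Hedged usage sketch (added; not part of the original file): construct the module with
# fusion disabled so forward() falls back to forward_torch_softmax, which needs only
# plain PyTorch. The mask function below is our own example, not an apex API.
if __name__ == "__main__":
    _softmax = FusedScaleMaskSoftmax(
        input_in_fp16=False,
        input_in_bf16=False,
        attn_mask_type=AttnMaskType.padding,
        scaled_masked_softmax_fusion=False,  # force the torch-softmax fallback path
        mask_func=lambda scores, mask: scores.masked_fill(mask, -10000.0),
        softmax_in_fp32=True,
        scale=None,
    )
    _scores = torch.randn(2, 4, 8, 8)  # (b, np, sq, sk)
    _mask = torch.zeros(2, 1, 8, 8, dtype=torch.bool)
    print(_softmax(_scores, _mask).shape)  # torch.Size([2, 4, 8, 8])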
| flash-attention-main | flash_attn/fused_softmax.py |
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/fmha.py
import torch
import torch.nn as nn
import flash_attn_cuda
def convert_blockmask(blockmask, causal):
"""Convert from the 0-1 format to the format used by the CUDA code.
0 means the block is skipped.
nonzero means the block is not skipped.
Argument:
blockmask: (row, col): a 0-1 tensor
Return:
blockmask_converted: (col, row), dtype torch.int32: for each column, it contains the row
indices of the nonzero blocks, padded with -1 to reach length @row.
The indices are multiplied by 4, with the smallest bit used to encode whether
it is the first nonzero in its row, and the 2nd smallest bit to encode whether it is
            the last nonzero in its row.
"""
assert not causal
    # TD [2022-05-13]: The indexing and sorting are very tricky
nrow, ncol = blockmask.shape
# Sort does not support bool on CUDA
blockmask = blockmask.to(dtype=torch.uint8)
nonzero_val, nonzero_sorted_rowidx = blockmask.sort(dim=0, stable=True, descending=True)
nonzero_unsorted_rowidx = nonzero_sorted_rowidx.argsort(dim=0)
last_nonzero_col_per_row = blockmask.sort(dim=-1, stable=True).indices[:, -1]
last_nonzero_col_per_row_after_sort = nonzero_unsorted_rowidx[
torch.arange(nrow, device=blockmask.device), last_nonzero_col_per_row
]
first_nonzero_col_per_row = blockmask.sort(dim=-1, stable=True, descending=True).indices[:, 0]
first_nonzero_col_per_row_after_sort = nonzero_unsorted_rowidx[
torch.arange(nrow, device=blockmask.device), first_nonzero_col_per_row
]
nonzero_idx = nonzero_sorted_rowidx * 4
nonzero_idx[last_nonzero_col_per_row_after_sort, last_nonzero_col_per_row] += 2
nonzero_idx[first_nonzero_col_per_row_after_sort, first_nonzero_col_per_row] += 1
nonzero_idx[nonzero_val == 0] = -1
return nonzero_idx.T.contiguous().to(dtype=torch.int32)
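# Hedged worked example (added; not part of the original file): the helper below is ours.
# It converts a tiny 2x2 block mask so the "row index * 4 plus first/last bits, -1 for
# skipped blocks" encoding described in the docstring can be inspected directly.
def _demo_convert_blockmask():
    blockmask = torch.tensor([[1, 0],
                              [1, 1]])
    converted = convert_blockmask(blockmask, causal=False)
    # converted has shape (ncol, nrow) = (2, 2) and dtype torch.int32.
    return converted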
def _flash_blocksparse_attn_forward(qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale,
causal, return_softmax):
context, softmax_lse, *rest = flash_attn_cuda.fwd_block(qkv, cu_seqlens, blockmask, dropout_p,
max_s, softmax_scale, causal,
return_softmax, None)
# if context.isnan().any() or softmax_lse.isnan().any():
# breakpoint()
S_dmask = rest[0] if return_softmax else None
return context, softmax_lse, S_dmask
def _flash_blocksparse_attn_backward(dout, qkv, out, S_dmask, softmax_lse, cu_seqlens, blockmask,
dropout_p, max_s, softmax_scale, causal):
dqkv, dp, softmax_d = flash_attn_cuda.bwd_block(dout, qkv, out, S_dmask, softmax_lse, cu_seqlens,
blockmask, dropout_p, softmax_scale, max_s,
causal, None)
# if dqkv.isnan().any() or softmax_d.isnan().any():
# breakpoint()
return dqkv
class FlashBlocksparseAttnFun(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale, causal):
# Save rng_state because the backward pass will regenerate the dropout mask
rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
if softmax_scale is None:
softmax_scale = qkv.shape[-1] ** (-0.5)
context, softmax_lse, S_dmask = _flash_blocksparse_attn_forward(
qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale, causal=causal,
return_softmax=False
)
ctx.save_for_backward(qkv, context, S_dmask, softmax_lse, cu_seqlens, blockmask, rng_state)
ctx.dropout_p = dropout_p
ctx.max_s = max_s
ctx.softmax_scale = softmax_scale
ctx.causal = causal
return context
@staticmethod
def backward(ctx, dout):
qkv, context, S_dmask, softmax_lse, cu_seqlens, blockmask, rng_state = ctx.saved_tensors
if rng_state is not None:
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(rng_state)
# S_dmask is None, temporarily use another tensor just to get it running
dqkv = _flash_blocksparse_attn_backward(
dout, qkv, context, context, softmax_lse, cu_seqlens, blockmask, ctx.dropout_p,
ctx.max_s, ctx.softmax_scale, ctx.causal
)
if rng_state is not None:
torch.cuda.set_rng_state(cur_rng_state)
return dqkv, None, None, None, None, None, None, None
# We duplicate code to return both the output and the softmax for testing
# Returning both makes backward a bit slower, so we want to keep using the other version for speed.
class FlashBlocksparseAttnFunWithS(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale, causal):
        # Save rng_state because the backward pass will regenerate the dropout mask
rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
if softmax_scale is None:
softmax_scale = qkv.shape[-1] ** (-0.5)
context, softmax_lse, S_dmask = _flash_blocksparse_attn_forward(
qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale, causal=causal,
return_softmax=True
)
ctx.save_for_backward(qkv, context, S_dmask, softmax_lse, cu_seqlens, blockmask, rng_state)
ctx.dropout_p = dropout_p
ctx.max_s = max_s
ctx.softmax_scale = softmax_scale
ctx.causal = causal
return context, S_dmask, softmax_lse
@staticmethod
def backward(ctx, dout, _dS_dmask_ignored, _dsoftmax_sum_ignored):
qkv, context, S_dmask, softmax_lse, cu_seqlens, blockmask, rng_state = ctx.saved_tensors
if rng_state is not None:
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(rng_state)
dqkv = _flash_blocksparse_attn_backward(
dout, qkv, context, S_dmask, softmax_lse, cu_seqlens, blockmask, ctx.dropout_p,
ctx.max_s, ctx.softmax_scale, ctx.causal
)
if rng_state is not None:
torch.cuda.set_rng_state(cur_rng_state)
return dqkv, None, None, None, None, None, None
def flash_blocksparse_attn_func(qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale=None,
causal=False, return_attn_probs=False, convert_mask=True):
"""dropout_p should be set to 0.0 during evaluation
"""
func = FlashBlocksparseAttnFun if not return_attn_probs else FlashBlocksparseAttnFunWithS
if convert_mask:
blockmask = convert_blockmask(blockmask, causal=causal)
return func.apply(qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale, causal)
| flash-attention-main | flash_attn/flash_blocksparse_attn_interface.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
import hydra
from flash_attn.flash_blocksparse_attn_interface import flash_blocksparse_attn_func
from flash_attn.flash_blocksparse_attn_interface import convert_blockmask
from flash_attn.bert_padding import unpad_input, pad_input, index_first_axis
class FlashBlocksparseAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.1)
"""
def __init__(self, sparsity_config, softmax_temp=None, attention_dropout=0.0,
max_seq_length=2048, device=None, dtype=None):
super().__init__()
self.sparsity_config = hydra.utils.instantiate(sparsity_config)
self.softmax_temp = softmax_temp
self.dropout_p = attention_dropout
# initialize sparse layout and register as buffer
max_seq_length = ((max_seq_length + 256 - 1) // 256) * 256
layout = self.sparsity_config.make_layout(max_seq_length)
self.register_buffer("layout", layout)
blockmask_converted = convert_blockmask(self.layout, causal=False)
self.register_buffer("blockmask_converted", blockmask_converted)
# logger.info(f'Attention class {self.__class__}: saving={self.layout.float().mean()}')
def forward(self, qkv, attn_mask=None, key_padding_mask=None, causal=False, cu_seqlens=None,
max_s=None, need_weights=False, convert_mask=True):
"""Implements the multihead softmax attention.
Arguments
---------
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
attn_mask: An implementation of BaseMask that encodes where each
query can attend to
key_padding_mask: An implementation of BaseMask that encodes how
            many queries each sequence in the batch consists of
"""
assert not need_weights
assert attn_mask is None
assert qkv.dtype == torch.float16
assert qkv.is_cuda
if cu_seqlens is None:
batch_size = qkv.shape[0]
seqlen = qkv.shape[1]
# Convert mask to take a subset
seqlen_rounded = ((seqlen + 256 - 1) // 256) * 256
            assert seqlen_rounded // 16 <= self.layout.shape[0] and seqlen_rounded // 256 <= self.layout.shape[1]
blockmask = self.layout[:seqlen_rounded // 16, :seqlen_rounded // 256]
if key_padding_mask is None:
qkv = rearrange(qkv, 'b s ... -> (b s) ...')
max_s = seqlen
cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
device=qkv.device)
output = flash_blocksparse_attn_func(
qkv, cu_seqlens, blockmask, self.dropout_p if self.training else 0.0,
max_s, softmax_scale=self.softmax_temp, causal=causal
)
output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
else:
key_padding_mask_bool = key_padding_mask.bool_matrix
nheads = qkv.shape[-2]
x = rearrange(qkv, 'b s three h d -> b s (three h d)')
x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask_bool)
x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
output_unpad = flash_blocksparse_attn_func(
x_unpad, cu_seqlens, blockmask, self.dropout_p if self.training else 0.0,
max_s, softmax_scale=self.softmax_temp, causal=causal
)
output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
indices, batch_size, seqlen),
'b s (h d) -> b s h d', h=nheads)
else:
assert max_s is not None
seqlen = max_s
# Convert mask to take a subset
seqlen_rounded = ((seqlen + 256 - 1) // 256) * 256
            assert seqlen_rounded // 16 <= self.layout.shape[0] and seqlen_rounded // 256 <= self.layout.shape[1]
blockmask = self.layout[:seqlen_rounded // 16, :seqlen_rounded // 256]
if convert_mask:
output = flash_blocksparse_attn_func(
qkv, cu_seqlens, blockmask, self.dropout_p if self.training else 0.0,
max_s, softmax_scale=self.softmax_temp, causal=causal
)
else:
output = flash_blocksparse_attn_func(
qkv, cu_seqlens, self.blockmask_converted, self.dropout_p if self.training else 0.0,
max_s, softmax_scale=self.softmax_temp, causal=causal,
convert_mask=False,
)
return output, None
class FlashBlocksparseMHA(nn.Module):
def __init__(self, embed_dim, num_heads, sparsity_config, bias=True, batch_first=True,
attention_dropout=0.0, causal=False, max_seq_length=2048,
device=None, dtype=None, **kwargs) -> None:
assert batch_first
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.embed_dim = embed_dim
self.causal = causal
self.num_heads = num_heads
        assert self.embed_dim % num_heads == 0, "self.embed_dim must be divisible by num_heads"
self.head_dim = self.embed_dim // num_heads
assert self.head_dim in [16, 32, 64], "Only support head_dim == 16, 32, or 64"
self.Wqkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
self.inner_attn = FlashBlocksparseAttention(
sparsity_config, attention_dropout=attention_dropout,
max_seq_length=max_seq_length, **factory_kwargs
)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
def forward(self, x, x_ignored_, x_ignored_1_, attn_mask=None, key_padding_mask=None,
need_weights=False):
qkv = self.Wqkv(x)
qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
context, attn_weights = self.inner_attn(qkv, key_padding_mask=key_padding_mask,
need_weights=need_weights, causal=self.causal)
return self.out_proj(rearrange(context, 'b s h d -> b s (h d)')), attn_weights
| flash-attention-main | flash_attn/flash_blocksparse_attention.py |
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
class IndexFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, input, indices):
ctx.save_for_backward(indices)
assert input.ndim >= 2
ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
second_dim = other_shape.numel()
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
# return input[indices]
return torch.gather(rearrange(input, 'b ... -> b (...)'), 0,
repeat(indices, 'z -> z d', d=second_dim)).reshape(-1, *other_shape)
@staticmethod
def backward(ctx, grad_output):
indices, = ctx.saved_tensors
assert grad_output.ndim >= 2
other_shape = grad_output.shape[1:]
grad_output = rearrange(grad_output, 'b ... -> b (...)')
grad_input = torch.zeros([ctx.first_axis_dim, grad_output.shape[1]],
device=grad_output.device, dtype=grad_output.dtype)
# TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
# grad_input[indices] = grad_output
grad_input.scatter_(0, repeat(indices, 'z -> z d', d=grad_output.shape[1]), grad_output)
return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
index_first_axis = IndexFirstAxis.apply
class IndexPutFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, values, indices, first_axis_dim):
ctx.save_for_backward(indices)
assert indices.ndim == 1
assert values.ndim >= 2
output = torch.zeros(first_axis_dim, *values.shape[1:], device=values.device,
dtype=values.dtype)
# TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
output[indices] = values
# output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
return output
@staticmethod
def backward(ctx, grad_output):
indices, = ctx.saved_tensors
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
grad_values = grad_output[indices]
# grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
return grad_values, None, None
index_put_first_axis = IndexPutFirstAxis.apply
class IndexFirstAxisResidual(torch.autograd.Function):
@staticmethod
def forward(ctx, input, indices):
ctx.save_for_backward(indices)
assert input.ndim >= 2
ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
second_dim = other_shape.numel()
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
output = input[indices]
# We don't want to reshape input (b ... -> b (...)) since it could change the channel_last
# memory format to channel_first. In other words, input might not be contiguous.
        # If we don't detach, PyTorch complains that output is a view and is being modified in place
return output, input.detach()
@staticmethod
def backward(ctx, grad_output, grad_residual):
indices, = ctx.saved_tensors
assert grad_output.ndim >= 2
other_shape = grad_output.shape[1:]
assert grad_residual.shape[1:] == other_shape
grad_input = grad_residual
# grad_input[indices] += grad_output
indices = indices.reshape(indices.shape[0], *((1,) * (grad_output.ndim - 1)))
indices = indices.expand_as(grad_output)
grad_input.scatter_add_(0, indices, grad_output)
return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
index_first_axis_residual = IndexFirstAxisResidual.apply
def unpad_input(hidden_states, attention_mask):
"""
Arguments:
hidden_states: (batch, seqlen, ...)
attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
Return:
        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
max_seqlen_in_batch: int
"""
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
# TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
    # bool mask, then call nonzero to get the indices, then index with those. The indices are @dim
# times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
# index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
# so we write custom forward and backward to make it a bit faster.
return (index_first_axis(rearrange(hidden_states, 'b s ... -> (b s) ...'), indices), indices,
cu_seqlens, max_seqlen_in_batch)
def pad_input(hidden_states, indices, batch, seqlen):
"""
Arguments:
        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
indices: (total_nnz)
Return:
hidden_states: (batch, seqlen, ...)
"""
dim = hidden_states.shape[-1]
# output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
# output[indices] = hidden_states
output = index_put_first_axis(hidden_states, indices, batch * seqlen)
return rearrange(output, '(b s) ... -> b s ...', b=batch)
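# Hedged usage sketch (added; not part of the original file): a CPU round trip through
# unpad_input / pad_input. Padded positions come back as zeros, so only the valid
# tokens are compared.
if __name__ == "__main__":
    batch, seqlen, dim = 2, 6, 4
    hidden = torch.randn(batch, seqlen, dim)
    mask = torch.tensor([[1, 1, 1, 0, 0, 0],
                         [1, 1, 1, 1, 1, 0]], dtype=torch.bool)
    unpadded, indices, cu_seqlens, max_len = unpad_input(hidden, mask)
    print(unpadded.shape, cu_seqlens.tolist(), max_len)  # torch.Size([8, 4]) [0, 3, 8] 5
    repadded = pad_input(unpadded, indices, batch, seqlen)
    assert torch.equal(repadded[mask], hidden[mask])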
| flash-attention-main | flash_attn/bert_padding.py |
| flash-attention-main | flash_attn/__init__.py |
# [2022-10-23] Downloaded from https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
# for benchmarking.
# We fixed a few dtype cast to make it work for bf16
"""
Fused Attention
===============
This is a Triton implementation of the Flash Attention algorithm
(see: Dao et al., https://arxiv.org/pdf/2205.14135v2.pdf; Rabe and Staats https://arxiv.org/pdf/2112.05682v2.pdf)
"""
import pytest
import torch
import triton
import triton.language as tl
@triton.jit
def _fwd_kernel(
Q, K, V, sm_scale,
TMP, L, M, # NOTE: TMP is a scratchpad buffer to workaround a compiler bug
Out,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on,
Z, H, N_CTX,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL)
off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
off_k = off_hz * stride_qh + offs_n[:, None] * stride_kn + offs_d[None, :] * stride_kk
off_v = off_hz * stride_qh + offs_n[:, None] * stride_qm + offs_d[None, :] * stride_qk
# Initialize pointers to Q, K, V
q_ptrs = Q + off_q
k_ptrs = K + off_k
v_ptrs = V + off_v
# initialize pointer to m and l
t_ptrs = TMP + off_hz * N_CTX + offs_m
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# load q: it will stay in SRAM throughout
q = tl.load(q_ptrs)
# loop over k, v and update accumulator
for start_n in range(0, (start_m + 1) * BLOCK_M, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(k_ptrs + start_n * stride_kn)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k, trans_b=True)
qk *= sm_scale
qk += tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), 0, float("-inf"))
# -- compute m_ij, p, l_ij
m_ij = tl.max(qk, 1)
p = tl.exp(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.exp(m_i - m_i_new)
beta = tl.exp(m_ij - m_i_new)
l_i_new = alpha * l_i + beta * l_ij
# -- update output accumulator --
# scale p
p_scale = beta / l_i_new
p = p * p_scale[:, None]
# scale acc
acc_scale = l_i / l_i_new * alpha
tl.store(t_ptrs, acc_scale)
acc_scale = tl.load(t_ptrs) # BUG: have to store and immediately load
acc = acc * acc_scale[:, None]
# update acc
v = tl.load(v_ptrs + start_n * stride_vk)
p = p.to(v.dtype)
acc += tl.dot(p, v)
# update m_i and l_i
l_i = l_i_new
m_i = m_i_new
# rematerialize offsets to save registers
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
# write back l and m
l_ptrs = L + off_hz * N_CTX + offs_m
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(l_ptrs, l_i)
tl.store(m_ptrs, m_i)
# initialize pointers to output
offs_n = tl.arange(0, BLOCK_DMODEL)
off_o = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on
out_ptrs = Out + off_o
tl.store(out_ptrs, acc)
@triton.jit
def _bwd_preprocess(
Out, DO, L,
NewDO, Delta,
BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_n = tl.arange(0, D_HEAD)
# load
o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
denom = tl.load(L + off_m).to(tl.float32)
# compute
do = do / denom[:, None]
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(NewDO + off_m[:, None] * D_HEAD + off_n[None, :], do)
tl.store(Delta + off_m, delta)
@triton.jit
def _bwd_kernel(
Q, K, V, sm_scale, Out, DO,
DQ, DK, DV,
L, M,
D,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
Z, H, N_CTX,
num_block,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
):
off_hz = tl.program_id(0)
off_z = off_hz // H
off_h = off_hz % H
# offset pointers for batch/head
Q += off_z * stride_qz + off_h * stride_qh
K += off_z * stride_qz + off_h * stride_qh
V += off_z * stride_qz + off_h * stride_qh
DO += off_z * stride_qz + off_h * stride_qh
DQ += off_z * stride_qz + off_h * stride_qh
DK += off_z * stride_qz + off_h * stride_qh
DV += off_z * stride_qz + off_h * stride_qh
for start_n in range(0, num_block):
lo = start_n * BLOCK_M
# initialize row/col offsets
offs_qm = lo + tl.arange(0, BLOCK_M)
offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M)
offs_m = tl.arange(0, BLOCK_N)
offs_k = tl.arange(0, BLOCK_DMODEL)
# initialize pointers to value-like data
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
v_ptrs = V + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
# pointer to row-wise quantities in value-like data
D_ptrs = D + off_hz * N_CTX
m_ptrs = M + off_hz * N_CTX
# initialize dv amd dk
dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# k and v stay in SRAM throughout
k = tl.load(k_ptrs)
v = tl.load(v_ptrs)
# loop over rows
for start_m in range(lo, num_block * BLOCK_M, BLOCK_M):
offs_m_curr = start_m + offs_m
# load q, k, v, do on-chip
q = tl.load(q_ptrs)
# recompute p = softmax(qk, dim=-1).T
# NOTE: `do` is pre-divided by `l`; no normalization here
qk = tl.dot(q, k, trans_b=True)
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf"))
m = tl.load(m_ptrs + offs_m_curr)
p = tl.exp(qk * sm_scale - m[:, None])
# compute dv
do = tl.load(do_ptrs)
dv += tl.dot(p.to(do.dtype), do, trans_a=True)
# compute dp = dot(v, do)
Di = tl.load(D_ptrs + offs_m_curr)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
dp += tl.dot(do, v, trans_b=True)
# compute ds = p * (dp - delta[:, None])
ds = p * dp * sm_scale
# compute dk = dot(ds.T, q)
dk += tl.dot(ds.to(q.dtype), q, trans_a=True)
# # compute dq
dq = tl.load(dq_ptrs, eviction_policy="evict_last")
dq += tl.dot(ds.to(k.dtype), k)
tl.store(dq_ptrs, dq, eviction_policy="evict_last")
# # increment pointers
dq_ptrs += BLOCK_M * stride_qm
q_ptrs += BLOCK_M * stride_qm
do_ptrs += BLOCK_M * stride_qm
# write-back
dv_ptrs = DV + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dk_ptrs = DK + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, sm_scale):
BLOCK = 128
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
assert Lq == Lk and Lk == Lv
assert Lk in {16, 32, 64, 128}
o = torch.empty_like(q)
grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1])
tmp = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
num_warps = 4 if Lk <= 64 else 8
_fwd_kernel[grid](
q, k, v, sm_scale,
tmp, L, m,
o,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
o.stride(0), o.stride(1), o.stride(2), o.stride(3),
q.shape[0], q.shape[1], q.shape[2],
BLOCK_M=BLOCK, BLOCK_N=BLOCK,
BLOCK_DMODEL=Lk, num_warps=num_warps,
num_stages=1,
)
ctx.save_for_backward(q, k, v, o, L, m)
ctx.BLOCK = BLOCK
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = Lk
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, l, m = ctx.saved_tensors
do = do.contiguous()
dq = torch.zeros_like(q, dtype=torch.float32)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
do_scaled = torch.empty_like(do)
delta = torch.empty_like(l)
_bwd_preprocess[(ctx.grid[0] * ctx.grid[1], )](
o, do, l,
do_scaled, delta,
BLOCK_M=ctx.BLOCK, D_HEAD=ctx.BLOCK_DMODEL,
)
# NOTE: kernel currently buggy for other values of `num_warps`
num_warps = 8
_bwd_kernel[(ctx.grid[1],)](
q, k, v, ctx.sm_scale,
o, do_scaled,
dq, dk, dv,
l, m,
delta,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
q.shape[0], q.shape[1], q.shape[2],
ctx.grid[0],
BLOCK_M=ctx.BLOCK, BLOCK_N=ctx.BLOCK,
BLOCK_DMODEL=ctx.BLOCK_DMODEL, num_warps=num_warps,
num_stages=1,
)
return dq.to(q.dtype), dk, dv, None
attention = _attention.apply
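# Hedged usage sketch (added; not part of the original file): the kernel takes q/k/v of
# shape (batch, nheads, seqlen, headdim) with headdim in {16, 32, 64, 128}, mirroring the
# call in benchmarks/benchmark_causal.py. Requires a CUDA device and Triton; the sizes
# below (seqlen a multiple of 128) are our choice for the demo.
if __name__ == "__main__":
    if torch.cuda.is_available():
        q, k, v = [torch.randn(2, 4, 256, 64, device="cuda", dtype=torch.float16,
                               requires_grad=True) for _ in range(3)]
        out = attention(q, k, v, 0.125)  # sm_scale = 1 / sqrt(headdim) = 1 / sqrt(64)
        out.sum().backward()
        print(out.shape)  # torch.Size([2, 4, 256, 64])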
| flash-attention-main | flash_attn/flash_attn_triton_og.py |
import math
import torch
import torch.nn as nn
from einops import rearrange
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
from flash_attn.bert_padding import unpad_input, pad_input, index_first_axis
class FlashAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.1)
"""
def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
super().__init__()
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
max_s=None, need_weights=False):
"""Implements the multihead softmax attention.
Arguments
---------
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
if unpadded: (nnz, 3, h, d)
key_padding_mask: a bool tensor of shape (B, S)
"""
assert not need_weights
assert qkv.dtype in [torch.float16, torch.bfloat16]
assert qkv.is_cuda
if cu_seqlens is None:
batch_size = qkv.shape[0]
seqlen = qkv.shape[1]
if key_padding_mask is None:
qkv = rearrange(qkv, 'b s ... -> (b s) ...')
max_s = seqlen
cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
device=qkv.device)
output = flash_attn_unpadded_qkvpacked_func(
qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
softmax_scale=self.softmax_scale, causal=causal
)
output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
else:
nheads = qkv.shape[-2]
x = rearrange(qkv, 'b s three h d -> b s (three h d)')
x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
output_unpad = flash_attn_unpadded_qkvpacked_func(
x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
softmax_scale=self.softmax_scale, causal=causal
)
output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
indices, batch_size, seqlen),
'b s (h d) -> b s h d', h=nheads)
else:
assert max_s is not None
output = flash_attn_unpadded_qkvpacked_func(
qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
softmax_scale=self.softmax_scale, causal=causal
)
return output, None
class FlashMHA(nn.Module):
def __init__(self, embed_dim, num_heads, bias=True, batch_first=True, attention_dropout=0.0,
causal=False, device=None, dtype=None, **kwargs) -> None:
assert batch_first
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.embed_dim = embed_dim
self.causal = causal
self.num_heads = num_heads
        assert self.embed_dim % num_heads == 0, "self.embed_dim must be divisible by num_heads"
self.head_dim = self.embed_dim // num_heads
assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8"
self.Wqkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
self.inner_attn = FlashAttention(attention_dropout=attention_dropout, **factory_kwargs)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
def forward(self, x, key_padding_mask=None, need_weights=False):
"""x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim)
key_padding_mask: bool tensor of shape (batch, seqlen)
"""
qkv = self.Wqkv(x)
qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
context, attn_weights = self.inner_attn(qkv, key_padding_mask=key_padding_mask,
need_weights=need_weights, causal=self.causal)
return self.out_proj(rearrange(context, 'b s h d -> b s (h d)')), attn_weights
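# Hedged usage sketch (added; not part of the original file): FlashMHA on random fp16
# input with a key padding mask. Requires a CUDA device and the compiled flash_attn
# extension; the sizes below are our choice for the demo.
if __name__ == "__main__":
    if torch.cuda.is_available():
        mha = FlashMHA(embed_dim=512, num_heads=8, attention_dropout=0.0,
                       causal=True, device="cuda", dtype=torch.float16)
        x = torch.randn(2, 128, 512, device="cuda", dtype=torch.float16)
        key_padding_mask = torch.ones(2, 128, dtype=torch.bool, device="cuda")
        key_padding_mask[:, 100:] = False  # mark trailing positions as padding
        out, _ = mha(x, key_padding_mask=key_padding_mask)
        print(out.shape)  # torch.Size([2, 128, 512])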
| flash-attention-main | flash_attn/flash_attention.py |
"""
*Experimental* implementation of FlashAttention in Triton.
We use the FlashAttention implementation from Phil Tillet a starting point.
https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
Changes:
- Implement both causal and non-causal attention.
- Implement both self-attention and cross-attention.
- Support arbitrary seqlens (not just multiples of 128), for both forward and backward.
- Support all head dimensions up to 128 (not just 16, 32, 64, 128), for both forward and backward.
- Support attention bias.
- Speed up the forward pass a bit, and only store the LSE instead of m and l.
- Make the backward for d=128 much faster by reducing register spilling.
- Optionally parallelize the backward pass across seqlen_k, to deal with the case of
small batch size * nheads.
Caution:
- This is an *experimental* implementation. The forward pass should be quite robust but
I'm not 100% sure that the backward pass doesn't have race conditions (due to the Triton compiler).
- This implementation has only been tested on A100.
- If you plan to use headdim other than 64 and 128, you should test for race conditions
(due to the Triton compiler), as done in tests/test_flash_attn.py
"test_flash_attn_triton_race_condition". I've tested and fixed many race conditions
for different head dimensions (40, 48, 64, 128, 80, 88, 96), but I'm still not 100% confident
that there are none left for other head dimensions.
Differences between this Triton version and the CUDA version:
- Triton version doesn't support dropout.
- Triton forward is generally faster than CUDA forward, while Triton backward is
generally slower than CUDA backward. Overall Triton forward + backward is slightly slower
than CUDA forward + backward.
- Triton version doesn't support different sequence lengths in a batch (i.e., RaggedTensor/NestedTensor).
- Triton version supports attention bias, while CUDA version doesn't.
"""
import math
import torch
from einops import rearrange, repeat
import triton
import triton.language as tl
# Disabling autotune for now, set num_warps=4 if headdim=64 and num_warps=8 if headdim=128
# @triton.autotune(
# configs=[
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 128}, num_warps=4, num_stages=1),
# # This config has a race condition when EVEN_M == False, disabling it for now.
# # triton.Config({"BLOCK_M": 64, "BLOCK_N": 64}, num_warps=4, num_stages=1),
# ],
# key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM']
# )
@triton.heuristics(
{
"EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
"EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
"EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
}
)
@triton.jit
def _fwd_kernel(
Q, K, V, Bias, Out,
Lse, TMP, # NOTE: TMP is a scratchpad buffer to workaround a compiler bug
softmax_scale,
stride_qb, stride_qh, stride_qm,
stride_kb, stride_kh, stride_kn,
stride_vb, stride_vh, stride_vn,
stride_bb, stride_bh, stride_bm,
stride_ob, stride_oh, stride_om,
nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim,
CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
):
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
# off_b = tl.program_id(1)
# off_h = tl.program_id(2)
# off_hb = off_b * nheads + off_h
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_HEADDIM)
# Initialize pointers to Q, K, V
    # Adding parentheses around indexing might use int32 math instead of int64 math?
# https://github.com/openai/triton/issues/741
# I'm seeing a tiny bit of difference (5-7us)
q_ptrs = Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] * stride_qm + offs_d[None, :])
k_ptrs = K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] * stride_kn + offs_d[None, :])
v_ptrs = V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] * stride_vn + offs_d[None, :])
if BIAS_TYPE == 'vector':
b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
elif BIAS_TYPE == 'matrix':
b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + (offs_m[:, None] * stride_bm + offs_n[None, :])
# initialize pointer to m and l
t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
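    # Online-softmax running state for this block of query rows: lse_i is the running
    # log-sum-exp of the scaled scores, m_i is the running reference value that keeps the
    # exponentials numerically stable, and acc_o is the unnormalized output accumulator.
    # After the loop over k/v blocks, acc_o is rescaled by exp(m_i - lse_i) so the result
    # equals softmax(Q K^T * softmax_scale) @ V without materializing the attention matrix.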
# load q: it will stay in SRAM throughout
# [2022-10-30] TD: Triton bug - in the case of EVEN_M=True and EVEN_N=False, if we just call
# tl.load(q_ptrs), we get the wrong output!
if EVEN_M & EVEN_N:
if EVEN_HEADDIM:
q = tl.load(q_ptrs)
else:
q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
else:
if EVEN_HEADDIM:
q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
else:
q = tl.load(q_ptrs, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
other=0.0)
# loop over k, v and update accumulator
end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k)
for start_n in range(0, end_n, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
if EVEN_N & EVEN_M: # If we just do "if EVEN_N", there seems to be some race condition
if EVEN_HEADDIM:
k = tl.load(k_ptrs + start_n * stride_kn)
else:
k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_d[None, :] < headdim, other=0.0)
else:
if EVEN_HEADDIM:
k = tl.load(k_ptrs + start_n * stride_kn, mask=(start_n + offs_n)[:, None] < seqlen_k,
other=0.0)
else:
k = tl.load(k_ptrs + start_n * stride_kn,
mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
other=0.0)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k, trans_b=True)
        # Trying to combine the two masks seems to make the result wrong
if not EVEN_N: # Need to mask out otherwise the softmax is wrong
qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float("-inf"))
if IS_CAUSAL:
qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float("-inf"))
if BIAS_TYPE != 'none':
if BIAS_TYPE == 'vector':
if EVEN_N:
bias = tl.load(b_ptrs + start_n).to(tl.float32)
else:
bias = tl.load(b_ptrs + start_n, mask=(start_n + offs_n) < seqlen_k, other=0.0).to(tl.float32)
bias = bias[None, :]
elif BIAS_TYPE == 'matrix':
if EVEN_M & EVEN_N:
bias = tl.load(b_ptrs + start_n).to(tl.float32)
else:
bias = tl.load(b_ptrs + start_n,
mask=(offs_m[:, None] < seqlen_q)
& ((start_n + offs_n)[None, :] < seqlen_k),
other=0.0).to(tl.float32)
            # Slightly faster to multiply the softmax_scale in the tl.exp below since the compiler
            # can then fuse the mult and add into an fma instruction. But if we have bias we need
            # to multiply with softmax_scale here.
qk = qk * softmax_scale + bias
m_ij = tl.maximum(tl.max(qk, 1), lse_i)
p = tl.exp(qk - m_ij[:, None])
else:
m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
p = tl.exp(qk * softmax_scale - m_ij[:, None])
l_ij = tl.sum(p, 1)
# scale acc_o
acc_o_scale = tl.exp(m_i - m_ij)
# # -- update output accumulator --
# BUG: have to store and immediately load
tl.store(t_ptrs, acc_o_scale)
acc_o_scale = tl.load(t_ptrs)
acc_o = acc_o * acc_o_scale[:, None]
# update acc_o
if EVEN_N & EVEN_M: # If we just do "if EVEN_N", there seems to be some race condition
if EVEN_HEADDIM:
v = tl.load(v_ptrs + start_n * stride_vn)
else:
v = tl.load(v_ptrs + start_n * stride_vn, mask=offs_d[None, :] < headdim, other=0.0)
else:
if EVEN_HEADDIM:
v = tl.load(v_ptrs + start_n * stride_vn, mask=(start_n + offs_n)[:, None] < seqlen_k,
other=0.0)
else:
v = tl.load(v_ptrs + start_n * stride_vn,
mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
other=0.0)
p = p.to(v.dtype)
acc_o += tl.dot(p, v)
# -- update statistics
m_i = m_ij
l_i_new = tl.exp(lse_i - m_ij) + l_ij
lse_i = m_ij + tl.log(l_i_new)
o_scale = tl.exp(m_i - lse_i)
# BUG: have to store and immediately load
tl.store(t_ptrs, o_scale)
o_scale = tl.load(t_ptrs)
acc_o = acc_o * o_scale[:, None]
# rematerialize offsets to save registers
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
# write back l and m
lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
tl.store(lse_ptrs, lse_i)
# initialize pointers to output
offs_n = tl.arange(0, BLOCK_HEADDIM)
out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:, None] * stride_om + offs_n[None, :])
if EVEN_M:
if EVEN_HEADDIM:
tl.store(out_ptrs, acc_o)
else:
tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
else:
if EVEN_HEADDIM:
tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
else:
tl.store(out_ptrs, acc_o,
mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
@triton.jit
def _bwd_preprocess_do_o_dot(
Out, DO, Delta,
stride_ob, stride_oh, stride_om,
stride_dob, stride_doh, stride_dom,
nheads, seqlen_q, seqlen_q_rounded, headdim,
BLOCK_M: tl.constexpr, BLOCK_HEADDIM: tl.constexpr,
):
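    # Computes delta = rowsum(O * dO) per query row. Since O = P @ V with P = softmax(scores),
    # rowsum(O * dO) equals rowsum(P * dP), which is the term subtracted when forming
    # dS = P * (dP - delta) in the main backward kernel.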
start_m = tl.program_id(0)
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
# load
o = tl.load(Out + off_b * stride_ob + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :],
mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32)
do = tl.load(DO + off_b * stride_dob + off_h * stride_doh + offs_m[:, None] * stride_dom + offs_d[None, :],
mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0).to(tl.float32)
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)
@triton.jit
def _bwd_store_dk_dv(
dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim,
EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr,
):
# [2022-11-01] TD: Same bug. In the case of EVEN_N=True and EVEN_M=False,
# if we just call tl.store(dv_ptrs), there's a race condition
if EVEN_N & EVEN_M:
if EVEN_HEADDIM:
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
else:
tl.store(dv_ptrs, dv, mask=offs_d[None, :] < headdim)
tl.store(dk_ptrs, dk, mask=offs_d[None, :] < headdim)
else:
if EVEN_HEADDIM:
tl.store(dv_ptrs, dv, mask=offs_n[:, None] < seqlen_k)
tl.store(dk_ptrs, dk, mask=offs_n[:, None] < seqlen_k)
else:
tl.store(dv_ptrs, dv, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
tl.store(dk_ptrs, dk, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
@triton.jit
def _bwd_kernel_one_col_block(
start_n,
Q, K, V, Bias,
DO, DQ, DK, DV,
LSE, D,
softmax_scale,
stride_qm, stride_kn, stride_vn, stride_bm,
stride_dom, stride_dqm, stride_dkn, stride_dvn,
seqlen_q, seqlen_k, headdim,
ATOMIC_ADD: tl.constexpr,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
):
# We need to make sure begin_m is a multiple of BLOCK_M (not BLOCK_N)
begin_m = 0 if not IS_CAUSAL else ((start_n * BLOCK_N) // BLOCK_M) * BLOCK_M
# initialize row/col offsets
offs_qm = begin_m + tl.arange(0, BLOCK_M)
offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
offs_m = tl.arange(0, BLOCK_M)
offs_d = tl.arange(0, BLOCK_HEADDIM)
# initialize pointers to value-like data
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_d[None, :])
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_d[None, :])
v_ptrs = V + (offs_n[:, None] * stride_vn + offs_d[None, :])
do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_d[None, :])
dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_d[None, :])
if BIAS_TYPE == 'vector':
b_ptrs = Bias + offs_n
elif BIAS_TYPE == 'matrix':
b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :])
# initialize dv and dk
dv = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
dk = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
# There seems to be some problem with Triton pipelining that makes results wrong for
# headdim=64, seqlen=(113, 255), bias_type='matrix'. In this case the for loop
# may have zero step, and pipelining with the bias matrix could screw it up.
# So we just exit early.
if begin_m >= seqlen_q:
dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
_bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim,
EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
return
# k and v stay in SRAM throughout
# [2022-10-30] TD: Same bug as the fwd. In the case of EVEN_N=True and EVEN_M=False,
# if we just call tl.load(k_ptrs), we get the wrong output!
if EVEN_N & EVEN_M:
if EVEN_HEADDIM:
k = tl.load(k_ptrs)
v = tl.load(v_ptrs)
else:
k = tl.load(k_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
v = tl.load(v_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
else:
if EVEN_HEADDIM:
k = tl.load(k_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
v = tl.load(v_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
else:
k = tl.load(k_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
other=0.0)
v = tl.load(v_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
other=0.0)
# loop over rows
num_block_m = tl.cdiv(seqlen_q, BLOCK_M)
for start_m in range(begin_m, num_block_m * BLOCK_M, BLOCK_M):
start_m = tl.multiple_of(start_m, BLOCK_M)
offs_m_curr = start_m + offs_m
# load q, k, v, do on-chip
# Same bug as below. Otherwise gives wrong result for headdim=40, seqlen=(128, 117)
if EVEN_M & EVEN_HEADDIM:
q = tl.load(q_ptrs)
else:
if EVEN_HEADDIM:
q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
else:
q = tl.load(q_ptrs, mask=(offs_m_curr[:, None] < seqlen_q)
& (offs_d[None, :] < headdim), other=0.0)
# recompute p = softmax(qk, dim=-1).T
qk = tl.dot(q, k, trans_b=True)
        # Trying to combine the two masks seems to make the result wrong
if not EVEN_N: # Need to mask out otherwise the softmax is wrong
qk = tl.where(offs_n[None, :] < seqlen_k, qk, float("-inf"))
if IS_CAUSAL:
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf"))
if BIAS_TYPE != 'none':
tl.debug_barrier() # Race condition otherwise
if BIAS_TYPE == 'vector':
if EVEN_N:
bias = tl.load(b_ptrs).to(tl.float32)
else:
bias = tl.load(b_ptrs, mask=offs_n < seqlen_k, other=0.0).to(tl.float32)
bias = bias[None, :]
elif BIAS_TYPE == 'matrix':
if EVEN_M & EVEN_N:
bias = tl.load(b_ptrs).to(tl.float32)
else:
bias = tl.load(b_ptrs,
mask=(offs_m_curr[:, None] < seqlen_q)
& (offs_n[None, :] < seqlen_k),
other=0.0).to(tl.float32)
qk = qk * softmax_scale + bias
# There seems to be a race condition when headdim=48/96, and dq, dk, dv are wrong.
# Also wrong for headdim=64.
if not (EVEN_M & EVEN_HEADDIM):
tl.debug_barrier()
lse_i = tl.load(LSE + offs_m_curr)
if BIAS_TYPE == 'none':
p = tl.exp(qk * softmax_scale - lse_i[:, None])
else:
p = tl.exp(qk - lse_i[:, None])
# compute dv
# [2022-10-30] TD: A Triton bug: if EVEN_M=True and EVEN_HEADDIM=False, if we call
# do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0), we get wrong outputs
# in the case of headdim=48/96, seqlen_q & seqlen_k >= 512. If headdim=40 or seqlen < 512,
# the output is correct.
if EVEN_M & EVEN_HEADDIM:
do = tl.load(do_ptrs)
else:
# [2022-11-01] TD: Triton bug, there's a race condition if we just use m_mask and not d_mask.
do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q)
& (offs_d[None, :] < headdim), other=0.0)
# if EVEN_M:
# if EVEN_HEADDIM:
# do = tl.load(do_ptrs)
# else:
# do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
# else:
# if EVEN_HEADDIM:
# do = tl.load(do_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
# else:
# do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q)
# & (offs_d[None, :] < headdim), other=0.0)
dv += tl.dot(p.to(do.dtype), do, trans_a=True)
# compute dp = dot(v, do)
# There seems to be a race condition when headdim=48/96, and dq, dk are wrong.
# Also wrong for headdim=128, seqlen=(108, 256), and ATOMIC_ADD=True
# Also wrong for headdim=64, seqlen=(1023, 1024), and ATOMIC_ADD=False
if not (EVEN_M & EVEN_HEADDIM):
tl.debug_barrier()
dp = tl.dot(do, v, trans_b=True)
# There's a race condition for headdim=48
if not EVEN_HEADDIM:
tl.debug_barrier()
# compute ds = p * (dp - delta[:, None])
# Putting the subtraction after the dp matmul (instead of before) is slightly faster
Di = tl.load(D + offs_m_curr)
# Converting ds to q.dtype here reduces register pressure and makes it much faster
# for BLOCK_HEADDIM=128
ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype)
# compute dk = dot(ds.T, q)
dk += tl.dot(ds, q, trans_a=True)
# compute dq
        if not (EVEN_M & EVEN_HEADDIM):  # Otherwise there's a race condition when BIAS_TYPE='matrix'
tl.debug_barrier()
if not ATOMIC_ADD:
if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M
dq = tl.load(dq_ptrs, eviction_policy="evict_last")
dq += tl.dot(ds, k)
tl.store(dq_ptrs, dq, eviction_policy="evict_last")
else:
if EVEN_HEADDIM:
dq = tl.load(dq_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0,
eviction_policy="evict_last")
dq += tl.dot(ds, k)
tl.store(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q,
eviction_policy="evict_last")
else:
dq = tl.load(dq_ptrs,
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
other=0.0, eviction_policy="evict_last")
dq += tl.dot(ds, k)
tl.store(dq_ptrs, dq,
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
eviction_policy="evict_last")
else: # If we're parallelizing across the seqlen_k dimension
dq = tl.dot(ds, k)
if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M
tl.atomic_add(dq_ptrs, dq)
else:
if EVEN_HEADDIM:
tl.atomic_add(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q)
else:
tl.atomic_add(dq_ptrs, dq,
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim))
# increment pointers
dq_ptrs += BLOCK_M * stride_dqm
q_ptrs += BLOCK_M * stride_qm
do_ptrs += BLOCK_M * stride_dom
if BIAS_TYPE == 'matrix':
b_ptrs += BLOCK_M * stride_bm
# write-back
dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
_bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim,
EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
def init_to_zero(name):
return lambda nargs: nargs[name].zero_()
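# The backward kernel accumulates into DQ (load+add+store in the sequential path,
# tl.atomic_add in the SEQUENCE_PARALLEL path), so the autotuner's pre_hook zeroes DQ
# before each launch/config trial to avoid accumulating on top of stale values.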
@triton.autotune(
configs=[
triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
# Other configs seem to give wrong results when seqlen_q % 128 != 0, disabling them for now
# # Kernel is buggy (give wrong result) if we set BLOCK_m=128, BLOCK_n=64, num_warps=*4*
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
# triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')),
# triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "SEQUENCE_PARALLEL": True}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')),
],
key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM'],
)
@triton.heuristics(
{
"EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
"EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
"EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
}
)
@triton.jit
def _bwd_kernel(
Q, K, V, Bias,
DO, DQ, DK, DV,
LSE, D,
softmax_scale,
stride_qb, stride_qh, stride_qm,
stride_kb, stride_kh, stride_kn,
stride_vb, stride_vh, stride_vn,
stride_bb, stride_bh, stride_bm,
stride_dob, stride_doh, stride_dom,
stride_dqb, stride_dqh, stride_dqm,
stride_dkb, stride_dkh, stride_dkn,
stride_dvb, stride_dvh, stride_dvn,
nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim,
CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
SEQUENCE_PARALLEL: tl.constexpr,
EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
):
off_hb = tl.program_id(1)
off_b = off_hb // nheads
off_h = off_hb % nheads
# offset pointers for batch/head
Q += off_b * stride_qb + off_h * stride_qh
K += off_b * stride_kb + off_h * stride_kh
V += off_b * stride_vb + off_h * stride_vh
DO += off_b * stride_dob + off_h * stride_doh
DQ += off_b * stride_dqb + off_h * stride_dqh
DK += off_b * stride_dkb + off_h * stride_dkh
DV += off_b * stride_dvb + off_h * stride_dvh
if BIAS_TYPE != 'none':
Bias += off_b * stride_bb + off_h * stride_bh
# pointer to row-wise quantities in value-like data
D += off_hb * seqlen_q_rounded
LSE += off_hb * seqlen_q_rounded
if not SEQUENCE_PARALLEL:
num_block_n = tl.cdiv(seqlen_k, BLOCK_N)
for start_n in range(0, num_block_n):
_bwd_kernel_one_col_block(
start_n,
Q, K, V, Bias,
DO, DQ, DK, DV,
LSE, D,
softmax_scale,
stride_qm, stride_kn, stride_vn, stride_bm,
stride_dom, stride_dqm, stride_dkn, stride_dvn,
seqlen_q, seqlen_k, headdim,
ATOMIC_ADD=False,
BIAS_TYPE=BIAS_TYPE,
IS_CAUSAL=IS_CAUSAL,
BLOCK_HEADDIM=BLOCK_HEADDIM,
EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM,
BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N
)
else:
start_n = tl.program_id(0)
_bwd_kernel_one_col_block(
start_n,
Q, K, V, Bias,
DO, DQ, DK, DV,
LSE, D,
softmax_scale,
stride_qm, stride_kn, stride_vn, stride_bm,
stride_dom, stride_dqm, stride_dkn, stride_dvn,
seqlen_q, seqlen_k, headdim,
ATOMIC_ADD=True,
BIAS_TYPE=BIAS_TYPE,
IS_CAUSAL=IS_CAUSAL,
BLOCK_HEADDIM=BLOCK_HEADDIM,
EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM,
BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N
)
def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
# shape constraints
batch, seqlen_q, nheads, d = q.shape
_, seqlen_k, _, _ = k.shape
assert k.shape == (batch, seqlen_k, nheads, d)
assert v.shape == (batch, seqlen_k, nheads, d)
    assert d <= 128, 'FlashAttention only supports head dimensions up to 128'
assert q.dtype == k.dtype == v.dtype, 'All tensors must have the same type'
assert q.dtype in [torch.float16, torch.bfloat16], 'Only support fp16 and bf16'
assert q.is_cuda and k.is_cuda and v.is_cuda
softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
has_bias = bias is not None
bias_type = 'none'
if has_bias:
assert bias.dtype in [q.dtype, torch.float]
assert bias.is_cuda
assert bias.dim() == 4
if bias.stride(-1) != 1:
bias = bias.contiguous()
if bias.shape[2:] == (1, seqlen_k):
bias_type = 'vector'
elif bias.shape[2:] == (seqlen_q, seqlen_k):
bias_type = 'matrix'
else:
raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k)'
' or (seqlen_q, seqlen_k)')
if bias.shape[:2] == (1, nheads):
bias = repeat(bias, '1 h ... -> b h ...', b=batch)
elif bias.shape[:2] == (batch, 1):
bias = repeat(bias, 'b 1 ... -> b h ...', h=nheads)
        assert bias.shape[:2] == (batch, nheads), 'First 2 dimensions of bias must be broadcastable to (batch, nheads)'
bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
lse = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
tmp = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
o = torch.empty_like(q)
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
BLOCK = 128
num_warps = 4 if d <= 64 else 8
grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
_fwd_kernel[grid](
q, k, v, bias, o,
lse, tmp,
softmax_scale,
q.stride(0), q.stride(2), q.stride(1),
k.stride(0), k.stride(2), k.stride(1),
v.stride(0), v.stride(2), v.stride(1),
*bias_strides,
o.stride(0), o.stride(2), o.stride(1),
nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d,
seqlen_q // 32, seqlen_k // 32, # key for triton cache (limit number of compilations)
# Can't use kwargs here because triton autotune expects key to be args, not kwargs
# IS_CAUSAL=causal, BLOCK_HEADDIM=d,
bias_type, causal, BLOCK_HEADDIM,
BLOCK_M=BLOCK, BLOCK_N=BLOCK,
num_warps=num_warps,
num_stages=1,
)
return o, lse, softmax_scale # softmax_scale could have been updated
def _flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None):
# Make sure that the last dimension is contiguous
if do.stride(-1) != 1:
do = do.contiguous()
batch, seqlen_q, nheads, d = q.shape
_, seqlen_k, _, _ = k.shape
# assert d in {16, 32, 64, 128}
assert d <= 128
seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
assert lse.shape == (batch, nheads, seqlen_q_rounded)
assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1
assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1
softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
# dq_accum = torch.zeros_like(q, dtype=torch.float32)
dq_accum = torch.empty_like(q, dtype=torch.float32)
delta = torch.empty_like(lse)
# delta = torch.zeros_like(lse)
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
_bwd_preprocess_do_o_dot[grid](
o, do, delta,
o.stride(0), o.stride(2), o.stride(1),
do.stride(0), do.stride(2), do.stride(1),
nheads, seqlen_q, seqlen_q_rounded, d,
BLOCK_M=128, BLOCK_HEADDIM=BLOCK_HEADDIM,
)
has_bias = bias is not None
bias_type = 'none'
if has_bias:
assert bias.dtype in [q.dtype, torch.float]
assert bias.is_cuda
assert bias.dim() == 4
assert bias.stride(-1) == 1
if bias.shape[2:] == (1, seqlen_k):
bias_type = 'vector'
elif bias.shape[2:] == (seqlen_q, seqlen_k):
bias_type = 'matrix'
else:
raise RuntimeError('Last 2 dimensions of bias must be (1, seqlen_k)'
' or (seqlen_q, seqlen_k)')
if bias.shape[:2] == (1, nheads):
bias = repeat(bias, '1 h ... -> b h ...', b=batch)
elif bias.shape[:2] == (batch, 1):
bias = repeat(bias, 'b 1 ... -> b h ...', h=nheads)
        assert bias.shape[:2] == (batch, nheads), 'First 2 dimensions of bias must be broadcastable to (batch, nheads)'
bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
# BLOCK_M = 128
# BLOCK_N = 64
# num_warps = 4
grid = lambda META: (triton.cdiv(seqlen_k, META["BLOCK_N"]) if META["SEQUENCE_PARALLEL"] else 1,
batch * nheads)
_bwd_kernel[grid](
q, k, v, bias,
do, dq_accum, dk, dv,
lse, delta,
softmax_scale,
q.stride(0), q.stride(2), q.stride(1),
k.stride(0), k.stride(2), k.stride(1),
v.stride(0), v.stride(2), v.stride(1),
*bias_strides,
do.stride(0), do.stride(2), do.stride(1),
dq_accum.stride(0), dq_accum.stride(2), dq_accum.stride(1),
dk.stride(0), dk.stride(2), dk.stride(1),
dv.stride(0), dv.stride(2), dv.stride(1),
nheads, seqlen_q, seqlen_k, seqlen_q_rounded, d,
seqlen_q // 32, seqlen_k // 32, # key for triton cache (limit number of compilations)
# Can't use kwargs here because triton autotune expects key to be args, not kwargs
# IS_CAUSAL=causal, BLOCK_HEADDIM=d,
bias_type, causal, BLOCK_HEADDIM,
# SEQUENCE_PARALLEL=False,
# BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N,
# num_warps=num_warps,
# num_stages=1,
)
dq.copy_(dq_accum)
class FlashAttnQKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, bias=None, causal=False, softmax_scale=None):
"""
qkv: (batch, seqlen, 3, nheads, headdim)
        bias: optional, shape broadcastable to (batch, nheads, seqlen, seqlen).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen).
ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)
"""
# Make sure that the last dimension is contiguous
if qkv.stride(-1) != 1:
qkv = qkv.contiguous()
o, lse, ctx.softmax_scale = _flash_attn_forward(
qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], bias=bias, causal=causal,
softmax_scale=softmax_scale
)
ctx.save_for_backward(qkv, o, lse, bias)
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
qkv, o, lse, bias = ctx.saved_tensors
assert not ctx.needs_input_grad[1], 'FlashAttention does not support bias gradient yet'
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
with torch.inference_mode():
dqkv = torch.empty_like(qkv)
_flash_attn_backward(do, qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2], o, lse,
dqkv[:, :, 0], dqkv[:, :, 1], dqkv[:, :, 2],
bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
return dqkv, None, None, None
flash_attn_qkvpacked_func = FlashAttnQKVPackedFunc.apply
class FlashAttnKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None):
"""
q: (batch, seqlen_q, nheads, headdim)
kv: (batch, seqlen_k, 2, nheads, headdim)
        bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
"""
# Make sure that the last dimension is contiguous
q, kv = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, kv]]
o, lse, ctx.softmax_scale = _flash_attn_forward(
q, kv[:, :, 0], kv[:, :, 1], bias=bias, causal=causal, softmax_scale=softmax_scale
)
ctx.save_for_backward(q, kv, o, lse, bias)
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, kv, o, lse, bias = ctx.saved_tensors
assert not ctx.needs_input_grad[2], 'FlashAttention does not support bias gradient yet'
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
with torch.inference_mode():
dq = torch.empty_like(q)
dkv = torch.empty_like(kv)
            _flash_attn_backward(do, q, kv[:, :, 0], kv[:, :, 1], o, lse,
dq, dkv[:, :, 0], dkv[:, :, 1],
bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
return dq, dkv, None, None, None
flash_attn_kvpacked_func = FlashAttnKVPackedFunc.apply
class FlashAttnFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
"""
q: (batch_size, seqlen_q, nheads, headdim)
k, v: (batch_size, seqlen_k, nheads, headdim)
        bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
"""
# Make sure that the last dimension is contiguous
q, k, v = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, k, v]]
o, lse, ctx.softmax_scale = _flash_attn_forward(
q, k, v, bias=bias, causal=causal, softmax_scale=softmax_scale
)
ctx.save_for_backward(q, k, v, o, lse, bias)
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, lse, bias = ctx.saved_tensors
assert not ctx.needs_input_grad[3], 'FlashAttention does not support bias gradient yet'
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
with torch.inference_mode():
dq = torch.empty_like(q)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
_flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv,
bias=bias, causal=ctx.causal, softmax_scale=ctx.softmax_scale)
return dq, dk, dv, None, None, None
flash_attn_func = FlashAttnFunc.apply
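# Minimal usage sketch (illustrative, assumes a CUDA device with Triton installed): the
# entry points above are autograd Function.apply wrappers, so arguments are positional.
# Shapes are (batch, seqlen, nheads, headdim) and inputs must be fp16 or bf16.
if __name__ == "__main__" and torch.cuda.is_available():
    _q, _k, _v = [torch.randn(2, 1024, 8, 64, device="cuda", dtype=torch.float16,
                              requires_grad=True) for _ in range(3)]
    # bias=None, causal=True, softmax_scale=None (defaults to 1/sqrt(headdim))
    _out = flash_attn_func(_q, _k, _v, None, True, None)
    _out.sum().backward()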
| flash-attention-main | flash_attn/flash_attn_triton.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import flash_attn_cuda
def _get_block_size(device, head_dim, is_dropout):
assert head_dim % 8 == 0 and head_dim <= 128
return 256 if head_dim <= 64 else 128
def _flash_attn_forward(q, k, v, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale, causal, return_softmax, num_splits=0,
generator=None):
"""
num_splits: how much to parallelize over the seqlen_q dimension. num_splits=0 means
it will be set by an internal heuristic. We're exposing num_splits mostly for benchmarking.
Don't change it unless you know what you're doing.
"""
softmax_lse, *rest = flash_attn_cuda.fwd(
q, k, v, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p,
softmax_scale, False, causal, return_softmax, num_splits, generator
)
# if out.isnan().any() or softmax_lse.isnan().any():
# breakpoint()
S_dmask = rest[0] if return_softmax else None
return out, softmax_lse, S_dmask
def _flash_attn_backward(dout, q, k, v, out, softmax_lse, dq, dk, dv, cu_seqlens_q, cu_seqlens_k,
max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, causal, num_splits=0,
generator=None):
"""
num_splits: whether to parallelize over the seqlen_k dimension (num_splits > 1) or
not (num_splits = 1). num_splits=0 means it will be set by an internal heuristic.
Any value above 1 will call the same kernel (i.e. num_splits=2 would call the same kernel
as num_splits=3), so effectively the choices are 0, 1, and 2.
This hyperparameter can be tuned for performance, but default value (heuristic) should work fine.
"""
_, _, _, softmax_d = flash_attn_cuda.bwd(
dout, q, k, v, out, softmax_lse, dq, dk, dv, cu_seqlens_q, cu_seqlens_k,
max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, False, causal, num_splits, generator)
    # if dq.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
# breakpoint()
return dq, dk, dv, softmax_d
class FlashAttnQKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cu_seqlens, max_seqlen, dropout_p, softmax_scale, causal, return_softmax):
# Save rng_state because the backward pass will regenerate the dropout mask
rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
if softmax_scale is None:
softmax_scale = qkv.shape[-1] ** (-0.5)
out, softmax_lse, S_dmask = _flash_attn_forward(
qkv[:, 0], qkv[:, 1], qkv[:, 2], torch.empty_like(qkv[:, 0]), cu_seqlens, cu_seqlens,
max_seqlen, max_seqlen, dropout_p, softmax_scale, causal=causal,
return_softmax=return_softmax
)
ctx.save_for_backward(qkv, out, softmax_lse, cu_seqlens, rng_state)
ctx.dropout_p = dropout_p
ctx.max_seqlen = max_seqlen
ctx.softmax_scale = softmax_scale
ctx.causal = causal
return out if not return_softmax else (out, softmax_lse, S_dmask)
@staticmethod
def backward(ctx, dout, *args):
qkv, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
if rng_state is not None:
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(rng_state)
dqkv = torch.empty_like(qkv)
_flash_attn_backward(
dout, qkv[:, 0], qkv[:, 1], qkv[:, 2], out, softmax_lse,
dqkv[:, 0], dqkv[:, 1], dqkv[:, 2], cu_seqlens, cu_seqlens,
ctx.max_seqlen, ctx.max_seqlen, ctx.dropout_p, ctx.softmax_scale, ctx.causal
)
if rng_state is not None:
torch.cuda.set_rng_state(cur_rng_state)
return dqkv, None, None, None, None, None, None
class FlashAttnKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, q, kv, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p,
softmax_scale, causal, return_softmax):
# Save rng_state because the backward pass will regenerate the dropout mask
rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
if softmax_scale is None:
softmax_scale = q.shape[-1] ** (-0.5)
out, softmax_lse, S_dmask = _flash_attn_forward(
q, kv[:, 0], kv[:, 1], torch.empty_like(q), cu_seqlens_q, cu_seqlens_k, max_seqlen_q,
max_seqlen_k, dropout_p, softmax_scale, causal=causal, return_softmax=return_softmax
)
ctx.save_for_backward(q, kv, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state)
ctx.dropout_p = dropout_p
ctx.max_seqlen_q = max_seqlen_q
ctx.max_seqlen_k = max_seqlen_k
ctx.softmax_scale = softmax_scale
ctx.causal = causal
return out if not return_softmax else (out, softmax_lse, S_dmask)
@staticmethod
def backward(ctx, dout, *args):
q, kv, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
if rng_state is not None:
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(rng_state)
dq = torch.empty_like(q)
dkv = torch.empty_like(kv)
_flash_attn_backward(
dout, q, kv[:, 0], kv[:, 1], out, softmax_lse,
dq, dkv[:, 0], dkv[:, 1], cu_seqlens_q, cu_seqlens_k,
ctx.max_seqlen_q, ctx.max_seqlen_k, ctx.dropout_p, ctx.softmax_scale, ctx.causal
)
if rng_state is not None:
torch.cuda.set_rng_state(cur_rng_state)
return dq, dkv, None, None, None, None, None, None, None, None
class FlashAttnFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p,
softmax_scale, causal, return_softmax):
# Save rng_state because the backward pass will regenerate the dropout mask
rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
if softmax_scale is None:
softmax_scale = q.shape[-1] ** (-0.5)
out, softmax_lse, S_dmask = _flash_attn_forward(
q, k, v, torch.empty_like(q), cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale, causal=causal, return_softmax=return_softmax
)
ctx.save_for_backward(q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state)
ctx.dropout_p = dropout_p
ctx.max_seqlen_q = max_seqlen_q
ctx.max_seqlen_k = max_seqlen_k
ctx.softmax_scale = softmax_scale
ctx.causal = causal
return out if not return_softmax else (out, softmax_lse, S_dmask)
@staticmethod
def backward(ctx, dout, *args):
q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
if rng_state is not None:
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(rng_state)
dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
_flash_attn_backward(
dout, q, k, v, out, softmax_lse, dq, dk, dv, cu_seqlens_q, cu_seqlens_k,
ctx.max_seqlen_q, ctx.max_seqlen_k, ctx.dropout_p, ctx.softmax_scale, ctx.causal
)
if rng_state is not None:
torch.cuda.set_rng_state(cur_rng_state)
return dq, dk, dv, None, None, None, None, None, None, None, None
class FlashAttnQKVPackedSplitFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cu_seqlens, max_seqlen0, max_seqlen1, batch_size0, dropout_p,
softmax_scale, causal, return_softmax):
# Save rng_state because the backward pass will regenerate the dropout mask
if dropout_p > 0:
rng_state0 = torch.cuda.get_rng_state()
generator1 = torch.Generator(device='cuda')
rng_state1 = generator1.get_state()
else:
rng_state0, generator1, rng_state1 = None, None, None
if softmax_scale is None:
softmax_scale = qkv.shape[-1] ** (-0.5)
out = torch.empty_like(qkv[:, 0])
_, softmax_lse0, S_dmask0 = _flash_attn_forward(
qkv[:, 0], qkv[:, 1], qkv[:, 2], out, cu_seqlens[:batch_size0 + 1],
cu_seqlens[:batch_size0 + 1], max_seqlen0, max_seqlen0, dropout_p, softmax_scale,
causal=causal, return_softmax=return_softmax
)
s = torch.cuda.Stream()
with torch.cuda.stream(s):
_, softmax_lse1, S_dmask1 = _flash_attn_forward(
qkv[:, 0], qkv[:, 1], qkv[:, 2], out, cu_seqlens[batch_size0:],
cu_seqlens[batch_size0:], max_seqlen1, max_seqlen1, dropout_p, softmax_scale,
causal=causal, return_softmax=return_softmax, generator=generator1
)
torch.cuda.current_stream().wait_stream(s)
ctx.save_for_backward(qkv, out, softmax_lse0, softmax_lse1, cu_seqlens,
rng_state0, rng_state1)
ctx.dropout_p = dropout_p
ctx.max_seqlen0 = max_seqlen0
ctx.max_seqlen1 = max_seqlen1
ctx.batch_size0 = batch_size0
ctx.softmax_scale = softmax_scale
ctx.causal = causal
if not return_softmax:
return out
else:
max_seqlen_q = max(softmax_lse0.shape[2], softmax_lse1.shape[2])
max_seqlen_k = max(S_dmask0.shape[3], S_dmask1.shape[3])
softmax_lse = torch.cat([F.pad(softmax_lse0, (0, max_seqlen_q - softmax_lse0.shape[2])),
F.pad(softmax_lse1, (0, max_seqlen_q - softmax_lse1.shape[2]))],
dim=0)
return out, softmax_lse, S_dmask0, S_dmask1
@staticmethod
def backward(ctx, dout, *args):
qkv, out, softmax_lse0, softmax_lse1, cu_seqlens, rng_state0, rng_state1 = ctx.saved_tensors
batch_size0 = ctx.batch_size0
if rng_state0 is not None:
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(rng_state0)
if rng_state1 is not None:
generator1 = torch.Generator(device='cuda')
generator1.set_state(rng_state1)
else:
generator1 = None
dqkv = torch.empty_like(qkv)
_flash_attn_backward(
dout, qkv[:, 0], qkv[:, 1], qkv[:, 2], out, softmax_lse0,
dqkv[:, 0], dqkv[:, 1], dqkv[:, 2], cu_seqlens[:batch_size0 + 1],
cu_seqlens[:batch_size0 + 1], ctx.max_seqlen0, ctx.max_seqlen0, ctx.dropout_p,
ctx.softmax_scale, ctx.causal
)
s = torch.cuda.Stream()
with torch.cuda.stream(s):
_flash_attn_backward(
dout, qkv[:, 0], qkv[:, 1], qkv[:, 2], out, softmax_lse1,
dqkv[:, 0], dqkv[:, 1], dqkv[:, 2], cu_seqlens[batch_size0:],
cu_seqlens[batch_size0:], ctx.max_seqlen1, ctx.max_seqlen1, ctx.dropout_p,
ctx.softmax_scale, ctx.causal, generator=generator1
)
torch.cuda.current_stream().wait_stream(s)
if rng_state0 is not None:
torch.cuda.set_rng_state(cur_rng_state)
return dqkv, None, None, None, None, None, None, None, None
def flash_attn_unpadded_qkvpacked_func(qkv, cu_seqlens, max_seqlen, dropout_p, softmax_scale=None,
causal=False, return_attn_probs=False):
"""dropout_p should be set to 0.0 during evaluation
Arguments:
qkv: (total, 3, nheads, headdim), where total = total number of tokens in the batch.
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into qkv.
max_seqlen: int. Maximum sequence length in the batch.
dropout_p: float. Dropout probability.
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
testing only. The returned probabilities are not guaranteed to be correct
(they might not have the right scaling).
Return:
out: (total, nheads, headdim).
softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
normalization factor).
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
The output of softmax (possibly with different scaling). It also encodes the dropout
pattern (negative means that location was dropped, nonnegative means it was kept).
"""
return FlashAttnQKVPackedFunc.apply(qkv, cu_seqlens, max_seqlen, dropout_p, softmax_scale,
causal, return_attn_probs)
def flash_attn_unpadded_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale=None, causal=False,
return_attn_probs=False):
"""dropout_p should be set to 0.0 during evaluation
Arguments:
q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
kv: (total_k, 2, nheads, headdim), where total_k = total number of key tokens in the batch.
cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into q.
cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into kv.
max_seqlen_q: int. Maximum query sequence length in the batch.
max_seqlen_k: int. Maximum key sequence length in the batch.
dropout_p: float. Dropout probability.
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
testing only. The returned probabilities are not guaranteed to be correct
(they might not have the right scaling).
Return:
out: (total, nheads, headdim).
softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
normalization factor).
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
The output of softmax (possibly with different scaling). It also encodes the dropout
pattern (negative means that location was dropped, nonnegative means it was kept).
"""
return FlashAttnKVPackedFunc.apply(q, kv, cu_seqlens_q, cu_seqlens_k,
max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale, causal,
return_attn_probs)
def flash_attn_unpadded_func(q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale=None, causal=False, return_attn_probs=False):
"""dropout_p should be set to 0.0 during evaluation
Arguments:
q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
k: (total_k, nheads, headdim), where total_k = total number of key tokens in the batch.
v: (total_k, nheads, headdim), where total_k = total number of key tokens in the batch.
cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into q.
cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into kv.
max_seqlen_q: int. Maximum query sequence length in the batch.
max_seqlen_k: int. Maximum key sequence length in the batch.
dropout_p: float. Dropout probability.
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
testing only. The returned probabilities are not guaranteed to be correct
(they might not have the right scaling).
Return:
out: (total, nheads, headdim).
softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
normalization factor).
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
The output of softmax (possibly with different scaling). It also encodes the dropout
pattern (negative means that location was dropped, nonnegative means it was kept).
"""
return FlashAttnFunc.apply(q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
dropout_p, softmax_scale, causal, return_attn_probs)
def flash_attn_unpadded_qkvpacked_split_func(
qkv, cu_seqlens, max_seqlen0, max_seqlen1, batch_size0, dropout_p, softmax_scale=None,
causal=False, return_attn_probs=False):
"""
    Split attention into 2 kernels running on 2 separate streams for performance reasons:
e.g., if the batch has some sequences of length <= 128 and some > 128, it might be faster to
have one kernel dealing with seqlen <= 128 and one kernel for seqlen > 128.
dropout_p should be set to 0.0 during evaluation.
Arguments:
qkv: (total, 3, nheads, headdim), where total = total number of tokens in the batch.
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into qkv.
max_seqlen0: int. Maximum sequence length in 1st part of the batch.
max_seqlen1: int. Maximum sequence length in 2nd part of the batch.
batch_size0: int. Number of sequences in the 1st part of the batch.
dropout_p: float. Dropout probability.
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
testing only. The returned probabilities are not guaranteed to be correct
(they might not have the right scaling).
Return:
out: (total, nheads, headdim).
softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
normalization factor).
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
The output of softmax (possibly with different scaling). It also encodes the dropout
pattern (negative means that location was dropped, nonnegative means it was kept).
"""
return FlashAttnQKVPackedSplitFunc.apply(qkv, cu_seqlens, max_seqlen0, max_seqlen1, batch_size0,
dropout_p, softmax_scale, causal, return_attn_probs)
def flash_attn_func(qkv, cu_seqlens, dropout_p, max_s, softmax_scale=None, causal=False,
return_attn_probs=False):
"""For backward-compatibility only, will remove soon.
dropout_p should be set to 0.0 during evaluation
"""
return flash_attn_unpadded_qkvpacked_func(qkv, cu_seqlens, max_s, dropout_p, softmax_scale,
causal, return_attn_probs)
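# Minimal usage sketch (illustrative, assumes the flash_attn_cuda extension is built and a
# CUDA device is available): the unpadded interface takes all tokens of the batch
# concatenated along dim 0, plus int32 cumulative sequence lengths delimiting each sequence.
if __name__ == "__main__" and torch.cuda.is_available():
    _seqlens = torch.tensor([128, 256, 64], dtype=torch.int32, device="cuda")
    _cu_seqlens = F.pad(torch.cumsum(_seqlens, 0, dtype=torch.int32), (1, 0))  # [0, 128, 384, 448]
    _qkv = torch.randn(int(_seqlens.sum()), 3, 8, 64, device="cuda", dtype=torch.float16,
                       requires_grad=True)
    _out = flash_attn_unpadded_qkvpacked_func(_qkv, _cu_seqlens, int(_seqlens.max()),
                                              dropout_p=0.0, causal=True)
    _out.sum().backward()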
| flash-attention-main | flash_attn/flash_attn_interface.py |
import torch
import torch.nn as nn
import xentropy_cuda_lib
# https://github.com/NVIDIA/apex/blob/master/apex/contrib/xentropy/softmax_xentropy.py
class SoftmaxCrossEntropyLossFn(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, labels, smoothing=0.0, padding_idx=0, inplace_backward=False):
losses, max_log_sum_exp = xentropy_cuda_lib.forward(
logits, labels, smoothing)
losses.masked_fill_(labels==padding_idx, 0)
ctx.save_for_backward(logits, max_log_sum_exp, labels)
ctx.smoothing = smoothing
ctx.padding_idx = padding_idx
ctx.inplace_backward = inplace_backward
return losses
@staticmethod
def backward(ctx, grad_loss):
logits, max_log_sum_exp, labels = ctx.saved_tensors
if not grad_loss.is_contiguous():
grad_loss = grad_loss.contiguous()
grad_loss.masked_fill_(labels==ctx.padding_idx, 0)
grad_logits = xentropy_cuda_lib.backward(grad_loss, logits, max_log_sum_exp, labels,
ctx.smoothing, ctx.inplace_backward)
return grad_logits, None, None, None, None
class CrossEntropyLossApex(nn.Module):
def __init__(self, ignore_index=-100, reduction='mean', label_smoothing=0.0,
inplace_backward=False):
super().__init__()
if reduction not in ['mean', 'none']:
raise NotImplementedError("Only support reduction = 'mean' or 'none'")
self.ignore_index = ignore_index
self.reduction = reduction
self.label_smoothing = label_smoothing
self.inplace_backward = inplace_backward
def forward(self, input, target):
assert input.is_cuda and target.is_cuda
# SoftmaxCrossEntropyLoss implicitly casts to float
loss = SoftmaxCrossEntropyLossFn.apply(input, target, self.label_smoothing,
self.ignore_index, self.inplace_backward)
if self.reduction == 'mean':
return loss.sum() / (target != self.ignore_index).sum()
else:
return loss
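# Minimal usage sketch (illustrative, assumes the xentropy CUDA extension is built): a
# drop-in replacement for nn.CrossEntropyLoss on CUDA tensors of shape (batch, vocab) and
# (batch,). inplace_backward=True lets the backward kernel reuse the logits buffer for the
# gradient, saving memory for large vocabularies.
if __name__ == "__main__" and torch.cuda.is_available():
    _logits = torch.randn(32, 50257, device="cuda", dtype=torch.float16, requires_grad=True)
    _labels = torch.randint(0, 50257, (32,), device="cuda")
    _loss = CrossEntropyLossApex(inplace_backward=True)(_logits, _labels)
    _loss.backward()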
| flash-attention-main | flash_attn/losses/cross_entropy_apex.py |
# Inspired by https://github.com/NVIDIA/apex/blob/master/apex/transformer/tensor_parallel/cross_entropy.py
# But we make it much faster: we compute the local loss and the LSE, and by exchanging the LSE and
# the losses we can get the global loss. There's no need to do it step by step
# (compute local max, exchange, compute exp, compute local sum, exchange, etc.)
import torch
import torch.nn as nn
import xentropy_cuda_lib
from apex.transformer.parallel_state import get_tensor_model_parallel_group
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.transformer.tensor_parallel.utils import VocabUtility
# `all_gather_into_tensor` and `reduce_scatter_tensor` are the new names for
# `_all_gather_base` and `_reduce_scatter_base`. They require a recent version
# of PyTorch. The following 4 lines are for backward compatibility with
# older PyTorch.
if "all_gather_into_tensor" not in dir(torch.distributed):
torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
if "reduce_scatter_tensor" not in dir(torch.distributed):
torch.distributed.reduce_scatter_tensor = torch.distributed._reduce_scatter_base
class SoftmaxCrossEntropyLossParallelFn(torch.autograd.Function):
@staticmethod
def forward(ctx, logits_parallel, labels, smoothing=0.0, ignored_index=-100,
inplace_backward=False):
"""
logits_parallel: (batch, vocab_size / world_size)
labels: (batch,)
"""
assert smoothing == 0.0, 'smoothing != 0.0 is not yet implemented, file an issue if you need it'
batch, partition_vocab_size = logits_parallel.shape
assert labels.shape == (batch,)
rank = get_tensor_model_parallel_rank()
world_size = get_tensor_model_parallel_world_size()
if world_size == 1:
losses, lse = xentropy_cuda_lib.forward(logits_parallel, labels, smoothing)
losses.masked_fill_(labels==ignored_index, 0)
labels_local = labels
else:
vocab_start_index, vocab_end_index = VocabUtility.vocab_range_from_per_partition_vocab_size(
partition_vocab_size, get_tensor_model_parallel_rank(),
get_tensor_model_parallel_world_size()
)
            # Mask for labels that live on other ranks' vocab shards (True means it must be masked).
labels_mask = (labels < vocab_start_index) | (labels >= vocab_end_index)
ignored_mask = labels == ignored_index
labels_local = torch.where(ignored_mask, labels, labels - vocab_start_index)
masked_labels = labels_local.clone()
masked_labels[labels_mask] = ignored_index
losses, lse_local = xentropy_cuda_lib.forward(logits_parallel, masked_labels, smoothing)
assert lse_local.shape == (batch,)
assert losses.shape == (batch,)
losses.masked_fill_(masked_labels==ignored_index, 0)
lse_allgather = torch.empty(world_size, batch, dtype=lse_local.dtype,
device=lse_local.device)
handle_lse = torch.distributed.all_gather_into_tensor(
lse_allgather, lse_local.contiguous(),
group=get_tensor_model_parallel_group(), async_op=True
)
handle_losses = torch.distributed.all_reduce(
losses, op=torch.distributed.ReduceOp.SUM,
group=get_tensor_model_parallel_group(), async_op=True
)
handle_lse.wait()
lse = torch.logsumexp(lse_allgather, dim=0)
            # The local losses are lse_local - predicted_logit; to get the global loss,
            # subtract lse_local and add the global lse.
rank_per_sample = torch.div(labels, partition_vocab_size, rounding_mode='floor')
lse_local = lse_allgather[rank_per_sample,
torch.arange(batch, device=lse_allgather.device)]
handle_losses.wait()
losses += lse - lse_local
losses.masked_fill_(ignored_mask, 0)
ctx.save_for_backward(logits_parallel, lse, labels_local)
ctx.smoothing = smoothing
ctx.ignored_index = ignored_index
ctx.inplace_backward = inplace_backward
return losses
@staticmethod
def backward(ctx, grad_loss):
logits_parallel, lse, labels = ctx.saved_tensors
if not grad_loss.is_contiguous():
grad_loss = grad_loss.contiguous()
grad_loss.masked_fill_(labels==ctx.ignored_index, 0)
grad_logits = xentropy_cuda_lib.backward(grad_loss, logits_parallel, lse, labels,
ctx.smoothing, ctx.inplace_backward)
return grad_logits, None, None, None, None, None
class CrossEntropyLossParallel(nn.Module):
def __init__(self, ignore_index=-100, reduction='mean', label_smoothing=0.0,
inplace_backward=False):
super().__init__()
if reduction not in ['mean', 'none']:
raise NotImplementedError("Only support reduction = 'mean' or 'none'")
self.ignore_index = ignore_index
self.reduction = reduction
self.label_smoothing = label_smoothing
self.inplace_backward = inplace_backward
def forward(self, input, target):
assert input.is_cuda and target.is_cuda
# SoftmaxCrossEntropyLoss implicitly casts to float
loss = SoftmaxCrossEntropyLossParallelFn.apply(
input, target, self.label_smoothing, self.ignore_index, self.inplace_backward
)
if self.reduction == 'mean':
return loss.sum() / (target != self.ignore_index).sum()
else:
return loss
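# Illustrative single-process check of the identity used above: with the vocab split into
# shards, the global loss is (lse_local - logit_label) + (lse_global - lse_local), where
# lse_global is the logsumexp of the per-shard LSEs.
if __name__ == "__main__":
    _logits = torch.randn(4, 10, dtype=torch.float64)
    _labels = torch.randint(0, 10, (4,))
    _shards = _logits.chunk(2, dim=-1)  # two "ranks", 5 vocab ids each
    _lse_shards = torch.stack([s.logsumexp(dim=-1) for s in _shards])  # (2, batch)
    _lse_global = torch.logsumexp(_lse_shards, dim=0)  # (batch,)
    _owner = torch.div(_labels, 5, rounding_mode='floor')
    _idx = torch.arange(4)
    _logit_label = _logits.gather(1, _labels[:, None]).squeeze(1)
    _loss_local = _lse_shards[_owner, _idx] - _logit_label
    _loss = _loss_local + _lse_global - _lse_shards[_owner, _idx]
    assert torch.allclose(_loss, nn.functional.cross_entropy(_logits, _labels, reduction='none'))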
| flash-attention-main | flash_attn/losses/cross_entropy_parallel.py |
# Inspired by https://github.com/facebookresearch/xformers/blob/main/xformers/components/positional_embedding/rotary.py
from typing import Tuple
import math
import torch
from einops import rearrange, repeat
import rotary_emb
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_emb_torch(x, cos, sin):
"""
x: (batch_size, seqlen, nheads, headdim)
cos, sin: (seqlen, rotary_dim / 2)
"""
rotary_dim = cos.shape[-1] * 2
assert rotary_dim <= x.shape[-1]
cos = repeat(cos, 's d -> s 1 (2 d)')
sin = repeat(sin, 's d -> s 1 (2 d)')
return torch.cat([x[..., :rotary_dim] * cos + rotate_half(x[..., :rotary_dim]) * sin,
x[..., rotary_dim:]], dim=-1)
class ApplyRotaryEmb(torch.autograd.Function):
@staticmethod
def forward(ctx, x, cos, sin, inplace=False):
"""
x: (batch_size, seqlen, nheads, headdim)
cos, sin: (seqlen, rotary_dim / 2)
rotary_dim must be <= headdim
Apply rotary embedding to the first rotary_dim of x.
"""
batch, seqlen, nheads, headdim = x.shape
rotary_seqlen, rotary_dim = cos.shape
rotary_dim *= 2
assert rotary_dim <= headdim
assert seqlen <= rotary_seqlen
assert cos.shape == (rotary_seqlen, rotary_dim // 2)
assert sin.shape == (rotary_seqlen, rotary_dim // 2)
x1, x2 = x[..., :rotary_dim].chunk(2, dim=-1)
out = torch.empty_like(x) if not inplace else x
o1, o2 = out[..., :rotary_dim].chunk(2, dim=-1) if not inplace else (x1, x2)
        rotary_emb.apply_rotary(x1, x2, rearrange(cos[:seqlen], 's d -> s 1 d'),
                                rearrange(sin[:seqlen], 's d -> s 1 d'), o1, o2, False)
if not inplace and rotary_dim < headdim:
out[..., rotary_dim:].copy_(x[..., rotary_dim:])
ctx.save_for_backward(cos, sin)
ctx.inplace = inplace
return out if not inplace else x
@staticmethod
def backward(ctx, do):
cos, sin = ctx.saved_tensors
_, seqlen, _, headdim = do.shape
rotary_dim = cos.shape[-1]
rotary_dim *= 2
inplace = ctx.inplace
do1, do2 = do[..., :rotary_dim].chunk(2, dim=-1)
dx = torch.empty_like(do) if not inplace else do
dx1, dx2 = dx[..., :rotary_dim].chunk(2, dim=-1) if not inplace else (do1, do2)
        rotary_emb.apply_rotary(do1, do2, rearrange(cos[:seqlen], 's d -> s 1 d'),
                                rearrange(sin[:seqlen], 's d -> s 1 d'), dx1, dx2, True)
if not inplace and rotary_dim < headdim:
dx[..., rotary_dim:].copy_(do[..., rotary_dim:])
return dx, None, None, None
apply_rotary_emb_func = ApplyRotaryEmb.apply
class ApplyRotaryEmbQKV_(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cos, sin):
"""
qkv: (batch_size, seqlen, 3, nheads, headdim)
cos, sin: (seqlen, rotary_dim / 2)
rotary_dim must be <= headdim
Apply rotary embedding *inplace* to the first rotary_dim of q and k.
"""
batch, seqlen, three, nheads, headdim = qkv.shape
assert three == 3
rotary_seqlen, rotary_dim = cos.shape
rotary_dim *= 2
assert rotary_dim <= headdim
assert seqlen <= rotary_seqlen
assert cos.shape == (seqlen, rotary_dim // 2)
assert sin.shape == (seqlen, rotary_dim // 2)
q1, q2 = qkv[:, :, 0, :, :rotary_dim].chunk(2, dim=-1)
        rotary_emb.apply_rotary(q1, q2, rearrange(cos[:seqlen], 's d -> s 1 d'),
                                rearrange(sin[:seqlen], 's d -> s 1 d'), q1, q2, False)
k1, k2 = qkv[:, :, 1, :, :rotary_dim].chunk(2, dim=-1)
        rotary_emb.apply_rotary(k1, k2, rearrange(cos[:seqlen], 's d -> s 1 d'),
                                rearrange(sin[:seqlen], 's d -> s 1 d'), k1, k2, False)
ctx.save_for_backward(cos, sin)
return qkv
@staticmethod
def backward(ctx, dqkv):
cos, sin = ctx.saved_tensors
_, seqlen, _, _, headdim = dqkv.shape
rotary_dim = cos.shape[-1]
rotary_dim *= 2
dq1, dq2 = dqkv[:, :, 0, :, :rotary_dim].chunk(2, dim=-1)
        rotary_emb.apply_rotary(dq1, dq2, rearrange(cos[:seqlen], 's d -> s 1 d'),
                                rearrange(sin[:seqlen], 's d -> s 1 d'), dq1, dq2, True)
dk1, dk2 = dqkv[:, :, 1, :, :rotary_dim].chunk(2, dim=-1)
        rotary_emb.apply_rotary(dk1, dk2, rearrange(cos[:seqlen], 's d -> s 1 d'),
                                rearrange(sin[:seqlen], 's d -> s 1 d'), dk1, dk2, True)
return dqkv, None, None
apply_rotary_emb_qkv_ = ApplyRotaryEmbQKV_.apply
class RotaryEmbedding(torch.nn.Module):
"""
The rotary position embeddings from RoFormer_ (Su et. al).
A crucial insight from the method is that the query and keys are
transformed by rotation matrices which depend on the relative positions.
Other implementations are available in the Rotary Transformer repo_ and in
    GPT-NeoX_; GPT-NeoX was an inspiration.
.. _RoFormer: https://arxiv.org/abs/2104.09864
.. _repo: https://github.com/ZhuiyiTechnology/roformer
.. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox
"""
def __init__(self, dim_model: int, *_, **__):
super().__init__()
# Generate and save the inverse frequency buffer (non trainable)
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim_model, 2).float() / dim_model))
self.register_buffer("inv_freq", inv_freq)
self._seq_len_cached = 0
self._cos_cached = None
self._sin_cached = None
def _update_cos_sin_cache(self, x):
"""x: (batch, seqlen, nheads, headdim) or (batch, seqlen, 3, nheads, headdim)
"""
seqlen = x.shape[1]
# Reset the tables if the sequence length has changed,
# or if we're on a new device (possibly due to tracing for instance)
if (seqlen > self._seq_len_cached or self._cos_cached.device != x.device
or self._cos_cached.dtype != x.dtype):
self._seq_len_cached = seqlen
t = torch.arange(seqlen, device=x.device, dtype=self.inv_freq.dtype)
# Don't do einsum, it converts fp32 to fp16
# freqs = torch.einsum("i,j->ij", t, self.inv_freq)
freqs = torch.outer(t, self.inv_freq)
self._cos_cached = torch.cos(freqs).to(x.dtype)
self._sin_cached = torch.sin(freqs).to(x.dtype)
    def forward(self, qkv: torch.Tensor) -> torch.Tensor:
self._update_cos_sin_cache(qkv)
return apply_rotary_emb_qkv_(qkv, self._cos_cached, self._sin_cached)
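# Usage sketch (added for illustration, not part of the original file). Shapes follow
# the docstrings above; running it requires a CUDA device and the compiled `rotary_emb`
# extension, so it is left as a comment:
#
#     rotary = RotaryEmbedding(dim_model=64).to('cuda')
#     qkv = torch.randn(2, 512, 3, 12, 64, device='cuda', dtype=torch.float16)
#     qkv = rotary(qkv)   # q and k are rotated in place, v is left untouched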
| flash-attention-main | flash_attn/layers/rotary.py |
# Copyright (c) 2022, Tri Dao.
""" Useful functions for writing test code. """
import torch
import torch.utils.benchmark as benchmark
def benchmark_forward(fn, *inputs, repeats=10, desc='', verbose=True, amp=False,
amp_dtype=torch.float16, **kwinputs):
""" Use Pytorch Benchmark on the forward pass of an arbitrary function. """
if verbose:
print(desc, '- Forward pass')
def fn_amp(*inputs, **kwinputs):
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
fn(*inputs, **kwinputs)
for _ in range(repeats): # warmup
fn_amp(*inputs, **kwinputs)
t = benchmark.Timer(
stmt='fn_amp(*inputs, **kwinputs)',
globals={'fn_amp': fn_amp, 'inputs': inputs, 'kwinputs': kwinputs},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
def benchmark_backward(fn, *inputs, grad=None, repeats=10, desc='', verbose=True, amp=False,
amp_dtype=torch.float16, **kwinputs):
""" Use Pytorch Benchmark on the backward pass of an arbitrary function. """
if verbose:
print(desc, '- Backward pass')
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
y = fn(*inputs, **kwinputs)
if type(y) is tuple:
y = y[0]
if grad is None:
grad = torch.randn_like(y)
else:
if grad.shape != y.shape:
raise RuntimeError('Grad shape does not match output shape')
for _ in range(repeats): # warmup
y.backward(grad, retain_graph=True)
t = benchmark.Timer(
stmt='y.backward(grad, retain_graph=True)',
globals={'y': y, 'grad': grad},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
def benchmark_combined(fn, *inputs, grad=None, repeats=10, desc='', verbose=True, amp=False,
amp_dtype=torch.float16, **kwinputs):
""" Use Pytorch Benchmark on the forward+backward pass of an arbitrary function. """
if verbose:
print(desc, '- Forward + Backward pass')
def f(grad, *inputs, **kwinputs):
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
y = fn(*inputs, **kwinputs)
if type(y) is tuple:
y = y[0]
if grad is None:
grad = torch.randn_like(y)
else:
if grad.shape != y.shape:
raise RuntimeError('Grad shape does not match output shape')
y.backward(grad, retain_graph=True)
for _ in range(repeats): # warmup
f(grad, *inputs, **kwinputs)
t = benchmark.Timer(
stmt='f(grad, *inputs, **kwinputs)',
globals={'f': f, 'fn': fn, 'inputs': inputs, 'grad': grad, 'kwinputs': kwinputs},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
def benchmark_all(fn, *inputs, grad=None, repeats=10, desc='', verbose=True, amp=False,
amp_dtype=torch.float16, **kwinputs):
""" Use Pytorch Benchmark on the forward+backward pass of an arbitrary function. """
return (
benchmark_forward(fn, *inputs, repeats=repeats, desc=desc, verbose=verbose,
amp=amp, amp_dtype=amp_dtype, **kwinputs),
benchmark_backward(fn, *inputs, grad=grad, repeats=repeats, desc=desc, verbose=verbose,
amp=amp, amp_dtype=amp_dtype, **kwinputs),
benchmark_combined(fn, *inputs, grad=grad, repeats=repeats, desc=desc, verbose=verbose,
amp=amp, amp_dtype=amp_dtype, **kwinputs),
)
def pytorch_profiler(fn, *inputs, trace_filename=None, backward=False, amp=False,
amp_dtype=torch.float16, cpu=False, verbose=True, **kwinputs):
""" Wrap benchmark functions in Pytorch profiler to see CUDA information. """
if backward:
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
g = torch.randn_like(fn(*inputs, **kwinputs))
for _ in range(30): # Warm up
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
if backward:
for x in inputs:
if isinstance(x, torch.Tensor):
x.grad = None
# fn(*inputs, **kwinputs) if not backward else fn(*inputs, **kwinputs).backward(g)
out = fn(*inputs, **kwinputs)
# Backward should be done outside autocast
if backward:
out.backward(g)
activities = ([torch.profiler.ProfilerActivity.CPU] if cpu else []) + [torch.profiler.ProfilerActivity.CUDA]
with torch.profiler.profile(
activities=activities,
record_shapes=True,
# profile_memory=True,
with_stack=True,
) as prof:
with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
if backward:
for x in inputs:
if isinstance(x, torch.Tensor):
x.grad = None
out = fn(*inputs, **kwinputs)
if backward: out.backward(g)
if verbose:
# print(prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=50))
print(prof.key_averages().table(row_limit=50))
if trace_filename is not None:
prof.export_chrome_trace(trace_filename)
def benchmark_memory(fn, *inputs, desc='', verbose=True, **kwinputs):
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
torch.cuda.synchronize()
fn(*inputs, **kwinputs)
torch.cuda.synchronize()
    mem = torch.cuda.max_memory_allocated() / 1e9  # peak allocation, reported in GB
if verbose:
print(f'{desc} max memory: {mem}GB')
torch.cuda.empty_cache()
return mem
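if __name__ == '__main__':
    # Self-test sketch (added for illustration, not part of the original file): time a
    # plain matmul with the helpers above. Guarded because they assume a CUDA device
    # (autocast with device_type='cuda', CUDA memory statistics).
    if torch.cuda.is_available():
        a = torch.randn(1024, 1024, device='cuda', requires_grad=True)
        b = torch.randn(1024, 1024, device='cuda', requires_grad=True)
        benchmark_all(torch.matmul, a, b, repeats=10, desc='matmul', verbose=True)
        benchmark_memory(torch.matmul, a, b, desc='matmul')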
| flash-attention-main | flash_attn/utils/benchmark.py |
# Copyright (c) 2022, Tri Dao.
# Inspired by / adapted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
import math
from functools import partial
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import trunc_normal_
from einops import rearrange
from timm.models.helpers import named_apply
from flash_attn.layers.patch_embed import PatchEmbed
from flash_attn.modules.mha import MHA
from flash_attn.modules.mlp import Mlp, FusedDenseGeluDense
from flash_attn.modules.block import Block
def create_mixer_cls(num_heads, qkv_bias, attn_drop, use_flash_attn, fused_bias_fc,
cross_attn=False):
mixer_cls = partial(MHA, num_heads=num_heads, cross_attn=cross_attn, bias=qkv_bias,
dropout=attn_drop, fused_bias_fc=fused_bias_fc,
use_flash_attn=use_flash_attn)
return mixer_cls
def create_mlp_cls(embed_dim, mlp_ratio, act_layer, fused_dense_gelu_dense):
inner_dim = int(embed_dim * mlp_ratio)
if not fused_dense_gelu_dense:
mlp_cls = partial(Mlp, hidden_features=inner_dim, activation=act_layer())
else:
mlp_cls = partial(FusedDenseGeluDense, hidden_features=inner_dim)
return mlp_cls
def create_block(embed_dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, drop_path,
norm_layer, act_layer, use_flash_attn, fused_bias_fc, fused_dense_gelu_dense,
fused_dropout_add_ln, layer_idx=None, n_layer=None, last_layer_subset=False):
mixer_cls = create_mixer_cls(num_heads, qkv_bias, attn_drop_rate, use_flash_attn, fused_bias_fc,
cross_attn=(last_layer_subset and layer_idx == n_layer - 1))
mlp_cls = create_mlp_cls(embed_dim, mlp_ratio, act_layer, fused_dense_gelu_dense)
block = Block(embed_dim, mixer_cls, mlp_cls, norm_cls=norm_layer,
prenorm=True, resid_dropout=drop_rate, drop_path=drop_path,
fused_dropout_add_ln=fused_dropout_add_ln)
return block
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
- https://arxiv.org/abs/2010.11929
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='token',
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=True,
init_values=None,
class_token=True,
no_embed_class=False,
pre_norm=False,
fc_norm=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
weight_init='',
embed_layer=PatchEmbed,
norm_layer=None,
act_layer=None,
use_flash_attn=False,
fused_bias_fc=False,
fused_dense_gelu_dense=False,
fused_dropout_add_ln=False,
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
global_pool (str): type of global pooling for final sequence (default: 'token')
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
init_values: (float): layer-scale init values
class_token (bool): use class token
fc_norm (Optional[bool]): pre-fc norm after pool, set if global_pool == 'avg' if None (default: None)
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
weight_init (str): weight init scheme
embed_layer (nn.Module): patch embedding layer
norm_layer: (nn.Module): normalization layer
act_layer: (nn.Module): MLP activation layer
"""
super().__init__()
assert global_pool == 'token', 'Only support pooling with CLS token'
assert class_token
assert init_values is None, 'LayerScale is not supported yet'
assert weight_init == ''
assert fc_norm is None
# pre_norm seems redundant, as there's a LayerNorm right at the start of each block, idk
assert not pre_norm
use_fc_norm = global_pool == 'avg' if fc_norm is None else fc_norm
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_prefix_tokens = 1 if class_token else 0
self.no_embed_class = no_embed_class
patch_embed_extra_kwargs = ({'fused_bias_fc': fused_bias_fc} if embed_layer is PatchEmbed
else {})
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
bias=not pre_norm, # disable bias if pre-norm is used (e.g. CLIP)
**patch_embed_extra_kwargs
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None
embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens
self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
# We change the order of residual and layer norm:
# Instead of LN -> Attn / MLP -> Dropout -> Add, we do:
# Attn / MLP -> Dropout -> Add -> LN, returning both the residual branch (output of Add) and
# the main branch (output of LN). The model definition is unchanged, but the mapping of the
# nn.LayerNorm weights are changed.
# This is for performance reason: we can fuse dropout + add + layer_norm.
# self.norm_0 is the first layer norm in the model, while self.norm
# (in the pretrained weight) is the final layer norm.
self.norm_0 = norm_layer(embed_dim)
self.blocks = nn.ModuleList([create_block(
embed_dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, drop_path=dpr[i],
norm_layer=norm_layer, act_layer=act_layer, use_flash_attn=use_flash_attn,
fused_bias_fc=fused_bias_fc, fused_dense_gelu_dense=fused_dense_gelu_dense,
fused_dropout_add_ln=fused_dropout_add_ln, layer_idx=i, n_layer=depth,
last_layer_subset=(global_pool == 'token')
) for i in range(depth)])
# Classifier Head
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.init_weights(weight_init)
def init_weights(self, mode=''):
assert mode == ''
trunc_normal_(self.pos_embed, std=.02)
if self.cls_token is not None:
nn.init.normal_(self.cls_token, std=1e-6)
named_apply(init_weights_vit_timm, self)
def _init_weights(self, m):
# this fn left here for compat with downstream users
init_weights_vit_timm(m)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def _pos_embed(self, x):
if self.no_embed_class:
# deit-3, updated JAX (big vision)
# position embedding does not overlap with class token, add then concat
x = x + self.pos_embed
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
else:
# original timm, JAX, and deit vit impl
# pos_embed has entry for class token, concat then add
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
x = x + self.pos_embed
return self.pos_drop(x)
def forward_features(self, x, all_tokens=True):
"""
If all_tokens==False and self.global_pool == 'token', we only return the features for the
cls token.
"""
x = self.patch_embed(x)
# TD [2022-10-15]: Force residual in fp32 in case of DeepSpeed
residual = self._pos_embed(x).float()
hidden_states = self.norm_0(residual.to(dtype=self.norm_0.weight.dtype))
if self.global_pool != 'token' or all_tokens:
for block in self.blocks:
hidden_states, residual = block(hidden_states, residual)
else:
for block in self.blocks[:-1]:
hidden_states, residual = block(hidden_states, residual)
# For the last layer, we only want the 1st token of the output. So we do cross-attention
# where the query is the 1st token and the key/value is the whole sequence.
hidden_states_1st = rearrange(hidden_states[:, 0], 'b d -> b 1 d')
residual_1st = rearrange(residual[:, 0], 'b d -> b 1 d')
hidden_states, _ = self.blocks[-1](hidden_states_1st, residual_1st,
mixer_kwargs={'x_kv': hidden_states})
return hidden_states
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x, all_tokens=False)
x = self.forward_head(x)
return x
def init_weights_vit_timm(module: nn.Module, name: str = ''):
""" ViT weight initialization, original timm impl (for reproducibility) """
if isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def vit_base_patch16_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
assert not pretrained
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = VisionTransformer(**model_kwargs)
return model
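# Usage sketch (added for illustration, not part of the original file). With the default
# flags (no flash attention, no fused kernels) the model should run with plain PyTorch
# modules; the fused options require the corresponding CUDA extensions and a GPU:
#
#     model = vit_base_patch16_224()              # use_flash_attn=False by default
#     images = torch.randn(8, 3, 224, 224)
#     logits = model(images)                      # expected shape (8, 1000)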
| flash-attention-main | flash_attn/models/vit.py |
# Copyright (c) 2022, Tri Dao.
import math
from functools import partial
from collections import namedtuple
from collections.abc import Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from flash_attn.modules.mha import MHA
from flash_attn.modules.mlp import Mlp, FusedDenseGeluDense
from flash_attn.modules.block import Block
from flash_attn.modules.embedding import GPT2Embeddings
try:
from flash_attn.ops.layer_norm import dropout_add_layer_norm
except ImportError:
dropout_add_layer_norm = None
try:
from flash_attn.ops.triton.mlp import FusedDenseSqreluDense
except ImportError:
FusedDenseSqreluDense = None
def create_mixer_cls(config, layer_idx=None):
head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
softmax_scale = 1.0 if not config.scale_attn_weights else head_dim ** (-0.5)
if config.scale_attn_by_inverse_layer_idx:
assert layer_idx is not None
softmax_scale /= float(layer_idx + 1)
dwconv = getattr(config, 'attn_dwconv', False)
rotary_emb_dim = int(getattr(config, 'rotary_emb_fraction', 0.0) * head_dim)
use_flash_attn = getattr(config, 'use_flash_attn', False)
fused_bias_fc = getattr(config, 'fused_bias_fc', False)
mixer_cls = partial(MHA, num_heads=config.num_attention_heads, dropout=config.attn_pdrop,
softmax_scale=softmax_scale, causal=True, dwconv=dwconv,
rotary_emb_dim=rotary_emb_dim,
fused_bias_fc=fused_bias_fc, use_flash_attn=use_flash_attn)
return mixer_cls
def create_mlp_cls(config, layer_idx=None):
inner_dim = config.n_inner if config.n_inner is not None else 4 * config.hidden_size
fused_dense_gelu_dense = getattr(config, 'fused_dense_gelu_dense', False)
fused_dense_sqrelu_dense = getattr(config, 'fused_dense_sqrelu_dense', False)
assert not (fused_dense_sqrelu_dense and fused_dense_gelu_dense)
if not fused_dense_gelu_dense and not fused_dense_sqrelu_dense:
mlp_cls = partial(Mlp, hidden_features=inner_dim,
activation=partial(F.gelu, approximate='tanh'))
else:
mlp_checkpoint_lvl = getattr(config, 'mlp_checkpoint_lvl', 0)
# mlp_checkpoint_lvl could be a list, which contains the checkpoint_lvl for each layer
if isinstance(mlp_checkpoint_lvl, Sequence):
assert layer_idx is not None
mlp_checkpoint_lvl = mlp_checkpoint_lvl[layer_idx]
if fused_dense_gelu_dense:
mlp_cls = partial(FusedDenseGeluDense, hidden_features=inner_dim,
checkpoint_lvl=mlp_checkpoint_lvl)
elif fused_dense_sqrelu_dense:
assert FusedDenseSqreluDense is not None
mlp_cls = partial(FusedDenseSqreluDense, hidden_features=inner_dim,
checkpoint_lvl=mlp_checkpoint_lvl)
else:
raise RuntimeError('MLP type not supported')
return mlp_cls
def create_block(config, layer_idx=None):
mixer_cls = create_mixer_cls(config, layer_idx)
mlp_cls = create_mlp_cls(config, layer_idx)
norm_cls = partial(nn.LayerNorm, eps=config.layer_norm_epsilon)
block = Block(config.hidden_size, mixer_cls, mlp_cls, norm_cls=norm_cls,
prenorm=True, resid_dropout=config.resid_pdrop,
fused_dropout_add_ln=getattr(config, 'fused_dropout_add_ln', False))
block.layer_idx = layer_idx
return block
# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(module, n_layer, initializer_range=0.02, rescale_prenorm_residual=True):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=initializer_range)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=initializer_range)
if rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if name in ["out_proj.weight", "fc2.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
class GPT2Model(nn.Module):
def __init__(self, config: GPT2Config):
super().__init__()
self.pad_vocab_size_multiple_8 = getattr(config, 'pad_vocab_size_multiple_8', False)
if self.pad_vocab_size_multiple_8:
if config.vocab_size % 8 != 0:
config.vocab_size += 8 - (config.vocab_size % 8)
self.embeddings = GPT2Embeddings(config.hidden_size, config.vocab_size,
config.max_position_embeddings)
self.emb_drop = nn.Dropout(config.embd_pdrop)
# We change the order of residual and layer norm:
# Instead of LN -> Attn / MLP -> Dropout -> Add, we do:
# Attn / MLP -> Dropout -> Add -> LN, returning both the residual branch (output of Add) and
# the main branch (output of LN). The model definition is unchanged, but the mapping of the
# nn.LayerNorm weights are changed.
# This is for performance reason: we can fuse dropout + add + layer_norm.
self.fused_dropout_add_ln = getattr(config, 'fused_dropout_add_ln', False)
if self.fused_dropout_add_ln and dropout_add_layer_norm is None:
raise ImportError('dropout_add_layer_norm is not installed')
# self.ln_0 is the first layer norm in the model, while self.ln_f (in the pretrained weight)
# is the final layer norm.
self.ln_0 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.layers = nn.ModuleList([create_block(config, layer_idx=i)
for i in range(config.num_hidden_layers)])
self.apply(partial(_init_weights, n_layer=config.num_hidden_layers,
initializer_range=config.initializer_range))
def forward(self, input_ids, position_ids=None):
hidden_states = self.embeddings(input_ids, position_ids=position_ids)
# TD [2022-07-30]: Force residual in fp32, seems to make fp16 training more stable
if not self.fused_dropout_add_ln:
residual = self.emb_drop(hidden_states).float()
hidden_states = self.ln_0(residual.to(dtype=self.ln_0.weight.dtype))
else:
hidden_states, residual = dropout_add_layer_norm(
hidden_states, None, self.ln_0.weight, self.ln_0.bias,
self.emb_drop.p if self.training else 0.0, self.ln_0.eps, prenorm=True,
residual_in_fp32=True
)
for layer in self.layers:
hidden_states, residual = layer(hidden_states, residual)
return hidden_states
class GPT2LMHeadModel(nn.Module):
def __init__(self, config: GPT2Config):
super().__init__()
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=config.num_hidden_layers,
initializer_range=config.initializer_range))
self.tie_weights()
def tie_weights(self):
self.lm_head.weight = self.transformer.embeddings.word_embeddings.weight
def forward(self, input_ids, position_ids=None):
hidden_states = self.transformer(input_ids, position_ids=position_ids)
lm_logits = self.lm_head(hidden_states)
CausalLMOutput = namedtuple('CausalLMOutput', ['logits'])
return CausalLMOutput(logits=lm_logits)
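# Usage sketch (added for illustration, not part of the original file). The speedup
# options are read with getattr() above, so they are off unless set on the config:
#
#     config = GPT2Config(n_embd=768, n_head=12, n_layer=12)
#     config.use_flash_attn = True           # requires the flash_attn CUDA kernels
#     config.fused_bias_fc = True            # requires fused_dense_lib
#     config.fused_dense_gelu_dense = True   # requires fused_dense_lib
#     config.fused_dropout_add_ln = True     # requires dropout_layer_norm
#     model = GPT2LMHeadModel(config).cuda().half()
#     logits = model(input_ids).logits       # input_ids: (batch, seqlen) int64 on GPU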
| flash-attention-main | flash_attn/models/gpt.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/apex/fused_dense/fused_dense.py
# We make it work with pytorch amp and with bfloat16.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import custom_bwd, custom_fwd
# import fused_dense_cuda # from apex
import fused_dense_lib as fused_dense_cuda
from flash_attn.ops.gelu_activation import gelu_bwd
# implements fused GEMM+bias in forward pass using mlp_cuda from apex
class FusedDenseFuncTD(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, x, weight, bias):
if torch.is_autocast_enabled():
dtype = torch.get_autocast_gpu_dtype()
x, weight, bias = [a.to(dtype=dtype) for a in [x, weight, bias]]
x = x.contiguous()
weight = weight.contiguous()
bias = bias.contiguous()
ctx.save_for_backward(x, weight)
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = batch_shape.numel()
assert batch_dim <= 64 * 1024, 'fused_dense only supports dimension at most 64k'
output = fused_dense_cuda.linear_bias_forward(x.reshape(batch_dim, n), weight, bias)
return output.reshape(*batch_shape, output.shape[-1])
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
grad_output = grad_output.contiguous()
x, weight = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = batch_shape.numel()
if ctx.needs_input_grad[0]:
grad_input, grad_weight, grad_bias = fused_dense_cuda.linear_bias_backward(
x.reshape(batch_dim, n), weight, grad_output.reshape(batch_dim, grad_output.shape[-1])
)
grad_input = grad_input.reshape_as(x)
else:
grad_weight, grad_bias = fused_dense_cuda.linear_bias_wgrad(
x.reshape(batch_dim, n), grad_output.reshape(batch_dim, grad_output.shape[-1])
)
grad_input = None
# print((grad_bias - grad_output.view(-1, grad_output.shape[-1]).sum(dim=0)).abs().max())
return grad_input, grad_weight, grad_bias
# grad_input, grad_weight = None, None
# grad_output_reshaped = grad_output.reshape(batch_dim, grad_output.shape[-1])
# if ctx.needs_input_grad[0]:
# grad_input = (grad_output_reshaped @ weight.conj()).reshape(*batch_shape, n)
# if ctx.needs_input_grad[1]:
# grad_weight = grad_output_reshaped.t() @ x.conj().reshape(batch_dim, n)
# # We don't need to compute grad_bias explicitly, when we return grad_out Pytorch
# # will sum over the batch dimension to get grad_bias.
# return grad_input, grad_weight, grad_output
fused_dense_function_td = FusedDenseFuncTD.apply
class FusedDenseTD(nn.Linear):
def __init__(self, in_features: int, out_features: int, bias: bool = True,
device=None, dtype=None) -> None:
super().__init__(in_features, out_features, bias=bias, device=device, dtype=dtype)
def forward(self, x):
if x.is_cuda and self.bias is not None:
return fused_dense_function_td(x, self.weight, self.bias)
else:
return F.linear(x, self.weight, self.bias)
class FusedDenseResidualFunc(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, x, weight, bias):
if torch.is_autocast_enabled():
dtype = torch.get_autocast_gpu_dtype()
x, weight, bias = [a.to(dtype=dtype) for a in [x, weight, bias]]
        x = x.contiguous()
weight = weight.contiguous()
bias = bias.contiguous()
ctx.save_for_backward(x, weight)
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = batch_shape.numel()
assert batch_dim <= 64 * 1024, 'fused_dense only supports dimension at most 64k'
output = fused_dense_cuda.linear_bias_forward(x.reshape(batch_dim, n), weight, bias)
return output.reshape(*batch_shape, output.shape[-1]), x
@staticmethod
@custom_bwd
def backward(ctx, grad_output, grad_input):
grad_output = grad_output.contiguous()
grad_input = grad_input.contiguous()
x, weight = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = batch_shape.numel()
grad_input, grad_weight, grad_bias = fused_dense_cuda.linear_bias_residual_backward(
x.reshape(batch_dim, n), weight, grad_output.reshape(batch_dim, grad_output.shape[-1]),
grad_input.reshape(batch_dim, n)
)
return grad_input.reshape_as(x), grad_weight, grad_bias
fused_dense_residual_function = FusedDenseResidualFunc.apply
class FusedDenseResidual(nn.Linear):
"""Similar to FusedDense, but we return both the output and the input.
This is so that in the backward pass, we can combine the input gradient from the residual branch
with the input gradient from the matrix multiply, without having to do a separate addition.
"""
def forward(self, x):
if x.is_cuda and self.bias is not None:
return fused_dense_residual_function(x, self.weight, self.bias)
else:
return F.linear(x, self.weight, self.bias), x
class FusedDenseGeluDenseFuncTD(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, x, weight1, bias1, weight2, bias2, checkpoint_lvl=0, heuristic=0):
"""checkpoint_lvl:
0: no recomputation in the bwd
1: recompute gelu_out in the bwd
2: recompute gelu_in and gelu_out in the bwd
"""
assert -1 <= heuristic <= 4
if torch.is_autocast_enabled():
dtype = torch.get_autocast_gpu_dtype()
x, weight1, bias1, weight2, bias2 = [a.to(dtype=dtype)
for a in [x, weight1, bias1, weight2, bias2]]
assert checkpoint_lvl in [0, 1, 2]
x = x.contiguous()
weight1 = weight1.contiguous()
bias1 = bias1.contiguous()
weight2 = weight2.contiguous()
bias2 = bias2.contiguous()
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = batch_shape.numel()
assert batch_dim <= 64 * 1024, 'fused_dense only supports dimension at most 64k'
# output1, output2, gelu_in = fused_dense_cuda.linear_gelu_linear_forward(
# x.reshape(batch_dim, n), weight1, bias1, weight2, bias2
# )
if heuristic == -1:
gelu_in = fused_dense_cuda.linear_bias_forward(x.reshape(batch_dim, n), weight1, bias1)
output1 = F.gelu(gelu_in, approximate='tanh')
# gelu_in = F.linear(x.reshape(batch_dim, n), weight1) # This is before adding bias1
# with torch.jit.fuser('fuser2'):
# output1 = bias_gelu(gelu_in, bias1)
else:
save_gelu_in = checkpoint_lvl != 2
output1, *rest = fused_dense_cuda.linear_gelu_forward(x.reshape(batch_dim, n), weight1,
bias1, save_gelu_in, heuristic)
if save_gelu_in:
gelu_in = rest[0]
output2 = fused_dense_cuda.linear_bias_forward(output1, weight2, bias2)
ctx.checkpoint_lvl = checkpoint_lvl
ctx.heuristic = heuristic
if checkpoint_lvl == 0:
ctx.save_for_backward(x, weight1, bias1, weight2, gelu_in, output1)
elif checkpoint_lvl == 1:
ctx.save_for_backward(x, weight1, bias1, weight2, gelu_in)
elif checkpoint_lvl == 2:
ctx.save_for_backward(x, weight1, bias1, weight2)
return output2.reshape(*batch_shape, output2.shape[-1])
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
grad_output = grad_output.contiguous()
checkpoint_lvl = ctx.checkpoint_lvl
x, weight1, bias1, weight2, *rest = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = batch_shape.numel()
if checkpoint_lvl == 0:
gelu_in, output1 = rest
elif checkpoint_lvl == 1:
gelu_in, = rest
output1 = F.gelu(gelu_in, approximate='tanh')
elif checkpoint_lvl == 2:
# bias1, = rest
if ctx.heuristic == -1:
gelu_in = fused_dense_cuda.linear_bias_forward(x.reshape(batch_dim, n), weight1, bias1)
output1 = F.gelu(gelu_in, approximate='tanh')
else:
output1, gelu_in = fused_dense_cuda.linear_gelu_forward(x.reshape(batch_dim, n),
weight1, bias1, True, ctx.heuristic)
if ctx.heuristic == -1:
grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
# grad_output1, grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_backward(output1, weight2, grad_output)
grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(output1, grad_output)
# grad_gelu = matmul_dgelu(grad_output, weight2, gelu_in)
grad_output1 = grad_output @ weight2
with torch.jit.fuser('fuser2'):
grad_gelu = gelu_bwd(grad_output1, gelu_in)
grad_input, grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_backward(
x.reshape(batch_dim, n), weight1, grad_gelu
)
# with torch.jit.fuser('fuser2'):
# grad_gelu, grad_bias1 = bias_gelu_back(grad_output1, gelu_in, bias1)
# grad_input = grad_gelu @ weight1
# grad_weight1 = grad_gelu.reshape(batch_dim, -1).T @ x.reshape(batch_dim, n)
# grad_input, grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_backward(
# x.reshape(batch_dim, n), weight1, grad_gelu
# )
else:
grad_input, grad_weight1, grad_bias1, grad_weight2, grad_bias2 = fused_dense_cuda.linear_gelu_linear_backward(
x.reshape(batch_dim, n), gelu_in, output1, weight1, weight2,
grad_output.reshape(batch_dim, grad_output.shape[-1]),
ctx.heuristic
)
# grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
# # grad_output1, grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_backward(output1, weight2, grad_output)
# grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(output1, grad_output)
# grad_gelu = matmul_dgelu(grad_output, weight2, gelu_in)
# grad_input, grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_backward(
# x.reshape(batch_dim, n), weight1, grad_gelu
# )
return grad_input.reshape_as(x), grad_weight1, grad_bias1, grad_weight2, grad_bias2, None, None
fused_dense_gelu_dense_function_td = FusedDenseGeluDenseFuncTD.apply
class FusedDenseGeluDenseTD(nn.Module):
def __init__(self, in_features, intermediate_features, out_features=None, bias=True,
checkpoint_lvl=0, heuristic=0, device=None, dtype=None):
"""
checkpoint_lvl (increasing lvl means slower but more memory saving):
0: no recomputation in the bwd
1: recompute gelu_out in the bwd
2: recompute gelu_in and gelu_out in the bwd
heuristic:
-1: don't fuse gemm + gelu (separate kernel)
0..4: use this heuristic for the algo section in the fused gemm + gelu
"""
assert checkpoint_lvl in [0, 1, 2]
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if out_features is None:
out_features = in_features
        assert bias, "DenseGeluDense module without bias is currently not supported"
self.checkpoint_lvl = checkpoint_lvl
self.heuristic = heuristic
self.fc1 = nn.Linear(in_features, intermediate_features, bias=bias, **factory_kwargs)
self.fc2 = nn.Linear(intermediate_features, out_features, bias=bias, **factory_kwargs)
def forward(self, x):
return fused_dense_gelu_dense_function_td(x, self.fc1.weight, self.fc1.bias,
self.fc2.weight, self.fc2.bias,
self.checkpoint_lvl, self.heuristic)
class FusedDenseResGeluDenseFunc(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, x, weight1, bias1, weight2, bias2, checkpoint_lvl=0, heuristic=0):
"""checkpoint_lvl:
0: no recomputation in the bwd
1: recompute gelu_out in the bwd
2: recompute gelu_in and gelu_out in the bwd
"""
assert -1 <= heuristic <= 4
if torch.is_autocast_enabled():
dtype = torch.get_autocast_gpu_dtype()
x, weight1, bias1, weight2, bias2 = [a.to(dtype=dtype)
for a in [x, weight1, bias1, weight2, bias2]]
assert checkpoint_lvl in [0, 1, 2]
x = x.contiguous()
weight1 = weight1.contiguous()
bias1 = bias1.contiguous()
weight2 = weight2.contiguous()
bias2 = bias2.contiguous()
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = batch_shape.numel()
assert batch_dim <= 64 * 1024, 'fused_dense only supports dimension at most 64k'
# output1, output2, gelu_in = fused_dense_cuda.linear_gelu_linear_forward(
# x.reshape(batch_dim, n), weight1, bias1, weight2, bias2
# )
# gelu_in = fused_dense_cuda.linear_bias_forward(x.reshape(batch_dim, n), weight1, bias1)
# output1 = F.gelu(gelu_in, approximate='tanh')
save_gelu_in = checkpoint_lvl != 2
output1, *rest = fused_dense_cuda.linear_gelu_forward(x.reshape(batch_dim, n), weight1,
bias1, save_gelu_in, heuristic)
if save_gelu_in:
gelu_in = rest[0]
output2 = fused_dense_cuda.linear_bias_forward(output1, weight2, bias2)
ctx.checkpoint_lvl = checkpoint_lvl
ctx.heuristic = heuristic
if checkpoint_lvl == 0:
ctx.save_for_backward(x, weight1, weight2, gelu_in, output1)
elif checkpoint_lvl == 1:
ctx.save_for_backward(x, weight1, weight2, gelu_in)
elif checkpoint_lvl == 2:
ctx.save_for_backward(x, weight1, weight2, bias1)
return output2.reshape(*batch_shape, output2.shape[-1]), x
@staticmethod
@custom_bwd
def backward(ctx, grad_output, grad_input):
grad_output = grad_output.contiguous()
grad_input = grad_input.contiguous()
checkpoint_lvl = ctx.checkpoint_lvl
x, weight1, weight2, *rest = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = batch_shape.numel()
if checkpoint_lvl == 0:
gelu_in, output1 = rest
elif checkpoint_lvl == 1:
gelu_in, = rest
output1 = F.gelu(gelu_in, approximate='tanh')
elif checkpoint_lvl == 2:
bias1, = rest
output1, gelu_in = fused_dense_cuda.linear_gelu_forward(x.reshape(batch_dim, n),
weight1, bias1, True, ctx.heuristic)
grad_input, grad_weight1, grad_bias1, grad_weight2, grad_bias2 = fused_dense_cuda.linear_residual_gelu_linear_backward(
x.reshape(batch_dim, n), gelu_in, output1, weight1, weight2,
grad_output.reshape(batch_dim, grad_output.shape[-1]),
grad_input.reshape(batch_dim, n),
ctx.heuristic
)
# grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
# # grad_output1, grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_backward(output1, weight2, grad_output)
# grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(output1, grad_output)
# grad_gelu = matmul_dgelu(grad_output, weight2, gelu_in)
# grad_input, grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_residual_backward(
# x.reshape(batch_dim, n), weight1, grad_gelu,
# grad_input.reshape(batch_dim, n)
# )
return grad_input.reshape_as(x), grad_weight1, grad_bias1, grad_weight2, grad_bias2, None, None
fused_dense_res_gelu_dense_function_td = FusedDenseResGeluDenseFunc.apply
class FusedDenseResGeluDense(FusedDenseGeluDenseTD):
def forward(self, x):
        return fused_dense_res_gelu_dense_function_td(x, self.fc1.weight, self.fc1.bias,
                                                      self.fc2.weight, self.fc2.bias,
                                                      self.checkpoint_lvl, self.heuristic)
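# Usage sketch (added for illustration, not part of the original file). These modules
# need the compiled `fused_dense_lib` extension and CUDA inputs in fp16/bf16; note that
# FusedDenseTD falls back to F.linear for CPU inputs or when bias is None:
#
#     mlp = FusedDenseGeluDenseTD(1024, 4096, 1024, checkpoint_lvl=1).cuda().half()
#     x = torch.randn(8, 512, 1024, device='cuda', dtype=torch.float16, requires_grad=True)
#     y = mlp(x)             # fused GEMM + bias + GELU + GEMM + bias
#     y.sum().backward()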
| flash-attention-main | flash_attn/ops/fused_dense.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/apex/contrib/layer_norm/layer_norm.py
import torch
from torch.nn import init
# from apex._autocast_utils import _cast_if_autocast_enabled
import dropout_layer_norm
def _dropout_add_layer_norm_forward(x0, x1, gamma, beta, rowscale, dropout_p, epsilon,
residual_in_fp32):
""" Assume that arguments are contiguous
"""
hidden_size = gamma.numel()
x0mat = x0.view((-1, hidden_size))
x1mat = x1.view((-1, hidden_size)) if x1 is not None else None
rowscale = rowscale.view(-1) if rowscale is not None else None
zmat, xmat, dmask, mu, rsigma = dropout_layer_norm.dropout_add_ln_fwd(
x0mat, x1mat, gamma, beta, rowscale, dropout_p, epsilon, None, residual_in_fp32
)
# dmask is None if dropout_p == 0.0
# xmat is None if dropout_p == 0.0 and x1 is None and residual_dtype != input_dtype
return zmat, xmat if xmat is not None else x0mat, dmask, mu, rsigma
def _dropout_add_layer_norm_backward(dz, x, dmask, mu, rsigma, gamma, rowscale, dropout_p,
has_residual):
""" Assume that arguments are contiguous
"""
# dmask is None if dropout_p == 0.0
hidden_size = gamma.numel()
xmat = x.view((-1, hidden_size))
dzmat = dz.view(xmat.shape)
rowscale = rowscale.view(-1) if rowscale is not None else None
dx0mat, dx1mat, dgamma, dbeta, _, _ = dropout_layer_norm.dropout_add_ln_bwd(
dzmat, xmat, dmask, mu, rsigma, gamma, rowscale, dropout_p, has_residual
)
# dx1mat is None if not has_residual
return dx0mat, dx1mat, dgamma, dbeta
def _dropout_add_layer_norm_prenorm_backward(dz, dx, x, dmask, mu, rsigma, gamma, rowscale,
dropout_p, has_residual):
""" Assume that arguments are contiguous
"""
hidden_size = gamma.numel()
xmat = x.view((-1, hidden_size))
dzmat = dz.view(xmat.shape)
dxmat = dx.view(xmat.shape)
rowscale = rowscale.view(-1) if rowscale is not None else None
dx0mat, dx1mat, dgamma, dbeta, _, _ = dropout_layer_norm.dropout_add_ln_prenorm_bwd(
dzmat, dxmat, xmat, dmask, mu, rsigma, gamma, rowscale, dropout_p, has_residual
)
return dx0mat, dx1mat, dgamma, dbeta
class DropoutAddLayerNormFN(torch.autograd.Function):
@staticmethod
def forward(ctx, x0, x1, gamma, beta, rowscale, dropout_p, epsilon, residual_in_fp32,
return_dmask=False):
x0 = x0.contiguous()
x1 = x1.contiguous() if x1 is not None else None
gamma = gamma.contiguous()
beta = beta.contiguous()
rowscale = rowscale.contiguous() if rowscale is not None else None
zmat, xmat, dmask, mu, rsigma = _dropout_add_layer_norm_forward(
x0, x1, gamma, beta, rowscale, dropout_p, epsilon, residual_in_fp32
)
ctx.save_for_backward(xmat.view(x0.shape), dmask, gamma, mu, rsigma, rowscale)
ctx.dropout_p = dropout_p
ctx.has_residual = x1 is not None
if not return_dmask:
return zmat.view(x0.shape)
else:
dmask = (dmask.view(x0.shape) if dropout_p > 0.
else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device))
ctx.mark_non_differentiable(dmask)
return zmat.view(x0.shape), dmask
@staticmethod
def backward(ctx, dz, *args):
# assert dz.is_contiguous()
dz = dz.contiguous() # this happens!
x, dmask, gamma, mu, rsigma, rowscale = ctx.saved_tensors
dropout_p = ctx.dropout_p
has_residual = ctx.has_residual
dx0mat, dx1mat, dgamma, dbeta = _dropout_add_layer_norm_backward(
dz, x, dmask, mu, rsigma, gamma, rowscale, dropout_p, has_residual
)
dx0 = dx0mat.view(x.shape)
dx1 = dx1mat.view(x.shape) if dx1mat is not None else None
return dx0, dx1, dgamma, dbeta, None, None, None, None, None
class DropoutAddLayerNormPrenormFN(torch.autograd.Function):
@staticmethod
def forward(ctx, x0, x1, gamma, beta, rowscale, dropout_p, epsilon, residual_in_fp32,
return_dmask=False):
x0 = x0.contiguous()
x1 = x1.contiguous() if x1 is not None else None
gamma = gamma.contiguous()
beta = beta.contiguous()
rowscale = rowscale.contiguous() if rowscale is not None else None
zmat, xmat, dmask, mu, rsigma = _dropout_add_layer_norm_forward(
x0, x1, gamma, beta, rowscale, dropout_p, epsilon, residual_in_fp32
)
ctx.save_for_backward(xmat.view(x0.shape), dmask, gamma, mu, rsigma, rowscale)
ctx.dropout_p = dropout_p
ctx.has_residual = x1 is not None
if not return_dmask:
return zmat.view(x0.shape), xmat.view(x0.shape)
else:
dmask = (dmask.view(x0.shape) if dropout_p > 0.
else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device))
ctx.mark_non_differentiable(dmask)
return zmat.view(x0.shape), xmat.view(x0.shape), dmask
@staticmethod
def backward(ctx, dz, dx, *args):
# assert dz.is_contiguous()
dz = dz.contiguous() # this happens!
dx = dx.contiguous() # this happens!
x, dmask, gamma, mu, rsigma, rowscale = ctx.saved_tensors
dropout_p = ctx.dropout_p
has_residual = ctx.has_residual
dx0mat, dx1mat, dgamma, dbeta = _dropout_add_layer_norm_prenorm_backward(
dz, dx, x, dmask, mu, rsigma, gamma, rowscale, dropout_p, has_residual
)
dx0 = dx0mat.view(x.shape)
dx1 = dx1mat.view(x.shape) if dx1mat is not None else None
return dx0, dx1, dgamma, dbeta, None, None, None, None, None
def dropout_add_layer_norm(x0, x1, weight, bias, dropout_p, epsilon, rowscale=None,
prenorm=False, residual_in_fp32=False,
return_dropout_mask=False):
"""residual_in_fp32 only has an effect if x1 is None.
Otherwise residual dtype is x1.dtype.
"""
args = (x0, x1, weight, bias, rowscale, dropout_p, epsilon, residual_in_fp32,
return_dropout_mask)
if not prenorm:
return DropoutAddLayerNormFN.apply(*args)
else:
return DropoutAddLayerNormPrenormFN.apply(*args)
class DropoutAddLayerNorm(torch.nn.Module):
def __init__(self, hidden_size, prenorm=False, p=0.5, eps=1e-5, residual_in_fp32=False,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.prenorm = prenorm
self.p = p
self.epsilon = eps
self.residual_in_fp32 = residual_in_fp32
self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
self.bias = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
self.reset_parameters()
def reset_parameters(self):
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, x0, x1=None):
return dropout_add_layer_norm(x0, x1, self.weight, self.bias,
self.p if self.training else 0.0, self.epsilon,
prenorm=self.prenorm, residual_in_fp32=self.residual_in_fp32)
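# Usage sketch (added for illustration, not part of the original file). Requires the
# compiled `dropout_layer_norm` extension and CUDA tensors:
#
#     ln = DropoutAddLayerNorm(1024, prenorm=False, p=0.1).cuda()
#     x0 = torch.randn(8, 512, 1024, device='cuda')   # main branch
#     x1 = torch.randn(8, 512, 1024, device='cuda')   # residual to add before the LayerNorm
#     out = ln(x0, x1)                                # LayerNorm(dropout(x0) + x1)
#
# With prenorm=True the module returns (normalized_output, updated_residual) instead.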
| flash-attention-main | flash_attn/ops/layer_norm.py |
# Copied from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/model/layers/activations.py
import math
import torch
from torch import nn
# 1/sqrt(2*pi)-> 0.3989423
# 1/sqrt(2) -> 0.70710678
# sqrt(2/pi) -> 0.79788456
# this function is tanh approximation of gelu
# actual gelu is:
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
@torch.jit.script
def bias_gelu(y, bias):
x = bias + y
return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=y.dtype)
# gradient of tanh approximation of gelu
# gradient of actual gelu is:
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
@torch.jit.script
def bias_gelu_back(g, y, bias):
"""Assume that y has shape (B, D) and bias has shape (D)
"""
x = bias + y
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
grad_y = ff * g
return grad_y.to(dtype=y.dtype), grad_y.sum(dim=(0), dtype=bias.dtype)
class GeLUFunction(torch.autograd.Function):
@staticmethod
# bias is an optional argument
def forward(ctx, input, bias):
ctx.save_for_backward(input, bias)
return bias_gelu(input, bias)
@staticmethod
def backward(ctx, grad_output):
        input, bias = ctx.saved_tensors
        grad_input, grad_bias = bias_gelu_back(grad_output, input, bias)
        return grad_input, grad_bias
bias_gelu_impl = GeLUFunction.apply
# this function is tanh approximation of gelu
# actual gelu is:
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
@torch.jit.script
def gelu_fwd(x):
return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=x.dtype)
# gradient of tanh approximation of gelu
# gradient of actual gelu is:
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
@torch.jit.script
def gelu_bwd(g, x):
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
return (ff * g).to(dtype=x.dtype)
class FastGeLUFunction(torch.autograd.Function):
@staticmethod
# bias is an optional argument
def forward(ctx, input):
ctx.save_for_backward(input)
return gelu_fwd(input)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
tmp = gelu_bwd(grad_output, input)
return tmp
fast_gelu_impl = FastGeLUFunction.apply
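if __name__ == '__main__':
    # Quick numerical check (added for illustration, not part of the original file): the
    # tanh approximation above should agree with F.gelu(approximate='tanh'), available in
    # PyTorch >= 1.12, to within float32 round-off.
    torch.manual_seed(0)
    x = torch.randn(4, 8, requires_grad=True)
    g = torch.randn(4, 8)
    y_ref = nn.functional.gelu(x, approximate='tanh')
    print('fwd max err:', (fast_gelu_impl(x) - y_ref).abs().max().item())
    (dx_ref,) = torch.autograd.grad(y_ref, x, g)
    print('bwd max err:', (gelu_bwd(g, x.detach()) - dx_ref).abs().max().item())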
| flash-attention-main | flash_attn/ops/gelu_activation.py |
# Adapted from https://github.com/ELS-RD/kernl/blob/main/src/kernl/implementations/linear_layer.py
# and https://github.com/openai/triton/blob/master/python/triton/ops/matmul.py
from typing import Optional
import torch
import triton
import triton.language as tl
from torch.autograd.function import FunctionCtx
from torch.cuda.amp import custom_fwd
from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time
from flash_attn.ops.triton.k_activations import gelu, gelu_grad, gelu_approx, gelu_approx_grad, squared_relu, squared_relu_grad
# CREDITS: Initially inspired by the Triton tutorial on matrix multiplications
def init_to_zero(name):
return lambda nargs: nargs[name].zero_()
def get_configs_io_bound():
configs = []
for num_stages in [2, 3, 4, 5, 6]:
for block_m in [16, 32]:
for block_k in [32, 64]:
for block_n in [32, 64, 128, 256]:
num_warps = 2 if block_n <= 64 else 4
configs.append(
triton.Config(
{"BLOCK_M": block_m, "BLOCK_N": block_n, "BLOCK_K": block_k, "SPLIT_K": 1},
num_stages=num_stages,
num_warps=num_warps,
)
)
# split_k not used
# for split_k in [2, 4, 8, 16]:
# configs.append(triton.Config(
# {'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
# num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C')))
return configs
@triton.autotune(
configs=[
triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2),
# good for int8
triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2),
]
+ get_configs_io_bound(),
key=["CACHE_KEY_M", "CACHE_KEY_N", "CACHE_KEY_K"],
prune_configs_by={"early_config_prune": early_config_prune, "perf_model": estimate_matmul_time, "top_k": 10},
)
@triton.heuristics(
{
"EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0,
}
)
@triton.jit
def kernel_fwd(
C, # Pointers to matrices
ACT_INPUT,
A,
B,
bias,
# Matrix dimensions
M,
N,
K,
CACHE_KEY_M,
CACHE_KEY_N,
CACHE_KEY_K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. stride_am is how much to increase a_ptr
# by to get the element one row down (A has M rows)
stride_cm,
# stride_cn, # Assume that stride_cn == 1
stride_am,
stride_ak,
stride_bn,
stride_bk,
# Meta-parameters
BLOCK_M: tl.constexpr,
GROUP_M: tl.constexpr,
BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr,
# split k not used, not performant with activation, kept because early_config_prune is expecting it
SPLIT_K: tl.constexpr,
EVEN_K: tl.constexpr,
A_ROWMAJOR: tl.constexpr,
B_COLMAJOR: tl.constexpr,
BIAS: tl.constexpr,
SAVE_ACT_INPUT: tl.constexpr,
ACTIVATION: tl.constexpr,
):
"""
Kernel for computing Out = activation(A x W + C)
- Input has shape (M, K)
- Weight has shape (K, N)
- Bias has shape (N,)
- Output has shape (M, N)
- ActInputs (optional) has shape (M, N)
'ActInputs' optionally saves the A x W + C intermediate for backward computations
This kernel will consolidate over K
"""
pid = tl.program_id(axis=0)
grid_m = (M + BLOCK_M - 1) // BLOCK_M
grid_n = (N + BLOCK_N - 1) // BLOCK_N
# re-order program ID for better L2 performance
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + (pid % group_size)
pid_n = (pid % width) // (group_size)
# now compute the block that each program will go through
# rm (resp. rn) denotes a range of indices
# for rows (resp. col) of C
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
# trick to avoid masking on M and N axis
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = tl.arange(0, BLOCK_K)
if A_ROWMAJOR:
A = A + (ram[:, None] * stride_am + rk[None, :])
else:
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
if B_COLMAJOR:
B = B + (rk[:, None] + rbn[None, :] * stride_bn)
else:
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
for k in range(K, 0, -BLOCK_K):
if EVEN_K:
a = tl.load(A)
b = tl.load(B)
else:
a = tl.load(A, mask=rk[None, :] < k, other=0.0)
b = tl.load(B, mask=rk[:, None] < k, other=0.0)
acc += tl.dot(a, b)
if A_ROWMAJOR:
A += BLOCK_K
else:
A += BLOCK_K * stride_ak
if B_COLMAJOR:
B += BLOCK_K
else:
B += BLOCK_K * stride_bk
# Putting bias after the matmul (instead of before) is faster, idk why
if BIAS:
bias = tl.load(bias + rn, mask=rn < N, other=0.0).to(tl.float32)
acc += bias[None, :]
# optional: save the activation inputs
if SAVE_ACT_INPUT:
# act_in_ptrs = ACT_INPUT + ram[:, None] * stride_cm + rbn[None, :] * stride_cn
act_in_ptrs = ACT_INPUT + ram[:, None] * stride_cm + rbn[None, :]
tl.store(act_in_ptrs, acc)
# optional: fused activation (while the data is in shared memory)
if ACTIVATION == "gelu":
acc = gelu(acc)
elif ACTIVATION == "gelu_approx":
acc = gelu_approx(acc)
elif ACTIVATION == "squared_relu":
acc = squared_relu(acc)
# rematerialize rm and rn to save registers
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
# write back result
# C = C + rm[:, None] * stride_cm + rn[None, :] * stride_cn
C = C + rm[:, None] * stride_cm + rn[None, :]
mask = (rm < M)[:, None] & (rn < N)[None, :]
    tl.store(C, acc, mask=mask)
def triton_linear_act(
x: torch.Tensor,
weight: torch.Tensor,
bias: Optional[torch.Tensor] = None,
activation: str = 'id',
save_act_input: bool = False,
) -> torch.Tensor:
"""
Compute e = activation(x @ weight.T + bias).
This wrapper kicks the `kernel_fwd` Triton kernel
:param x: input tensor
:param weight: weight matrix
:param bias: an optional bias tensor
:param activation: Activation name. Needs to be a Triton kernel.
:param act_input: an optional tensor to save the activation inputs (for backward)
:return: result tensor
"""
# if torch.is_autocast_enabled():
# dtype = torch.get_autocast_gpu_dtype()
# x, weight, bias = [a.to(dtype=dtype) for a in [x, weight, bias]]
assert activation in ['id', 'gelu', 'gelu_approx', 'squared_relu']
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = batch_shape.numel()
x_reshaped = x.reshape(batch_dim, n)
if x_reshaped.stride(0) > 1 and x_reshaped.stride(1) > 1:
x_reshaped = x_reshaped.contiguous()
if weight.stride(0) > 1 and weight.stride(1) > 1:
weight = weight.contiguous()
bias = bias.contiguous() if bias is not None else None
assert x.dtype == weight.dtype, f"Input and weight must have the same dtype, got {x.dtype} and {weight.dtype}"
if bias is not None:
assert x.dtype == bias.dtype, f"Input and bias must have the same dtype, got {x.dtype} and {bias.dtype}"
assert x_reshaped.shape[1] == weight.shape[1], f"Incompatible dimensions: {x_reshaped.shape} - {weight.shape}"
assert bias is None or bias.shape[0] == weight.shape[0], "Incompatible dimensions in between weight and bias"
M, K = x_reshaped.shape
N, K = weight.shape
output = torch.empty((M, N), device=x.device, dtype=x.dtype)
act_input = torch.empty_like(output) if save_act_input else None
# 1D launch kernel where each block gets its own program.
grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),) # noqa
kernel_fwd[grid](
output,
act_input,
x_reshaped,
weight, # data ptrs
bias if bias is not None else x, # auto skip bias if not present
M, # shapes
N,
K,
M // 32, # key for triton cache (limit number of compilations)
N // 32,
K // 32,
stride_cm=output.stride(0), # strides
# stride_cn=output.stride(1),
stride_am=x_reshaped.stride(0),
stride_ak=x_reshaped.stride(1),
stride_bk=weight.stride(1),
stride_bn=weight.stride(0),
BIAS=bias is not None, # optional fused bias
SAVE_ACT_INPUT=save_act_input, # optional save activation inputs
ACTIVATION=activation, # optional fused activation
A_ROWMAJOR=x_reshaped.stride(1) == 1,
B_COLMAJOR=weight.stride(1) == 1,
GROUP_M=8, # speed optimization: group the programs
)
if not save_act_input:
return output.reshape(*batch_shape, output.shape[-1])
else:
return (output.reshape(*batch_shape, output.shape[-1]),
act_input.reshape(*batch_shape, act_input.shape[-1]))
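# Usage sketch (added for illustration, not part of the original file). Requires Triton
# and a CUDA device; as in nn.Linear, `weight` is stored as (out_features, in_features):
#
#     x = torch.randn(8, 512, 1024, device='cuda', dtype=torch.float16)
#     w = torch.randn(4096, 1024, device='cuda', dtype=torch.float16)
#     b = torch.randn(4096, device='cuda', dtype=torch.float16)
#     out, pre_act = triton_linear_act(x, w, b, activation='gelu_approx', save_act_input=True)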
@triton.autotune(
configs=[
triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2),
# good for int8
triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2),
]
+ get_configs_io_bound(),
key=["CACHE_KEY_M", "CACHE_KEY_N", "CACHE_KEY_K"],
prune_configs_by={"early_config_prune": early_config_prune, "perf_model": estimate_matmul_time, "top_k": 10},
)
@triton.heuristics(
{
"EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0,
}
)
@triton.jit
def kernel_bwd(
C, # Pointers to matrices
ACT_INPUT,
A,
B,
# Matrix dimensions
M,
N,
K,
CACHE_KEY_M,
CACHE_KEY_N,
CACHE_KEY_K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. stride_am is how much to increase a_ptr
# by to get the element one row down (A has M rows)
stride_cm,
# stride_cn, # Assume that stride_cn == 1
stride_am,
stride_ak,
stride_bk,
stride_bn,
# Meta-parameters
BLOCK_M: tl.constexpr,
GROUP_M: tl.constexpr,
BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr,
# split k not used, not performant with activation, kept because early_config_prune is expecting it
SPLIT_K: tl.constexpr,
EVEN_K: tl.constexpr,
ACTIVATION: tl.constexpr,
):
"""
Kernel for computing Out = activation(A x W + C)
- Input has shape (M, K)
- Weight has shape (K, N)
- Output has shape (M, N)
- ActInputs (optional) has shape (M, N)
'ActInputs' optionally saves the A x W + C intermediate for backward computations
This kernel will consolidate over K
"""
pid = tl.program_id(axis=0)
grid_m = (M + BLOCK_M - 1) // BLOCK_M
grid_n = (N + BLOCK_N - 1) // BLOCK_N
# re-order program ID for better L2 performance
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + (pid % group_size)
pid_n = (pid % width) // (group_size)
# now compute the block that each program will go through
# rm (resp. rn) denotes a range of indices
# for rows (resp. col) of C
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
# trick to avoid masking on M and N axis
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = tl.arange(0, BLOCK_K)
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
for k in range(K, 0, -BLOCK_K):
if EVEN_K:
a = tl.load(A)
b = tl.load(B)
else:
a = tl.load(A, mask=rk[None, :] < k, other=0.0)
b = tl.load(B, mask=rk[:, None] < k, other=0.0)
acc += tl.dot(a, b)
A += BLOCK_K * stride_ak
B += BLOCK_K * stride_bk
# optional: fused activation (while the data is in shared memory)
if ACTIVATION != 'id':
act_in_ptrs = ACT_INPUT + ram[:, None] * stride_cm + rbn[None, :]
act_input = tl.load(act_in_ptrs).to(acc.dtype)
if ACTIVATION == "gelu":
acc *= gelu_grad(act_input)
elif ACTIVATION == "gelu_approx":
acc *= gelu_approx_grad(act_input)
elif ACTIVATION == "squared_relu":
acc *= squared_relu_grad(act_input)
# rematerialize rm and rn to save registers
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
# write back result
C = C + rm[:, None] * stride_cm + rn[None, :]
mask = (rm < M)[:, None] & (rn < N)[None, :]
tl.store(C, acc, mask=mask)
def triton_dgrad_act(
grad_output: torch.Tensor,
weight: torch.Tensor,
activation: str = 'id',
act_input: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Compute e = activation(grad_output @ weight + bias).
This wrapper kicks the `kernel_fwd` Triton kernel
:param grad_output: input tensor
:param weight: weight matrix
:param activation: Activation name. Needs to be a Triton kernel.
:param act_input: an optional tensor to save the activation inputs (for backward)
:return: result tensor
"""
assert activation in ['id', 'gelu', 'gelu_approx', 'squared_relu']
batch_shape, n = grad_output.shape[:-1], grad_output.shape[-1]
batch_dim = batch_shape.numel()
grad_output_reshaped = grad_output.reshape(batch_dim, n)
if grad_output_reshaped.stride(0) > 1 and grad_output_reshaped.stride(1) > 1:
grad_output_reshaped = grad_output_reshaped.contiguous()
if weight.stride(0) > 1 and weight.stride(1) > 1:
weight = weight.contiguous()
assert grad_output.dtype == weight.dtype, f"grad_output and weight must have the same dtype, got {grad_output.dtype} and {weight.dtype}"
assert grad_output_reshaped.shape[1] == weight.shape[0], f"Incompatible dimensions: {grad_output_reshaped.shape} - {weight.shape}"
if activation != 'id':
assert act_input is not None, f'act_input is required for activation {activation}'
# M, N, K in bwd are different from M, N, K in fwd
M, K = grad_output_reshaped.shape
K, N = weight.shape
grad_input = torch.empty((M, N), device=grad_output.device, dtype=grad_output.dtype)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),) # noqa
kernel_bwd[grid](
grad_input,
act_input,
grad_output_reshaped,
weight, # data ptrs
M, # shapes
N,
K,
M // 32, # key for triton cache (limit number of compilations)
N // 32,
K // 32,
stride_cm=grad_input.stride(0), # strides
# stride_cn=grad_input.stride(1),
stride_am=grad_output_reshaped.stride(0),
stride_ak=grad_output_reshaped.stride(1),
stride_bk=weight.stride(0),
stride_bn=weight.stride(1),
ACTIVATION=activation, # optional fused activation
GROUP_M=8, # speed optimization: group the programs
)
return grad_input.reshape(*batch_shape, grad_input.shape[-1])
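# A minimal usage sketch of the two wrappers above (hypothetical sizes; requires a CUDA
# device with Triton installed). It is not part of the original module; it only illustrates
# how the pre-activation saved by `triton_linear_act` feeds `triton_dgrad_act`.
def _example_linear_act_roundtrip():
    x = torch.randn(8, 1024, device='cuda', dtype=torch.float16)
    w1 = torch.randn(4096, 1024, device='cuda', dtype=torch.float16)  # fc1 weight: 1024 -> 4096
    b1 = torch.randn(4096, device='cuda', dtype=torch.float16)
    w2 = torch.randn(1024, 4096, device='cuda', dtype=torch.float16)  # fc2 weight: 4096 -> 1024
    # Forward: fused fc1 + squared ReLU, saving the pre-activation for the backward.
    out1, pre_act = triton_linear_act(x, w1, b1, activation='squared_relu',
                                      save_act_input=True)
    grad_out2 = torch.randn(8, 1024, device='cuda', dtype=torch.float16)
    # Backward through fc2 and the activation: (grad_out2 @ w2) * squared_relu_grad(pre_act).
    grad_pre_act = triton_dgrad_act(grad_out2, w2, activation='squared_relu',
                                    act_input=pre_act)
    return out1, grad_pre_act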
| flash-attention-main | flash_attn/ops/triton/linear.py |
# Adapted from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_activations.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
from enum import Enum
from typing import Optional
import triton
import triton.language as tl
_sqrt2pi = math.sqrt(2.0 / math.pi)
_sqrt1_2 = math.sqrt(1.0 / 2)
_gaussian_pdf_normalization = 1.0 / math.sqrt(2 * math.pi)
class Activation(str, Enum):
SquaredReLU = "squared_relu"
GeLU = "gelu"
GeLUApprox = "gelu_approx"
LeakyReLU = "leaky_relu"
ReLU = "relu"
def get_triton_activation_kernel(activation: Optional[Activation]):
return (
{
Activation.ReLU: relu,
Activation.LeakyReLU: leaky_relu,
Activation.GeLU: gelu,
Activation.GeLUApprox: gelu_approx,
Activation.SquaredReLU: squared_relu,
}[activation]
if activation
else None
)
def get_triton_activation_bwd_kernel(activation: Optional[Activation]):
return (
{
Activation.ReLU: relu_grad,
Activation.LeakyReLU: leaky_relu_grad,
Activation.GeLU: gelu_grad,
Activation.GeLUApprox: gelu_approx_grad,
Activation.SquaredReLU: squared_relu_grad,
}[activation]
if activation
else None
)
@triton.jit
def tanh(x):
# Tanh is just a scaled sigmoid
return 2 * tl.sigmoid(2 * x) - 1
@triton.jit
def cosh(x):
exp_x = tl.exp(x)
return (exp_x + 1.0 / exp_x) * 0.5
# a Triton implementation of the most used activations
# See for instance http://arxiv.org/abs/1606.08415 for an overview
# ReLU
@triton.jit
def relu(x):
"""
ReLU_ activation function
.. _ReLU: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html
"""
zero = 0.0
return tl.where(x >= 0, x, zero.to(x.dtype))
@triton.jit
def relu_grad(x):
    # ReLU is different from other activations in that its gradient depends only on the
    # sign of the saved input: it is 1 where x >= 0 and 0 elsewhere.
    zero = 0.0
    one = 1.0
    return tl.where(x >= 0, one.to(x.dtype), zero.to(x.dtype))
@triton.jit
def squared_relu(x):
"""
Squared ReLU activation, as proposed in the Primer_ paper.
.. _Primer: https://arxiv.org/abs/2109.08668
"""
x_ = relu(x)
return (x_ * x_).to(x.dtype)
@triton.jit
def squared_relu_grad(x):
return tl.where(x >= 0, 2.0 * x, 0.0)
# Leaky ReLU
@triton.jit
def leaky_relu(x):
"""
LeakyReLU_ activation
.. _LeakyReLU: https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html
"""
scale = 0.01 + 0.0
scale = scale.to(x.dtype)
return tl.where(x >= 0, x, scale * x)
@triton.jit
def leaky_relu_grad(x):
min_grad = 0.01
max_grad = 1
min_grad = min_grad.to(x.dtype)
max_grad = max_grad.to(x.dtype)
return tl.where(x >= 0, max_grad, min_grad)
@triton.jit
def gelu(x):
"""Gaussian Error Linear Unit (GELU)"""
return x * 0.5 * (1.0 + tl.libdevice.erf(x * _sqrt1_2))
@triton.jit
def gelu_grad(x):
cdf = 0.5 * (1.0 + tl.libdevice.erf(x * _sqrt1_2))
pdf = tl.exp(-0.5 * x * x) * _gaussian_pdf_normalization
return cdf + x * pdf
@triton.jit
def gelu_approx(x):
"""
GeLU_ activation - Gaussian error linear unit, with tanh approximation
.. _GeLU: https://arxiv.org/pdf/1606.08415.pdf
"""
return 0.5 * x * (1.0 + tanh(_sqrt2pi * x * (1.0 + 0.044715 * x * x)))
@triton.jit
def gelu_approx_grad(x):
# CREDITS: Fast implementation proposed in
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/fused_bias_gelu.py#L30
tanh_out = tanh(0.79788456 * x * (1 + 0.044715 * x * x))
return 0.5 * x * (
(1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)
) + 0.5 * (1 + tanh_out)
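# Host-side PyTorch reference for the exact-GELU derivative implemented by `gelu_grad` above,
# kept here as a documentation/testing sketch (it is not used by the Triton kernels):
# d/dx [x * Phi(x)] = Phi(x) + x * phi(x), where Phi is the standard normal CDF and phi its PDF.
def _gelu_grad_reference(x):
    import torch
    cdf = 0.5 * (1.0 + torch.erf(x * _sqrt1_2))
    pdf = torch.exp(-0.5 * x * x) * _gaussian_pdf_normalization
    return cdf + x * pdf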
| flash-attention-main | flash_attn/ops/triton/k_activations.py |
# The Triton fused matmul + sqrelu is faster for fp16 but slower for bf16,
# compared to the naive implementation.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import custom_bwd, custom_fwd
import fused_dense_lib as fused_dense_cuda
from flash_attn.ops.triton.linear import triton_linear_act, triton_dgrad_act
@torch.jit.script
def sqrelu_fwd(x):
r = F.relu(x)
return (r * r).to(dtype=x.dtype)
@torch.jit.script
def sqrelu_bwd(g, x):
return (2.0 * g * F.relu(x)).to(dtype=x.dtype)
class FusedDenseSqreluDenseFunc(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, x, weight1, bias1, weight2, bias2, checkpoint_lvl=0):
"""checkpoint_lvl:
0: no recomputation in the bwd
1: recompute gelu_out in the bwd
2: recompute act_input and gelu_out in the bwd
"""
if torch.is_autocast_enabled():
dtype = torch.get_autocast_gpu_dtype()
x, weight1, bias1, weight2, bias2 = [a.to(dtype=dtype)
for a in [x, weight1, bias1, weight2, bias2]]
is_bf16 = x.dtype == torch.bfloat16
assert checkpoint_lvl in [0, 1, 2]
x = x.contiguous()
weight1 = weight1.contiguous()
bias1 = bias1.contiguous()
weight2 = weight2.contiguous()
bias2 = bias2.contiguous()
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = batch_shape.numel()
if is_bf16:
act_input = fused_dense_cuda.linear_bias_forward(x.reshape(batch_dim, n), weight1, bias1)
output1 = sqrelu_fwd(act_input)
else:
save_act_input = checkpoint_lvl != 2
result = triton_linear_act(
x.reshape(batch_dim, n), weight1, bias1, activation='squared_relu',
save_act_input=save_act_input
)
if save_act_input:
output1, act_input = result
else:
output1 = result
output2 = fused_dense_cuda.linear_bias_forward(output1, weight2, bias2)
ctx.checkpoint_lvl = checkpoint_lvl
if checkpoint_lvl == 0:
ctx.save_for_backward(x, weight1, bias1, weight2, act_input, output1)
elif checkpoint_lvl == 1:
ctx.save_for_backward(x, weight1, bias1, weight2, act_input)
elif checkpoint_lvl == 2:
ctx.save_for_backward(x, weight1, bias1, weight2)
return output2.reshape(*batch_shape, output2.shape[-1])
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
grad_output = grad_output.contiguous()
checkpoint_lvl = ctx.checkpoint_lvl
x, weight1, bias1, weight2, *rest = ctx.saved_tensors
batch_shape, n = x.shape[:-1], x.shape[-1]
batch_dim = batch_shape.numel()
is_bf16 = x.dtype == torch.bfloat16
if checkpoint_lvl == 0:
act_input, output1 = rest
elif checkpoint_lvl == 1:
act_input, = rest
output1 = sqrelu_fwd(act_input)
elif checkpoint_lvl == 2:
if is_bf16:
act_input = fused_dense_cuda.linear_bias_forward(x.reshape(batch_dim, n), weight1, bias1)
output1 = sqrelu_fwd(act_input)
else:
output1, act_input = triton_linear_act(
x.reshape(batch_dim, n), weight1, bias1, activation='squared_relu',
save_act_input=True
)
if is_bf16:
grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(output1, grad_output)
grad_output1 = grad_output @ weight2
grad_act_input = sqrelu_bwd(grad_output1, act_input)
grad_input, grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_backward(
x.reshape(batch_dim, n), weight1, grad_act_input
)
else:
grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(output1, grad_output)
grad_act_input = triton_dgrad_act(grad_output, weight2, activation='squared_relu',
act_input=act_input)
grad_input, grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_backward(
x.reshape(batch_dim, n), weight1, grad_act_input
)
return grad_input.reshape_as(x), grad_weight1, grad_bias1, grad_weight2, grad_bias2, None
fused_dense_sqrelu_dense_function = FusedDenseSqreluDenseFunc.apply
class FusedDenseSqreluDense(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, bias=True,
checkpoint_lvl=0, device=None, dtype=None):
"""
checkpoint_lvl (increasing lvl means slower but more memory saving):
0: no recomputation in the bwd
1: recompute gelu_out in the bwd
2: recompute gelu_in and gelu_out in the bwd
"""
assert checkpoint_lvl in [0, 1, 2]
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
assert bias == True, "DenseSqreluDense module without bias is currently not supported"
self.checkpoint_lvl = checkpoint_lvl
self.fc1 = nn.Linear(in_features, hidden_features, bias=bias, **factory_kwargs)
self.fc2 = nn.Linear(hidden_features, out_features, bias=bias, **factory_kwargs)
def forward(self, x):
assert x.is_cuda
return fused_dense_sqrelu_dense_function(x, self.fc1.weight, self.fc1.bias,
self.fc2.weight, self.fc2.bias,
self.checkpoint_lvl)
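# A minimal usage sketch (hypothetical sizes; requires a CUDA device, fused_dense_lib and the
# Triton kernels above). Not part of the original module.
def _example_fused_dense_sqrelu_dense():
    mlp = FusedDenseSqreluDense(1024, hidden_features=4096, checkpoint_lvl=1,
                                device='cuda', dtype=torch.float16)
    x = torch.randn(2, 512, 1024, device='cuda', dtype=torch.float16, requires_grad=True)
    out = mlp(x)          # fc1 -> squared ReLU -> fc2, using the fused Triton path for fp16
    out.sum().backward()  # checkpoint_lvl=1: the activation output is recomputed here
    return out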
| flash-attention-main | flash_attn/ops/triton/mlp.py |
# Copyright (c) 2022, Tri Dao.
import torch
import torch.nn as nn
from einops import repeat
class GPT2Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, max_position_embeddings, padding_idx=None):
"""
If max_position_embeddings <= 0, there's no position embeddings
"""
super().__init__()
self.word_embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
self.max_position_embeddings = max_position_embeddings
if self.max_position_embeddings > 0:
self.position_embeddings = nn.Embedding(max_position_embeddings, embed_dim)
def forward(self, input_ids, position_ids=None):
"""
input_ids: (batch, seqlen)
"""
batch_size, seqlen = input_ids.shape
input_embeddings = self.word_embeddings(input_ids)
if self.max_position_embeddings > 0:
if position_ids is None:
position_ids = repeat(torch.arange(seqlen, dtype=torch.long,
device=input_ids.device),
's -> b s', b=batch_size)
position_embeddings = self.position_embeddings(position_ids)
return input_embeddings + position_embeddings
else:
return input_embeddings
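# Minimal usage sketch (hypothetical sizes); not part of the original module.
def _example_gpt2_embeddings():
    emb = GPT2Embeddings(embed_dim=768, vocab_size=50257, max_position_embeddings=1024)
    input_ids = torch.randint(0, 50257, (2, 16))  # (batch, seqlen)
    return emb(input_ids)                         # (2, 16, 768): word + position embeddings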
| flash-attention-main | flash_attn/modules/embedding.py |
# Copyright (c) 2022, Tri Dao.
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from flash_attn.ops.fused_dense import fused_dense_gelu_dense_function_td
from flash_attn.ops.fused_dense import fused_dense_res_gelu_dense_function_td
except ImportError:
fused_dense_gelu_dense_function_td = None
fused_dense_res_gelu_dense_function_td = None
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, activation=F.gelu,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features, **factory_kwargs)
self.activation = activation
self.fc2 = nn.Linear(hidden_features, out_features, **factory_kwargs)
def forward(self, x):
x = self.fc1(x)
x = self.activation(x)
x = self.fc2(x)
return x
class FusedDenseGeluDense(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, bias=True,
checkpoint_lvl=0, heuristic=0, return_residual=False, device=None, dtype=None):
"""
checkpoint_lvl (increasing lvl means slower but more memory saving):
0: no recomputation in the bwd
1: recompute gelu_out in the bwd
2: recompute gelu_in and gelu_out in the bwd
heuristic:
-1: don't fuse gemm + gelu (separate kernel)
0..4: use this heuristic for the algo section in the fused gemm + gelu
For CUDA >= 11.8, you'd want heuristic=0 for both fp16 and bf16 for best perf.
For CUDA <= 11.7, you'd want heuristic=1 for fp16 and heuristic=-1 for bf16.
return_residual: whether to return the input x along with the output. This is for
performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
assert checkpoint_lvl in [0, 1, 2]
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
assert bias == True, "DenseGeluDense module without bias is currently not supported"
assert (fused_dense_gelu_dense_function_td is not None
and fused_dense_res_gelu_dense_function_td is not None), 'fused_dense_lib is not installed'
self.checkpoint_lvl = checkpoint_lvl
self.heuristic = heuristic
self.return_residual = return_residual
self.fc1 = nn.Linear(in_features, hidden_features, bias=bias, **factory_kwargs)
self.fc2 = nn.Linear(hidden_features, out_features, bias=bias, **factory_kwargs)
def forward(self, x):
assert x.dtype in [torch.float16, torch.bfloat16]
assert x.is_cuda
fn = (fused_dense_gelu_dense_function_td if not self.return_residual
else fused_dense_res_gelu_dense_function_td)
return fn(x, self.fc1.weight, self.fc1.bias, self.fc2.weight, self.fc2.bias,
self.checkpoint_lvl, self.heuristic)
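# Minimal usage sketch (hypothetical sizes). The plain Mlp runs anywhere; the fused module
# additionally needs a CUDA device, fp16/bf16 inputs and fused_dense_lib. Not part of the
# original module.
def _example_mlp_modules():
    x = torch.randn(2, 16, 256)
    y = Mlp(256, hidden_features=1024)(x)  # reference GELU MLP
    if torch.cuda.is_available() and fused_dense_gelu_dense_function_td is not None:
        fused = FusedDenseGeluDense(256, hidden_features=1024, checkpoint_lvl=1,
                                    device='cuda', dtype=torch.float16)
        y = fused(x.to(device='cuda', dtype=torch.float16))
    return y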
| flash-attention-main | flash_attn/modules/mlp.py |
# Copyright (c) 2022, Tri Dao.
from typing import Optional
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torchvision.ops import StochasticDepth
from flash_attn.modules.mha import MHA
from flash_attn.modules.mlp import Mlp
try:
from flash_attn.ops.layer_norm import dropout_add_layer_norm
except ImportError:
dropout_add_layer_norm = None
class Block(nn.Module):
def __init__(self, dim, mixer_cls=None, mlp_cls=None, norm_cls=nn.LayerNorm,
dropout_cls=nn.Dropout, prenorm=True, resid_dropout=0., drop_path=0.,
fused_dropout_add_ln=False):
super().__init__()
self.prenorm = prenorm
self.fused_dropout_add_ln = fused_dropout_add_ln
if mixer_cls is None:
mixer_cls = partial(MHA, num_heads=dim // 64)
if mlp_cls is None:
mlp_cls = partial(Mlp, hidden_features=4 * dim)
self.mixer = mixer_cls(dim)
self.dropout1 = dropout_cls(resid_dropout)
self.drop_path1 = StochasticDepth(drop_path, mode='row')
self.norm1 = norm_cls(dim)
self.mlp = mlp_cls(dim)
if not isinstance(self.mlp, nn.Identity):
self.dropout2 = dropout_cls(resid_dropout)
self.drop_path2 = StochasticDepth(drop_path, mode='row')
self.norm2 = norm_cls(dim)
if self.fused_dropout_add_ln:
assert dropout_add_layer_norm is not None, 'dropout_add_ln is not installed'
assert isinstance(self.norm1, nn.LayerNorm) and isinstance(self.dropout1, nn.Dropout)
def forward(self, hidden_states: Tensor, residual: Optional[Tensor] = None,
mixer_kwargs=None):
r"""Pass the input through the encoder layer.
Args:
hidden_states: the sequence to the encoder layer (required).
residual: if postnorm, residual=None, If prenorm, hidden_states = LayerNorm(residual)
"""
if self.prenorm:
assert residual is not None
mixer_out = self.mixer(hidden_states,
**(mixer_kwargs if mixer_kwargs is not None else {}))
if not self.fused_dropout_add_ln:
residual = self.drop_path1(self.dropout1(mixer_out)) + residual
hidden_states = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
else:
if self.drop_path1.p == 0 or not self.training:
rowscale1 = None
else:
rowscale1 = self.drop_path1(torch.ones(
mixer_out.shape[:-1], device=mixer_out.device, dtype=mixer_out.dtype)
)
hidden_states, residual = dropout_add_layer_norm(
mixer_out, residual, self.norm1.weight, self.norm1.bias,
self.dropout1.p if self.training else 0.0, self.norm1.eps,
rowscale=rowscale1, prenorm=True
)
if not isinstance(self.mlp, nn.Identity):
mlp_out = self.mlp(hidden_states)
if not self.fused_dropout_add_ln:
residual = self.drop_path2(self.dropout2(mlp_out)) + residual
hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype))
else:
if self.drop_path2.p == 0 or not self.training:
rowscale2 = None
else:
rowscale2 = self.drop_path2(torch.ones(
mlp_out.shape[:-1], device=mlp_out.device, dtype=mlp_out.dtype)
)
hidden_states, residual = dropout_add_layer_norm(
mlp_out, residual, self.norm2.weight, self.norm2.bias,
self.dropout2.p if self.training else 0.0, self.norm2.eps,
rowscale=rowscale2, prenorm=True
)
return hidden_states, residual
else:
assert residual is None
mixer_out = self.mixer(hidden_states,
**(mixer_kwargs if mixer_kwargs is not None else {}))
if not self.fused_dropout_add_ln:
hidden_states = self.norm1((self.drop_path1(self.dropout1(mixer_out))
+ hidden_states).to(dtype=self.norm1.weight.dtype))
else:
if self.drop_path1.p == 0 or not self.training:
rowscale1 = None
else:
rowscale1 = self.drop_path1(torch.ones(
mixer_out.shape[:-1], device=mixer_out.device, dtype=mixer_out.dtype)
)
hidden_states = dropout_add_layer_norm(
mixer_out, hidden_states, self.norm1.weight, self.norm1.bias,
self.dropout1.p if self.training else 0.0, self.norm1.eps,
rowscale=rowscale1, prenorm=False
)
if not isinstance(self.mlp, nn.Identity):
mlp_out = self.mlp(hidden_states)
if not self.fused_dropout_add_ln:
hidden_states = self.norm2((self.drop_path2(self.dropout2(mlp_out))
+ hidden_states).to(dtype=self.norm2.weight.dtype))
else:
if self.drop_path2.p == 0 or not self.training:
rowscale2 = None
else:
rowscale2 = self.drop_path2(torch.ones(
mlp_out.shape[:-1], device=mlp_out.device, dtype=mlp_out.dtype)
)
hidden_states = dropout_add_layer_norm(
mlp_out, hidden_states, self.norm2.weight, self.norm2.bias,
self.dropout2.p if self.training else 0.0, self.norm2.eps,
rowscale=rowscale2, prenorm=False
)
return hidden_states
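# Minimal usage sketch (hypothetical sizes) of the pre-norm residual threading. The residual
# stream is seeded with zeros purely for illustration; in a full model it comes from the
# embedding/dropout path. Not part of the original module.
def _example_block():
    block = Block(256, mixer_cls=partial(MHA, num_heads=4), prenorm=True)
    hidden_states = torch.randn(2, 16, 256)
    residual = torch.zeros_like(hidden_states)
    # Each call returns the normalized hidden states and the updated residual stream,
    # which are threaded into the next Block.
    hidden_states, residual = block(hidden_states, residual)
    return hidden_states, residual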
| flash-attention-main | flash_attn/modules/block.py |
# Copyright (c) 2022, Tri Dao.
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
try:
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
from flash_attn.flash_attn_interface import flash_attn_unpadded_kvpacked_func
except ImportError:
flash_attn_unpadded_qkvpacked_func, flash_attn_unpadded_kvpacked_func = None, None
try:
from flash_attn.ops.flash_attn_triton import flash_attn_qkvpacked_func, flash_attn_kvpacked_func
except ImportError:
flash_attn_qkvpacked_func, flash_attn_kvpacked_func = None, None
try:
from flash_attn.ops.fused_dense import FusedDenseTD, FusedDenseResidual
except ImportError:
FusedDenseTD, FusedDenseResidual = None, None
try:
from flash_attn.layers.rotary import RotaryEmbedding
except ImportError:
RotaryEmbedding = None
class FlashSelfAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0,
triton=False, device=None, dtype=None):
super().__init__()
if attention_dropout != 0.0 or not triton:
assert flash_attn_unpadded_qkvpacked_func is not None, 'FlashAttention is not installed'
if attention_dropout == 0.0 and triton:
assert flash_attn_qkvpacked_func is not None, 'FlashAttention Triton is not installed'
self.causal = causal
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
self.triton = triton
def forward(self, qkv):
"""Implements the multihead softmax attention.
Arguments
---------
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
"""
assert qkv.dtype in [torch.float16, torch.bfloat16]
assert qkv.is_cuda
batch_size, seqlen = qkv.shape[0], qkv.shape[1]
if self.triton and (self.dropout_p == 0 or not self.training): # Triton version doesn't support dropout
output = flash_attn_qkvpacked_func(qkv, None, self.causal, self.softmax_scale)
else:
qkv = rearrange(qkv, 'b s ... -> (b s) ...')
max_s = seqlen
cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
device=qkv.device)
output = flash_attn_unpadded_qkvpacked_func(
qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
softmax_scale=self.softmax_scale, causal=self.causal
)
output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
return output
class FlashCrossAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0,
triton=False, device=None, dtype=None):
super().__init__()
if attention_dropout != 0.0 or not triton:
assert flash_attn_unpadded_kvpacked_func is not None, 'FlashAttention is not installed'
if attention_dropout == 0.0 and triton:
assert flash_attn_kvpacked_func is not None, 'FlashAttention Triton is not installed'
self.causal = causal
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
self.triton = triton
def forward(self, q, kv):
"""Implements the multihead softmax attention.
Arguments
---------
q: The tensor containing the query. (B, Sq, H, D)
kv: The tensor containing the key and value. (B, Sk, 2, H, D)
"""
assert q.dtype in [torch.float16, torch.bfloat16]
assert q.is_cuda and kv.is_cuda
batch_size, seqlen_q = q.shape[0], q.shape[1]
seqlen_k = kv.shape[1]
assert kv.shape[0] == batch_size and kv.shape[3] == q.shape[2] and kv.shape[4] == q.shape[3]
if self.triton and (self.dropout_p == 0.0 or not self.training): # Triton version doesn't support dropout
output = flash_attn_kvpacked_func(q, kv, None, self.causal, self.softmax_scale)
else:
q = rearrange(q, 'b s ... -> (b s) ...')
kv = rearrange(kv, 'b s ... -> (b s) ...')
cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q,
dtype=torch.int32, device=q.device)
cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen_k, step=seqlen_k,
dtype=torch.int32, device=kv.device)
output = flash_attn_unpadded_kvpacked_func(
q, kv, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k,
self.dropout_p if self.training else 0.0,
softmax_scale=self.softmax_scale, causal=self.causal
)
output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
return output
class SelfAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0,
device=None, dtype=None):
super().__init__()
self.causal = causal
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
def forward(self, qkv):
"""Implements the multihead softmax attention.
Arguments
---------
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
"""
batch_size, seqlen = qkv.shape[0], qkv.shape[1]
q, k, v = qkv.unbind(dim=2)
softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
if self.causal:
# "triu_tril_cuda_template" not implemented for 'BFloat16'
# So we have to construct the mask in float
causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + causal_mask.to(dtype=scores.dtype)
attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
attention_drop = F.dropout(attention, self.dropout_p if self.training else 0.0)
output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
return output
class CrossAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0,
device=None, dtype=None):
super().__init__()
self.causal = causal
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
def forward(self, q, kv):
"""Implements the multihead softmax attention.
Arguments
---------
q: The tensor containing the query. (B, Sq, H, D)
kv: The tensor containing the key and value. (B, Sk, 2, H, D)
"""
batch_size, seqlen_q = q.shape[0], q.shape[1]
seqlen_k = kv.shape[1]
assert kv.shape[0] == batch_size and kv.shape[3] == q.shape[2] and kv.shape[4] == q.shape[3]
k, v = kv.unbind(dim=2)
softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
if self.causal:
# "triu_tril_cuda_template" not implemented for 'BFloat16'
# So we have to construct the mask in float
causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0,
device=scores.device), 1)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + causal_mask.to(dtype=scores.dtype)
attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
attention_drop = F.dropout(attention, self.dropout_p if self.training else 0.0)
output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
return output
class LinearResidual(nn.Linear):
"""Wrap nn.Linear to return the residual as well. For compatibility with FusedDenseResidual.
"""
    def forward(self, input: torch.Tensor):
        return super().forward(input), input
class MHA(nn.Module):
"""Multi-head self-attention and cross-attention
"""
def __init__(self, embed_dim, num_heads, cross_attn=False, bias=True, dropout=0.0,
softmax_scale=None, causal=False, dwconv=False, rotary_emb_dim=0,
fused_bias_fc=False, use_flash_attn=False, return_residual=False,
checkpointing=False, device=None, dtype=None) -> None:
"""
return_residual: whether to return the input x along with the output. This is for
performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.embed_dim = embed_dim
self.cross_attn = cross_attn
self.causal = causal
self.dwconv = dwconv
self.rotary_emb_dim = rotary_emb_dim
self.return_residual = return_residual
self.checkpointing = checkpointing
self.num_heads = num_heads
        assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
self.head_dim = self.embed_dim // num_heads
if self.rotary_emb_dim > 0:
assert not cross_attn, 'MHA with rotary embedding does not support cross-attention yet'
assert RotaryEmbedding is not None, 'rotary_emb is not installed'
self.rotary_emb = RotaryEmbedding(self.rotary_emb_dim)
if fused_bias_fc and FusedDenseTD is None:
raise ImportError('fused_dense is not installed')
linear_cls = nn.Linear if not fused_bias_fc else FusedDenseTD
linear_resid_cls = LinearResidual if not fused_bias_fc else FusedDenseResidual
if not self.cross_attn:
if not self.return_residual:
self.Wqkv = linear_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
else:
self.Wqkv = linear_resid_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
if self.dwconv:
self.dwconv_qkv = nn.Conv1d(3 * embed_dim, 3 * embed_dim, kernel_size=3, padding=2,
groups=3 * embed_dim)
inner_attn_cls = FlashSelfAttention if use_flash_attn else SelfAttention
else:
# TODO: use the residual linear class for Wq
self.Wq = linear_cls(embed_dim, embed_dim, bias=bias, **factory_kwargs)
self.Wkv = linear_cls(embed_dim, 2 * embed_dim, bias=bias, **factory_kwargs)
if self.dwconv:
self.dwconv_q = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, padding=2,
groups=embed_dim)
self.dwconv_kv = nn.Conv1d(2 * embed_dim, 2 * embed_dim, kernel_size=3, padding=2,
groups=2 * embed_dim)
inner_attn_cls = FlashCrossAttention if use_flash_attn else CrossAttention
self.inner_attn = inner_attn_cls(causal=causal, softmax_scale=softmax_scale,
attention_dropout=dropout, **factory_kwargs)
        # output projection always has the bias (for now)
self.out_proj = linear_cls(embed_dim, embed_dim, **factory_kwargs)
def forward(self, x, x_kv=None):
"""
Arguments:
x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim)
x_kv: (batch, seqlen, hidden_dim), only applicable for cross-attention. If None, use x.
"""
if not self.cross_attn:
if not self.return_residual:
qkv = self.Wqkv(x)
else:
qkv, x = self.Wqkv(x)
if self.dwconv:
qkv = rearrange(self.dwconv_qkv(rearrange(qkv, 'b s d -> b d s'))[..., :-2],
'b d s -> b s d').contiguous()
qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
if self.rotary_emb_dim > 0:
qkv = self.rotary_emb(qkv)
if not self.checkpointing:
context = self.inner_attn(qkv)
else:
# context = torch.utils.checkpoint.checkpoint(self._inner_attention, qkv)
context = torch.utils.checkpoint.checkpoint(self.inner_attn, qkv)
else:
q = rearrange(self.Wq(x), 'b s (h d) -> b s h d', h=self.num_heads)
kv = rearrange(self.Wkv(x if x_kv is None else x_kv), 'b s (two h d) -> b s two h d',
two=2, h=self.num_heads)
if self.dwconv:
q = rearrange(self.dwconv_q(rearrange(q, 'b s d -> b d s'))[..., :-2],
'b d s -> b s d').contiguous()
kv = rearrange(self.dwconv_kv(rearrange(kv, 'b s d -> b d s'))[..., :-2],
'b d s -> b s d').contiguous()
if not self.checkpointing:
context = self.inner_attn(q, kv)
else:
# context = torch.utils.checkpoint.checkpoint(self._inner_attention, qkv)
context = torch.utils.checkpoint.checkpoint(self.inner_attn, q, kv)
out = self.out_proj(rearrange(context, 'b s h d -> b s (h d)'))
return out if not self.return_residual else (out, x)
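# Minimal usage sketch (hypothetical sizes). With the defaults (no flash attention, no fused
# bias, no rotary embedding) this also runs on CPU. Not part of the original module.
def _example_mha():
    x = torch.randn(2, 16, 256)
    self_attn = MHA(embed_dim=256, num_heads=8, causal=True, dropout=0.1)
    out = self_attn(x)                                        # (2, 16, 256)
    cross_attn = MHA(embed_dim=256, num_heads=8, cross_attn=True)
    out_cross = cross_attn(x, x_kv=torch.randn(2, 32, 256))   # attends over 32 key/value positions
    return out, out_cross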
| flash-attention-main | flash_attn/modules/mha.py |
from setuptools import setup, find_packages
setup(
name = 'feedback-transformer-pytorch',
packages = find_packages(),
version = '0.0.11',
license='MIT',
description = 'Implementation of Feedback Transformer in Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/feedback-transformer-pytorch',
keywords = [
'attention',
'artificial intelligence',
'transformer',
'deep learning',
'memory'
],
install_requires=[
'torch>=1.6',
'einops'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
 | feedback-transformer-pytorch-main | setup.py |
from feedback_transformer_pytorch.feedback_transformer_pytorch import FeedbackTransformer
| feedback-transformer-pytorch-main | feedback_transformer_pytorch/__init__.py |
import math
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
Memory = namedtuple('Memory', ['keys', 'values'])
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def safe_cat(arr, el, dim = 1):
if not exists(arr):
return el
return torch.cat((arr, el), dim = dim)
# positional embedding
class RelativePositionBias(nn.Module):
def __init__(
self,
causal = False,
num_buckets = 32,
max_distance = 128,
heads = 8
):
super().__init__()
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def forward(self, qk_dots):
i, j, device = *qk_dots.shape[-2:], qk_dots.device
q_pos = torch.arange(i, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> () h i j')
return bias
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class SkipIf(nn.Module):
def __init__(self, cond, fn):
super().__init__()
self.cond = cond
self.fn = fn
def forward(self, x, *args, **kwargs):
if self.cond(x, *args, **kwargs):
return x
return self.fn(x, *args, **kwargs)
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
class FeedForward(nn.Module):
def __init__(
self,
*,
dim,
mult = 4,
dropout = 0.
):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
# attention
class Attention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
dropout = 0.
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x, memory, pos_emb = None):
h, n, device = self.heads, x.shape[1], x.device
self_attend = n > 1 # only self attend if going at greater than 1 token at a time
q = self.to_q(x) * self.scale
k, v = memory if exists(memory) else (None, None)
if self_attend:
self_k, self_v = self.to_kv(x).chunk(2, dim = -1)
k = safe_cat(k, self_k, dim = 1)
v = safe_cat(v, self_v, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
sim = einsum('b h i d, b h j d -> b h i j', q, k)
i, j = sim.shape[-2:]
if exists(pos_emb):
sim = sim + pos_emb(sim)
if self_attend:
causal_mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
causal_mask = rearrange(causal_mask, 'i j -> () () i j')
mask_value = -torch.finfo(q.dtype).max
sim.masked_fill_(causal_mask, mask_value)
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# main class
class FeedbackTransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
mem_len,
seq_len = 2,
heads = 8,
dim_head = 64,
attn_dropout = 0.,
ff_dropout = 0.,
keep_last_hidden = False
):
super().__init__()
self.seq_len = seq_len
self.mem_len = mem_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = RelativePositionBias(causal = True, heads = heads)
# main layers
self.layers = nn.ModuleList([])
shared_kv_proj = None
for _ in range(depth):
attn = Attention(dim = dim, heads = heads, dim_head = dim_head, dropout = attn_dropout)
ff = FeedForward(dim = dim, dropout = ff_dropout)
shared_kv_proj = default(shared_kv_proj, attn.to_kv)
attn.to_kv = shared_kv_proj
attn, ff = map(lambda fn: Residual(PreNorm(dim, fn)), (attn, ff))
if seq_len == 1:
memory_is_empty = lambda *args, **kwargs: not exists(kwargs['memory'])
attn = SkipIf(memory_is_empty, attn)
self.layers.append(nn.ModuleList([
attn,
ff
]))
# memory parameters
self.layer_weight = nn.Parameter(torch.ones(depth + 1))
self.shared_kv_proj = shared_kv_proj
self.keep_last_hidden = keep_last_hidden
# final projection to logits
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x, memory = None, return_memory = False):
b, n, device = *x.shape, x.device
x = self.token_emb(x)
memory_keys = None
memory_values = None
if exists(memory):
memory_keys, memory_values = memory
outputs = []
# calculate weighting of layers for storing to memory
layer_weight = self.layer_weight.softmax(dim = -1)
layer_weight = rearrange(layer_weight, 'd -> d () () ()')
for x in x.split(self.seq_len, dim = 1):
hiddens = [x]
# prepare memory for attention, if it exists
memory = None
if exists(memory_keys):
memory = (memory_keys, memory_values)
for attn, ff in self.layers:
x = attn(x, memory = memory, pos_emb = self.pos_emb)
x = ff(x)
hiddens.append(x)
outputs.append(x)
# calculate new memory key / values and store to FIFO queue
if self.keep_last_hidden: # secret option for only keeping last hidden layer, as in paper
agg_hiddens = hiddens[-1]
else:
hiddens = torch.stack(hiddens)
agg_hiddens = (hiddens * layer_weight).sum(dim = 0)
# pre-calculate memory key / values and store to buffer
mem_k, mem_v = self.shared_kv_proj(agg_hiddens).chunk(2, dim = -1)
memory_keys = safe_cat(memory_keys, mem_k, dim = 1)
memory_values = safe_cat(memory_values, mem_v, dim = 1)
# enforce max length on memory buffer
memory_keys = memory_keys[:, -self.mem_len:]
memory_values = memory_values[:, -self.mem_len:]
x = torch.cat((outputs), dim = 1)
out = self.to_logits(x)
if not return_memory:
return out
return out, Memory(memory_keys, memory_values)
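# Minimal usage sketch (hypothetical sizes) showing how memory is carried across segments;
# not part of the original module.
def _example_feedback_transformer():
    model = FeedbackTransformer(num_tokens=256, dim=128, depth=2, seq_len=2, mem_len=64)
    tokens = torch.randint(0, 256, (1, 8))
    logits, memory = model(tokens, return_memory=True)  # (1, 8, 256) and a Memory(keys, values)
    next_tokens = torch.randint(0, 256, (1, 8))
    next_logits = model(next_tokens, memory=memory)      # attends over the carried memory
    return logits, next_logits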
| feedback-transformer-pytorch-main | feedback_transformer_pytorch/feedback_transformer_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'memory-transformer-xl',
packages = find_packages(exclude=['examples']),
version = '0.1.0',
license='MIT',
description = 'Memory Transformer-XL, a variant of Transformer-XL that uses linear attention update long term memory',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/memory-transformer-xl',
keywords = ['attention mechanism', 'artificial intelligence', 'transformer', 'deep learning'],
install_requires=[
'torch',
'mogrifier'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
 | memory-transformer-xl-master | setup.py |