from keopscore.formulas.maths.Sqrt import Sqrt
from keopscore.formulas.maths.Scalprod import Scalprod


class Norm2:
    def __new__(cls, arg):
        return Sqrt(Scalprod(arg, arg))

    enable_test = False
keops-main
keopscore/keopscore/formulas/maths/Norm2.py
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.formulas.maths.Log import Log
from keopscore.utils.math_functions import keops_xlogx


class XLogX(VectorizedScalarOp):
    """the x*log(x) vectorized operation"""

    string_id = "XLogX"

    ScalarOpFun = keops_xlogx

    @staticmethod
    def Derivative(f):
        return Log(f) + 1

    # parameters for testing the operation (optional)
    test_ranges = [(0, 2)]  # range of argument
    torch_op = "lambda x:x*torch.log(x)"
keops-main
keopscore/keopscore/formulas/maths/XLogX.py
from keopscore.formulas.maths.Inv import Inv
from keopscore.formulas.variables.IntCst import IntCst


class IntInv:
    def __new__(cls, arg):
        return Inv(IntCst(arg))

    enable_test = False
keops-main
keopscore/keopscore/formulas/maths/IntInv.py
from keopscore.formulas.Chunkable_Op import Chunkable_Op
from keopscore.formulas.variables.Zero import Zero
from keopscore.utils.code_gen_utils import c_zero_float, VectApply

##########################
######    Sum        #####
##########################


class Sum_Impl(Chunkable_Op):
    # the summation operation
    string_id = "Sum"
    dim = 1

    def Op(self, out, table, arg):
        return out.assign(c_zero_float) + VectApply(self.ScalarOp, out, arg)

    def ScalarOp(self, out, arg):
        return out.add_assign(arg)

    def DiffT(self, v, gradin):
        from keopscore.formulas.maths.SumT import SumT

        f = self.children[0]
        return f.DiffT(v, SumT(gradin, f.dim))

    def initacc_chunk(self, acc):
        return f"*{acc.id} = 0.0f;\n"

    def acc_chunk(self, acc, out):
        return f"*{acc.id} += *{out.id};\n"


# N.B. The following separate function should theoretically be implemented
# as a __new__ method of the previous class, but this can generate infinite recursion problems
def Sum(arg):
    if isinstance(arg, Zero):
        return Zero(1)
    else:
        return Sum_Impl(arg)
keops-main
keopscore/keopscore/formulas/maths/Sum.py
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.formulas.variables.Zero import Zero

##########################
######    Square     #####
##########################


class Square_Impl(VectorizedScalarOp):
    """the square vectorized operation"""

    string_id = "Square"
    print_spec = "**2", "post", 1

    def ScalarOp(self, out, arg):
        return out.assign(arg * arg)

    @staticmethod
    def Derivative(f):
        return 2 * f


# N.B. The following separate function should theoretically be implemented
# as a __new__ method of the previous class, but this can generate infinite recursion problems
def Square(arg):
    if isinstance(arg, Zero):
        return arg
    else:
        return Square_Impl(arg)
keops-main
keopscore/keopscore/formulas/maths/Square.py
from keopscore.formulas.maths.ElemT import ElemT
from keopscore.formulas.maths.Concat import Concat
from keopscore.formulas.variables.IntCst import IntCst
from keopscore.formulas.variables.Var import Var
from keopscore.utils.code_gen_utils import GetInds

# //////////////////////////////////////////////////////////////////////////////////////////////
# ////  Standard basis of R^DIM : < (1,0,0,...) , (0,1,0,...) , ... , (0,...,0,1) >         ////
# //////////////////////////////////////////////////////////////////////////////////////////////


def StandardBasis(dim):
    return tuple(ElemT(IntCst(1), dim, i) for i in range(dim))


# /////////////////////////////////////////////////////////////////////////
# ////      Matrix of gradient operator (=transpose of jacobian)       ////
# /////////////////////////////////////////////////////////////////////////


class GradMatrix:
    def __new__(cls, f, v):
        f.Vars(cat=3)
        IndsTempVars = GetInds(f.Vars(cat=3))
        newind = 1 if len(IndsTempVars) == 0 else 1 + max(IndsTempVars)
        gradin = Var(newind, f.dim, 3)
        packGrads = tuple(
            f.DiffT(v, gradin).replace(gradin, e) for e in StandardBasis(f.dim)
        )
        res = packGrads[0]
        for elem in packGrads[1:]:
            res = Concat(res, elem)
        return res

    enable_test = False
keops-main
keopscore/keopscore/formulas/maths/GradMatrix.py
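GradMatrix above assembles the transposed Jacobian column by column, by backpropagating each standard basis vector through f.DiffT. The same idea can be sketched with plain PyTorch autograd; the helper below is illustrative only and not part of keopscore:

import torch

def grad_matrix(f, x):
    # x: (n,) input with requires_grad, f(x): (m,) output.
    # Column i of the result is the VJP of f at x against the
    # standard basis vector e_i, i.e. J^T e_i.
    y = f(x)
    cols = []
    for i in range(y.numel()):
        e = torch.zeros_like(y)
        e[i] = 1.0  # standard basis vector of R^m
        (g,) = torch.autograd.grad(y, x, grad_outputs=e, retain_graph=True)
        cols.append(g)
    return torch.stack(cols, dim=-1)  # (n, m): transpose of the Jacobian

x = torch.randn(3, requires_grad=True)
J_T = grad_matrix(lambda v: torch.stack([v.sum(), (v**2).sum()]), x)
print(J_T.shape)  # torch.Size([3, 2])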
from keopscore.formulas.Chunkable_Op import Chunkable_Op
from keopscore.formulas.maths.Sum import Sum
from keopscore.utils.code_gen_utils import (
    c_zero_float,
    VectApply,
)
from keopscore.utils.math_functions import keops_fma
from keopscore.utils.misc_utils import KeOps_Error

##########################
######    Scalprod   #####
##########################


class Scalprod_Impl(Chunkable_Op):
    string_id = "Scalprod"
    print_spec = "|", "mid", 3
    dim = 1

    def __init__(self, fa, fb):
        # Output dimension = 1, provided that FA::DIM = FB::DIM
        self.dimin = fa.dim
        if self.dimin != fb.dim:
            KeOps_Error("Dimensions must be the same for Scalprod")
        super().__init__(fa, fb)

    def Op(self, out, table, arga, argb):
        return out.assign(c_zero_float) + VectApply(self.ScalarOp, out, arga, argb)

    def ScalarOp(self, out, arga, argb):
        return out.assign(keops_fma(arga, argb, out))

    def DiffT(self, v, gradin):
        fa, fb = self.children
        return gradin * (fa.DiffT(v, fb) + fb.DiffT(v, fa))

    def initacc_chunk(self, acc):
        return f"*{acc.id} = 0.0f;\n"

    def acc_chunk(self, acc, out):
        return f"*{acc.id} += *{out.id};\n"


def Scalprod(arg0, arg1):
    if arg0.dim == 1:
        return arg0 * Sum(arg1)
    elif arg1.dim == 1:
        return Sum(arg0) * arg1
    else:
        return Scalprod_Impl(arg0, arg1)
keops-main
keopscore/keopscore/formulas/maths/Scalprod.py
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.formulas.variables.Zero import Zero
from keopscore.utils.math_functions import keops_sign

##########################
######    Sign       #####
##########################


class Sign(VectorizedScalarOp):
    """the sign vectorized operation"""

    string_id = "Sign"

    ScalarOpFun = keops_sign

    def DiffT(self, v, gradin):
        return Zero(v.dim)
keops-main
keopscore/keopscore/formulas/maths/Sign.py
from keopscore.formulas.Operation import Operation
from keopscore.utils.code_gen_utils import (
    c_variable,
    c_for_loop,
)

####################################
######    Tensor product       #####
####################################


class TensorProd(Operation):
    string_id = "TensorProd"

    def __init__(self, arg0, arg1):
        super().__init__(arg0, arg1)
        self.dim = arg0.dim * arg1.dim

    def Op(self, out, table, arg0, arg1):
        q = c_variable("int")
        loop, k = c_for_loop(0, arg0.dim, 1, pragma_unroll=True)
        inner_loop, l = c_for_loop(0, arg1.dim, 1, pragma_unroll=True)
        return f"""
            #if C_CONTIGUOUS  // row major
                {q.declare_assign(0)}
                {loop(inner_loop(out[q].assign(arg0[k] * arg1[l]) + q.add_assign(1)))}
            #else  // column major
                {q.declare_assign(0)}
                {loop(inner_loop(out[k + l * arg0.dim].assign(arg0[k] * arg1[l]) + q.add_assign(1)))}
            #endif
        """

    def DiffT(self, v, gradin):
        from keopscore.formulas import MatVecMult, VecMatMult

        f, g = self.children
        return f.DiffT(v, MatVecMult(gradin, g)) + g.DiffT(v, VecMatMult(f, gradin))
keops-main
keopscore/keopscore/formulas/maths/TensorProd.py
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.utils.math_functions import keops_mod


class Mod(VectorizedScalarOp):
    """the Modulo vectorized operation
    Mod(x,n,d) = x - n * Floor((x - d)/n)
    """

    string_id = "Mod"

    ScalarOpFun = keops_mod

    def DiffT(self, v, gradin):
        from keopscore.formulas.maths.Floor import Floor

        # we fall back to an alternative definition of Mod for defining the gradient
        x, n, d = self.children
        Mod_alt = x - n * Floor((x - d) / n)
        return Mod_alt.DiffT(v, gradin)

    # parameters for testing the operation (optional)
    nargs = 3  # number of arguments


"""
# N.B. below is an alternative definition as a simple alias.
# It is theoretically less efficient when applied to vectors,
# because we compose operations, so evaluation implies the creation
# of useless temporary vectors.
# If we implement a sort of "fusion" of vectorized scalar operations,
# it should be completely equivalent.

from keopscore.formulas.maths.Floor import Floor

def Mod(x, n, d):
    return x - n * Floor((x - d) / n)
"""
keops-main
keopscore/keopscore/formulas/maths/Mod.py
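The gradient of Mod above relies on the identity Mod(x, n, d) = x - n * Floor((x - d)/n) stated in the docstring: the result is the representative of x modulo n lying in the window [d, d + n). A quick NumPy check of that identity, independent of the keops_mod kernel itself:

import numpy as np

x = np.linspace(-5, 5, 11)
n, d = 3.0, 1.0
mod_alt = x - n * np.floor((x - d) / n)
# all values fall in the window [d, d + n)
assert np.all((mod_alt >= d) & (mod_alt < d + n))
print(mod_alt)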
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.utils.math_functions import keops_exp

##########################
######    Exp        #####
##########################


class Exp(VectorizedScalarOp):
    """the exponential vectorized operation"""

    string_id = "Exp"

    ScalarOpFun = keops_exp

    @staticmethod
    def Derivative(f):
        return Exp(f)
keops-main
keopscore/keopscore/formulas/maths/Exp.py
from keopscore.formulas.Operation import Operation
from keopscore.utils.code_gen_utils import c_array, VectCopy
from keopscore.utils.misc_utils import KeOps_Error

# //////////////////////////////////////////////////////////////
# ////     VECTOR EXTRACTION : Extract<F,START,DIM>         ////
# //////////////////////////////////////////////////////////////


class Extract(Operation):
    string_id = "Extract"

    def __init__(self, arg0, start, dim):
        if arg0.dim < start + dim or start < 0:
            KeOps_Error("Index out of bound in Extract")
        super().__init__(arg0, params=(start, dim))
        self.start = start
        self.dim = dim

    def Op(self, out, table, arg0):
        # returns the atomic piece of c++ code to evaluate the function on arg and return
        # the result in out
        v = c_array(arg0.dtype, out.dim, f"({arg0.id}+{self.start})")
        return VectCopy(out, v)

    def DiffT(self, v, gradin):
        from keopscore.formulas.maths.ExtractT import ExtractT

        f = self.children[0]
        return f.DiffT(v, ExtractT(gradin, self.start, f.dim))

    # parameters for testing the operation (optional)
    enable_test = True  # enable testing for this operation
    nargs = 1  # number of arguments
    test_argdims = [10]  # dimensions of arguments for testing
    test_params = [3, 5]  # values of parameters for testing
    torch_op = "lambda x,s,d : x[...,s:(s+d)]"  # equivalent PyTorch operation
keops-main
keopscore/keopscore/formulas/maths/Extract.py
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.formulas.maths.Cos import Cos
from keopscore.formulas.maths.Sin import Sin
from keopscore.utils.math_functions import keops_sinxdivx


class SinXDivX(VectorizedScalarOp):
    """the sin(x)/x vectorized operation"""

    string_id = "SinXDivX"

    ScalarOpFun = keops_sinxdivx

    @staticmethod
    def Derivative(f):
        return Cos(f) / f - Sin(f) / f**2
keops-main
keopscore/keopscore/formulas/maths/SinXDivX.py
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.utils.math_functions import keops_atan


class Atan(VectorizedScalarOp):
    """the arc-tangent vectorized operation"""

    string_id = "Atan"

    ScalarOpFun = keops_atan

    @staticmethod
    def Derivative(f):
        return 1 / (1 + f**2)
keops-main
keopscore/keopscore/formulas/maths/Atan.py
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.formulas.maths.Rsqrt import Rsqrt
from keopscore.utils.math_functions import keops_acos


class Acos(VectorizedScalarOp):
    """the arc-cosine vectorized operation"""

    string_id = "Acos"

    ScalarOpFun = keops_acos

    @staticmethod
    def Derivative(f):
        return -Rsqrt(1 - f**2)

    # parameters for testing the operation (optional)
    test_ranges = [(-1, 1)]  # range of argument
keops-main
keopscore/keopscore/formulas/maths/Acos.py
from keopscore.formulas.maths.SqNormDiag import SqNormDiag
from keopscore.formulas.maths.SqNormIso import SqNormIso
from keopscore.formulas.maths.SymSqNorm import SymSqNorm


class WeightedSqNorm:
    """
    WeightedSqNorm(A,X) : redirects to SqNormIso, SqNormDiag or SymSqNorm
    depending on the dimension of A.
    """

    string_id = "WeightedSqNorm"

    def __new__(cls, A, X):
        if A.dim == 1:
            return SqNormIso(A, X)
        elif A.dim == X.dim:
            return SqNormDiag(A, X)
        else:
            return SymSqNorm(A, X)

    enable_test = True
    nargs = 2  # number of arguments
    test_argdims = [5, 5]  # dimensions of arguments for testing
keops-main
keopscore/keopscore/formulas/maths/WeightedSqNorm.py
from keopscore.formulas.VectorizedScalarOp import VectorizedScalarOp
from keopscore.utils.math_functions import keops_sin


class Sin(VectorizedScalarOp):
    """the Sine vectorized operation"""

    string_id = "Sin"

    ScalarOpFun = keops_sin

    @staticmethod
    def Derivative(f):
        from .Cos import Cos

        return Cos(f)
keops-main
keopscore/keopscore/formulas/maths/Sin.py
from keopscore.formulas.Operation import Operation
from keopscore.formulas.variables.Zero import Zero
from keopscore.utils.code_gen_utils import (
    c_zero_float,
    c_for_loop,
    c_if,
    value,
    c_variable,
)
from keopscore.utils.misc_utils import KeOps_Error

############################
######    ArgMin       #####
############################


class ArgMin(Operation):
    string_id = "ArgMin"

    def __init__(self, f):
        super().__init__(f)
        if f.dim < 1:
            KeOps_Error("ArgMin operation is only possible when dimension is non zero.")
        self.dim = 1

    def Op(self, out, table, arg):
        tmp = c_variable(out.dtype)
        loop, k = c_for_loop(1, arg.dim, 1, pragma_unroll=True)
        string = value(out).assign(c_zero_float) + tmp.declare_assign(arg[0])
        if out.dtype == "half2":
            loop_string = f"""
                // we have to work element-wise...
                __half2 cond = __hgt2({tmp.id},{arg[k].id});          // cond = (tmp > outF[k]) (element-wise)
                __half2 negcond = __float2half2_rn(1.0f)-cond;        // negcond = 1-cond
                *{out.id} = cond * __float2half2_rn({k.id}) + negcond * *{out.id};  // out = cond * k + (1-cond) * out
                {tmp.id} = cond * {arg[k].id} + negcond * {tmp.id};   // tmp = cond * outF[k] + (1-cond) * tmp
            """
            string += loop(loop_string)
        else:
            string += loop(
                c_if(arg[k] < tmp, tmp.assign(arg[k]) + value(out).assign(k))
            )
        return string

    def DiffT(self, v, gradin):
        return Zero(v.dim)

    # parameters for testing the operation (optional)
    enable_test = True  # enable testing for this operation
    nargs = 1  # number of arguments
    test_argdims = [5]  # dimensions of arguments for testing
    torch_op = "lambda x : torch.argmin(x, dim=-1, keepdim=True).type(x.dtype)"
    no_torch_grad = True
keops-main
keopscore/keopscore/formulas/maths/ArgMin.py
from keopscore.utils.code_gen_utils import VectCopy
from keopscore.formulas.reductions.Min_ArgMin_Reduction_Base import (
    Min_ArgMin_Reduction_Base,
)
from keopscore.formulas.reductions.Zero_Reduction import Zero_Reduction


class ArgMin_Reduction(Min_ArgMin_Reduction_Base):
    """Implements the argmin reduction operation : for each i or each j,
    find the index of the minimal value of Fij.
    Operation is vectorized: if Fij is vector-valued, argmin is computed for each dimension."""

    string_id = "ArgMin_Reduction"

    def __init__(self, formula, tagIJ):
        super().__init__(formula, tagIJ)
        self.dim = formula.dim

    def FinalizeOutput(self, acc, out, i):
        acc_val, acc_ind = acc.split(self.dim, self.dim)
        return VectCopy(out, acc_ind)

    def DiffT(self, v, gradin):
        return Zero_Reduction(v.dim, v.cat % 2)
keops-main
keopscore/keopscore/formulas/reductions/ArgMin_Reduction.py
from keopscore.utils.code_gen_utils import VectCopy
from keopscore.formulas.reductions.Max_ArgMax_Reduction_Base import (
    Max_ArgMax_Reduction_Base,
)
from keopscore.formulas.reductions.Zero_Reduction import Zero_Reduction


class ArgMax_Reduction(Max_ArgMax_Reduction_Base):
    """Implements the argmax reduction operation : for each i or each j,
    find the index of the maximal value of Fij.
    Operation is vectorized: if Fij is vector-valued, argmax is computed for each dimension."""

    string_id = "ArgMax_Reduction"

    def __init__(self, formula, tagIJ):
        super().__init__(formula, tagIJ)
        self.dim = formula.dim

    def FinalizeOutput(self, acc, out, i):
        acc_val, acc_ind = acc.split(self.dim, self.dim)
        return VectCopy(out, acc_ind)

    def DiffT(self, v, gradin):
        return Zero_Reduction(v.dim, v.cat % 2)
keops-main
keopscore/keopscore/formulas/reductions/ArgMax_Reduction.py
from keopscore.formulas.maths import Concat, Exp, Extract
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.formulas.reductions.Sum_Reduction import Sum_Reduction
from keopscore.formulas.variables.IntCst import IntCst
from keopscore.utils.code_gen_utils import (
    neg_infinity,
    c_zero_float,
    new_c_varname,
    c_variable,
    c_for_loop,
)
from keopscore.utils.math_functions import keops_exp
from keopscore.utils.misc_utils import KeOps_Error


class Max_SumShiftExpWeight_Reduction(Reduction):
    """Implements the coupled reduction operation m_i=max_j f_ij, s_i=sum_j exp(f_ij-m_i) g_ij
    where f and g are two formulas. f must be scalar-valued.
    This reduction is the base for numerically stable computation of
    log-sum-exp and softmax type reductions."""

    string_id = "Max_SumShiftExpWeight_Reduction"

    def __init__(self, formulaF, tagIJ, formulaG=IntCst(1)):
        if formulaF.dim != 1:
            KeOps_Error(
                "Max_SumShiftExpWeight_Reduction requires first formula of dimension 1."
            )
        super().__init__(Concat(formulaF, formulaG), tagIJ)
        self.formulaF = formulaF
        self.formulaG = formulaG
        self.dim = formulaF.dim + formulaG.dim  # dimension of final output of reduction
        self.dimred = self.dim  # dimension of inner reduction variables

    def InitializeReduction(self, acc):
        """Returns C++ code to be used at initialization phase of the reduction.
        We fill empty cells with the neutral element of the reduction operation,
        (-inf,0) = e^{-inf} * 0 = 0"""
        m, s = acc.split(1, self.formulaG.dim)
        return m.assign(neg_infinity(acc.dtype)) + s.assign(c_zero_float)

    def ReducePair(self, acc, xi):
        """Returns C++ code that implements the update phase of the reduction.
        (m,s) + (m',s'), i.e. exp(m)*s + exp(m')*s'"""
        if xi.dtype == "half2":
            KeOps_Error("Not implemented.")
        tmpexp = c_variable(acc.dtype, new_c_varname("tmpexp"))
        loop, k = c_for_loop(1, self.dimred, 1, pragma_unroll=True)
        return f"""
            {tmpexp.declare()}
            if ({acc.id}[0] > {xi.id}[0]) {{  // = exp(m) * (s + s'*exp(m'-m)) if m > m'
                {tmpexp.assign(keops_exp(xi[0] - acc[0]))}
                {loop(acc[k].add_assign(xi[k] * tmpexp))}
            }} else {{  // = exp(m') * (s' + exp(m-m')*s) if m <= m'
                {tmpexp.assign(keops_exp(acc[0] - xi[0]))}
                {loop(acc[k].assign(xi[k] + tmpexp * acc[k]))}
                {acc[0].assign(xi[0])}
            }}
        """

    def ReducePairShort(self, acc, xi, ind):
        return self.ReducePair(acc, xi)

    def KahanScheme(self, acc, xi, tmp):
        if xi.dtype == "half2":
            KeOps_Error("Not implemented.")
        tmpexp = c_variable(acc.dtype, new_c_varname("tmpexp"))
        loop, k = c_for_loop(1, self.dimred, 1, pragma_unroll=True)
        a = c_variable(acc.dtype, new_c_varname("a"))
        b = c_variable(acc.dtype, new_c_varname("b"))
        u = c_variable(acc.dtype, new_c_varname("u"))
        return f"""
            {tmpexp.declare()}
            if ({acc.id}[0] > {xi.id}[0])  // = exp(m) * (s + s'*exp(m'-m)) if m > m'
            {{
                {tmpexp.assign(keops_exp(xi[0] - acc[0]))}
                {loop(a.declare_assign(xi[k] * tmpexp - tmp[k - 1])
                      + b.declare_assign(acc[k] + a)
                      + tmp[k - 1].assign((b - acc[k]) - a)
                      + acc[k].assign(b))}
            }}
            else  // = exp(m') * (s' + exp(m-m')*s) if m <= m'
            {{
                {tmpexp.assign(keops_exp(acc[0] - xi[0]))}
                {loop(u.declare_assign(tmpexp * acc[k])
                      + a.declare_assign(xi[k] - tmpexp * tmp[k - 1])
                      + b.declare_assign(u + a)
                      + tmp[k - 1].assign((b - u) - a)
                      + acc[k].assign(b))}
                {acc[0].assign(xi[0])}
            }}
        """

    def DiffT(self, v, gradin, MS):
        """
        // Beware: the formula that we use for the gradient is *only* valid
        // if the output [M,S] = Max_SumShiftExp(F,G) has been flattened through a
        // L = M + log(S) (Log-Sum-Exp) or a weighted Soft-Max
        // operation (as done by the Python bindings), and if
        // GRADIN = [Grad(L), Grad(L)/S ]
        // has been backpropagated from L.
        """
        from keopscore.formulas.autodiff import Grad

        M = Extract(MS, 0, self.formulaF.dim)
        S = Extract(gradin, self.formulaF.dim, self.formulaG.dim)
        return Grad(
            Sum_Reduction(Exp(self.formulaF - M) * self.formulaG, self.tagI), v, S
        )


Max_SumShiftExp_Reduction = Max_SumShiftExpWeight_Reduction
keops-main
keopscore/keopscore/formulas/reductions/Max_SumShiftExpWeight_Reduction.py
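The ReducePair rule above merges two partial results (m, s) and (m', s'), each standing for exp(m) * s, by shifting everything to the larger maximum; flattening the result via L = m + log(s) then gives a numerically stable log-sum-exp, as assumed by DiffT. A plain NumPy sketch of the same merge (illustrative only, not keopscore code):

import numpy as np

def merge(acc, other):
    m, s = acc
    m2, s2 = other
    if m > m2:
        return m, s + s2 * np.exp(m2 - m)   # exp(m) * (s + s2*exp(m2-m))
    else:
        return m2, s2 + s * np.exp(m - m2)  # exp(m2) * (s2 + s*exp(m-m2))

f = np.random.randn(1000) * 50  # large values: a naive exp(f).sum() overflows
m, s = -np.inf, 0.0             # neutral element (-inf, 0), as in InitializeReduction
for fi in f:
    m, s = merge((m, s), (fi, 1.0))
logsumexp = m + np.log(s)       # the "flattening" step mentioned in DiffT
print(np.allclose(logsumexp, np.logaddexp.reduce(f)))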
from keopscore.formulas.reductions.KMin_ArgKMin_Reduction import KMin_ArgKMin_Reduction
from keopscore.utils.code_gen_utils import (
    infinity,
    cast_to,
    c_zero_float,
    c_for_loop,
    c_variable,
    new_c_varname,
    c_if,
)


class KMin_Reduction(KMin_ArgKMin_Reduction):
    """Implements the k-min reduction operation : for each i or each j,
    find the k minimal values of Fij.
    Operation is vectorized: if Fij is vector-valued, k-min is computed for each dimension."""

    string_id = "KMin_Reduction"

    def __init__(self, formula, K, tagIJ):
        super().__init__(formula, K, tagIJ)
        self.dim = K * formula.dim

    def FinalizeOutput(self, acc, out, i):
        fdim, K = self.formula.dim, self.K
        outer_loop, k = c_for_loop(0, fdim, 1)
        inner_loop, l = c_for_loop(k, k + (2 * fdim * K), 2 * fdim)
        p = c_variable("int", new_c_varname("p"))
        return outer_loop(
            p.declare_assign(k)
            + inner_loop(out[p].assign(acc[l]) + p.add_assign(fdim))
        )
keops-main
keopscore/keopscore/formulas/reductions/KMin_Reduction.py
from keopscore.utils.code_gen_utils import VectApply, VectCopy
from keopscore.utils.Tree import Tree


class Reduction(Tree):
    """Base class for all KeOps final reductions over a formula"""

    def __init__(self, formula, tagI):
        """- formula is an object of type Operation, it is the formula on which we apply a reduction
        - tagI : 0 or 1, specifies whether we do the reduction over "i"-indexed or "j"-indexed variables.
        """
        # We initialize several constants, most of them inferred from the formula
        self.formula = formula
        self.children = [formula]
        self.params = (tagI,)
        self.tagI = tagI
        self.tagJ = 1 - tagI
        self.cat = tagI
        self.Vars_ = formula.Vars_

    def ReducePair(self, acc, xi):
        """Returns C++ code that implements the update phase of the reduction.
        By default it consists in a vectorized version of the ReducePairScalar operation."""
        return VectApply(self.ReducePairScalar, acc, xi)

    def ReducePairShort(self, acc, xi, ind):
        # N.B. next lines are useless here, but to be used in other reductions :
        # if xi.dtype == "half2":
        #     half2_val = c_variable("half2_ind")
        #     string = half2_val.declare_assign(f"__floats2half2_rn(2*{ind()},2*{ind()}+1)")
        return self.ReducePair(acc, xi)

    def FinalizeOutput(self, acc, out, i):
        """Returns C++ code that implements the final output of the reduction.
        For most reductions it is a simple copy of the temporary variable updated
        during the reduction, with possibly a cast if the accumulator was of a
        different data type."""
        return VectCopy(out, acc)
keops-main
keopscore/keopscore/formulas/reductions/Reduction.py
from keopscore.utils.code_gen_utils import (
    neg_infinity,
    c_zero_float,
    c_variable,  # needed by ReducePairShort below; missing from the original import list
    VectApply,
    c_if,
)
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.utils.misc_utils import KeOps_Error


class Max_ArgMax_Reduction_Base(Reduction):
    """max+argmax reduction : base class"""

    def __init__(self, formula, tagIJ):
        super().__init__(formula, tagIJ)
        # We work with a (values, indices) vector
        self.dimred = 2 * formula.dim  # dimension of inner reduction variables

    def InitializeReduction(self, acc):
        # Returns C++ code to be used at initialization phase of the reduction.
        dim = self.formula.dim
        acc_max, acc_argmax = acc.split(dim, dim)
        return acc_max.assign(neg_infinity(acc.dtype)) + acc_argmax.assign(c_zero_float)

    def ReducePairScalar(self, acc_val, acc_ind, xi, ind):
        # Subroutine of ReducePairShort and ReducePair methods.
        if xi.dtype == "half2":
            KeOps_Error("not implemented")
        return c_if(xi > acc_val, acc_val.assign(xi) + acc_ind.assign(ind))

    def ReducePair(self, acc, xi):
        # Returns C++ code that implements the update phase of the reduction.
        dim = self.formula.dim
        acc_val, acc_ind = acc.split(dim, dim)
        xi_val, xi_ind = xi.split(dim, dim)
        return VectApply(self.ReducePairScalar, acc_val, xi_val, xi_ind)

    def ReducePairShort(self, acc, xi, ind):
        if xi.dtype == "half2":
            KeOps_Error("not implemented")
            half2_val = c_variable("half2_ind")
            string = half2_val.declare_assign(
                f"__floats2half2_rn(2*{ind()},2*{ind()}+1)"
            )
        dim = self.formula.dim
        acc_val, acc_ind = acc.split(dim, dim)
        return VectApply(self.ReducePairScalar, acc_val, acc_ind, xi, ind)
keops-main
keopscore/keopscore/formulas/reductions/Max_ArgMax_Reduction_Base.py
from keopscore.utils.code_gen_utils import (
    infinity,
    c_zero_float,
    c_variable,  # needed by ReducePairShort below; missing from the original import list
    VectApply,
    c_if,
)
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.utils.misc_utils import KeOps_Error


class Min_ArgMin_Reduction_Base(Reduction):
    """min+argmin reduction : base class"""

    def __init__(self, formula, tagIJ):
        super().__init__(formula, tagIJ)
        # We work with a (values, indices) vector
        self.dimred = 2 * formula.dim  # dimension of inner reduction variables

    def InitializeReduction(self, acc):
        # Returns C++ code to be used at initialization phase of the reduction.
        dim = self.formula.dim
        acc_min, acc_argmin = acc.split(dim, dim)
        return acc_min.assign(infinity(acc.dtype)) + acc_argmin.assign(c_zero_float)

    def ReducePairScalar(self, acc_val, acc_ind, xi, ind):
        # Subroutine of ReducePairShort and ReducePair methods.
        if xi.dtype == "half2":
            KeOps_Error("not implemented")
        return c_if(xi < acc_val, acc_val.assign(xi) + acc_ind.assign(ind))

    def ReducePair(self, acc, xi):
        # Returns C++ code that implements the update phase of the reduction.
        dim = self.formula.dim
        acc_val, acc_ind = acc.split(dim, dim)
        xi_val, xi_ind = xi.split(dim, dim)
        return VectApply(self.ReducePairScalar, acc_val, xi_val, xi_ind)

    def ReducePairShort(self, acc, xi, ind):
        if xi.dtype == "half2":
            KeOps_Error("not implemented")
            half2_val = c_variable("half2_ind")
            string = half2_val.declare_assign(
                f"__floats2half2_rn(2*{ind()},2*{ind()}+1)"
            )
        dim = self.formula.dim
        acc_val, acc_ind = acc.split(dim, dim)
        return VectApply(self.ReducePairScalar, acc_val, acc_ind, xi, ind)
keops-main
keopscore/keopscore/formulas/reductions/Min_ArgMin_Reduction_Base.py
from keopscore.utils.code_gen_utils import VectCopy
from keopscore.formulas.reductions.Max_ArgMax_Reduction_Base import (
    Max_ArgMax_Reduction_Base,
)


class Max_ArgMax_Reduction(Max_ArgMax_Reduction_Base):
    """Implements the max+argmax reduction operation : for each i or each j,
    find the maximal value of Fij and its index.
    Operation is vectorized: if Fij is vector-valued, max+argmax is computed for each dimension."""

    string_id = "Max_ArgMax_Reduction"

    def __init__(self, formula, tagIJ):
        super().__init__(formula, tagIJ)
        self.dim = 2 * formula.dim

    def FinalizeOutput(self, acc, out, i):
        return VectCopy(out, acc)
keops-main
keopscore/keopscore/formulas/reductions/Max_ArgMax_Reduction.py
from keopscore.formulas.reductions.KMin_ArgKMin_Reduction import KMin_ArgKMin_Reduction
from keopscore.formulas.reductions.Zero_Reduction import Zero_Reduction
from keopscore.utils.code_gen_utils import (
    c_for_loop,
    new_c_varname,
    c_variable,
)


class ArgKMin_Reduction(KMin_ArgKMin_Reduction):
    """Implements the arg-k-min reduction operation : for each i or each j,
    find the indices of the k minimal values of Fij.
    Operation is vectorized: if Fij is vector-valued, arg-k-min is computed for each dimension."""

    string_id = "ArgKMin_Reduction"

    def __init__(self, formula, K, tagIJ):
        super().__init__(formula, K, tagIJ)
        self.dim = K * formula.dim

    def FinalizeOutput(self, acc, out, i):
        fdim = self.formula.dim
        p = c_variable("int", new_c_varname("p"))
        loop, k = c_for_loop(0, fdim, 1, pragma_unroll=True)
        body = p.declare_assign(k)
        inner_loop, l = c_for_loop(
            k, k + 2 * self.K * fdim, 2 * fdim, pragma_unroll=True
        )
        body += inner_loop(out[p].assign(acc[l + fdim]) + p.add_assign(fdim))
        return loop(body)

    def DiffT(self, v, gradin):
        return Zero_Reduction(v.dim, v.cat % 2)
keops-main
keopscore/keopscore/formulas/reductions/ArgKMin_Reduction.py
from keopscore.utils.code_gen_utils import (
    c_array,
    c_zero_float,
    c_if,
    c_variable,
)


class Sum_Scheme:
    def __init__(self, red_formula, dtype, dimred=None):
        self.red_formula = red_formula
        if dimred is None:
            self.dimred = red_formula.dimred
        else:
            self.dimred = dimred

    def declare_temporary_accumulator(self):
        return self.tmp_acc.declare()

    def initialize_temporary_accumulator_first_init(self):
        return ""

    def initialize_temporary_accumulator_block_init(self):
        return ""

    def periodic_accumulate_temporary(self, acc, j):
        return ""

    def final_operation(self, acc):
        return ""


class direct_sum(Sum_Scheme):
    def declare_temporary_accumulator(self):
        return ""

    def initialize_temporary_accumulator(self):
        return ""

    def accumulate_result(self, acc, fout, j, hack=False):
        return self.red_formula.ReducePairShort(acc, fout, j)


class block_sum(Sum_Scheme):
    def __init__(self, red_formula, dtype, dimred=None):
        super().__init__(red_formula, dtype, dimred)
        self.tmp_acc = c_array(dtype, self.dimred, "tmp")

    def initialize_temporary_accumulator(self):
        return (
            "int period_accumulate = ny<10 ? 100 : sqrt(ny);\n"
            + self.red_formula.InitializeReduction(self.tmp_acc)
        )

    def initialize_temporary_accumulator_block_init(self):
        return self.red_formula.InitializeReduction(self.tmp_acc)

    def accumulate_result(self, acc, fout, j, hack=False):
        tmp_acc = acc if hack else self.tmp_acc
        return self.red_formula.ReducePairShort(tmp_acc, fout, j)

    def periodic_accumulate_temporary(self, acc, j):
        condition = c_variable("bool", f"!(({j.id}+1)%period_accumulate)")
        return c_if(
            condition,
            self.red_formula.ReducePair(acc, self.tmp_acc)
            + self.red_formula.InitializeReduction(self.tmp_acc),
        )

    def final_operation(self, acc):
        return self.red_formula.ReducePair(acc, self.tmp_acc)


class kahan_scheme(Sum_Scheme):
    def __init__(self, red_formula, dtype, dimred=None):
        super().__init__(red_formula, dtype, dimred)
        self.tmp_acc = c_array(dtype, red_formula.dim_kahan, "tmp")

    def initialize_temporary_accumulator(self):
        return self.tmp_acc.assign(c_zero_float)

    def initialize_temporary_accumulator_first_init(self):
        return self.initialize_temporary_accumulator()

    def accumulate_result(self, acc, fout, j, hack=False):
        return self.red_formula.KahanScheme(acc, fout, self.tmp_acc)
keops-main
keopscore/keopscore/formulas/reductions/sum_schemes.py
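The block_sum scheme above flushes a temporary accumulator into the main one every period_accumulate terms, which shortens the chains of same-magnitude additions and reduces float32 round-off compared to one long running sum. A plain Python sketch of the idea (names are illustrative, not keopscore code):

import numpy as np

x = np.random.rand(10**6).astype(np.float32)
period = int(np.sqrt(len(x)))  # mirrors "ny < 10 ? 100 : sqrt(ny)" above

acc, tmp = np.float32(0.0), np.float32(0.0)
for j, xi in enumerate(x):
    tmp += xi                       # accumulate_result into the temporary
    if (j + 1) % period == 0:       # periodic_accumulate_temporary
        acc, tmp = acc + tmp, np.float32(0.0)
acc += tmp                          # final_operation
print(acc, x.sum(dtype=np.float64))  # blocked float32 sum vs. float64 reference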
from .ArgKMin_Reduction import ArgKMin_Reduction
from .ArgMax_Reduction import ArgMax_Reduction
from .ArgMin_Reduction import ArgMin_Reduction
from .KMin_ArgKMin_Reduction import KMin_ArgKMin_Reduction
from .KMin_Reduction import KMin_Reduction
from .Max_ArgMax_Reduction import Max_ArgMax_Reduction
from .Max_Reduction import Max_Reduction
from .Max_SumShiftExpWeight_Reduction import (
    Max_SumShiftExpWeight_Reduction,
    Max_SumShiftExp_Reduction,
)
from .Min_ArgMin_Reduction import Min_ArgMin_Reduction
from .Min_Reduction import Min_Reduction
from .Sum_Reduction import Sum_Reduction
from .Zero_Reduction import Zero_Reduction
from .sum_schemes import *
keops-main
keopscore/keopscore/formulas/reductions/__init__.py
from keopscore.formulas.variables import Zero
from keopscore.formulas.reductions.Reduction import Reduction


class Zero_Reduction(Reduction):
    """Implements the zero reduction operation (fills output with zeros).
    N.B. The actual code for filling zeros is not here; when a Zero_Reduction is detected,
    the map_reduce scheme is redirected to CpuAssignZero or GpuAssignZero."""

    string_id = "Zero_Reduction"

    def __init__(self, dim, tagIJ):
        super().__init__(Zero(dim), tagIJ)
        self.dim = dim

    def DiffT(self, v, gradin, f0=None):
        return Zero_Reduction(v.dim, v.cat % 2)
keops-main
keopscore/keopscore/formulas/reductions/Zero_Reduction.py
from keopscore.utils.code_gen_utils import VectCopy
from keopscore.formulas.reductions.Min_ArgMin_Reduction_Base import (
    Min_ArgMin_Reduction_Base,
)


class Min_ArgMin_Reduction(Min_ArgMin_Reduction_Base):
    """Implements the min+argmin reduction operation : for each i or each j,
    find the minimal value of Fij and its index.
    Operation is vectorized: if Fij is vector-valued, min+argmin is computed for each dimension."""

    string_id = "Min_ArgMin_Reduction"

    def __init__(self, formula, tagIJ):
        super().__init__(formula, tagIJ)
        self.dim = 2 * formula.dim

    def FinalizeOutput(self, acc, out, i):
        return VectCopy(out, acc)
keops-main
keopscore/keopscore/formulas/reductions/Min_ArgMin_Reduction.py
from keopscore.utils.code_gen_utils import (
    c_zero_float,
    c_for_loop,
    c_variable,
    new_c_varname,
)
from keopscore.formulas.reductions.Reduction import Reduction


class Sum_Reduction(Reduction):
    """Sum reduction class"""

    string_id = "Sum_Reduction"

    def __init__(self, formula, tagIJ):
        super().__init__(formula, tagIJ)
        self.dim = formula.dim  # dimension of final output of reduction
        self.dimred = self.dim  # dimension of inner reduction variables
        self.dim_kahan = self.dim

    def InitializeReduction(self, tmp):
        # Returns C++ code to be used at initialization phase of the reduction.
        # Here it consists in setting the output array to zero.
        return tmp.assign(c_zero_float)

    def ReducePairScalar(self, tmp, xi):
        # Subroutine of ReducePairShort and ReducePair methods.
        # Returns C++ code that implements the "+=" accumulation operation of the sum reduction
        return tmp.add_assign(xi)

    def KahanScheme(self, acc, xi, tmp):
        loop, k = c_for_loop(0, self.dim, 1, pragma_unroll=True)
        a = c_variable(acc.dtype, new_c_varname("a"))
        b = c_variable(acc.dtype, new_c_varname("b"))
        return loop(
            a.declare_assign(xi[k] - tmp[k])
            + b.declare_assign(acc[k] + a)
            + tmp[k].assign((b - acc[k]) - a)
            + acc[k].assign(b)
        )

    def DiffT(self, v, gradin, f0=None):
        from keopscore.formulas.autodiff import Grad

        return Sum_Reduction(Grad(self.formula, v, gradin), v.cat % 2)
keops-main
keopscore/keopscore/formulas/reductions/Sum_Reduction.py
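KahanScheme above generates, for each coordinate k, the classic compensated-summation update a = xi - tmp; b = acc + a; tmp = (b - acc) - a; acc = b, where tmp tracks the low-order bits lost to rounding. The same logic in plain Python for a single float32 stream (illustrative only):

import numpy as np

def kahan_sum(values):
    acc = np.float32(0.0)
    tmp = np.float32(0.0)     # running compensation for lost low-order bits
    for xi in values:
        a = xi - tmp          # a = xi[k] - tmp[k]
        b = acc + a           # b = acc[k] + a
        tmp = (b - acc) - a   # recovers the rounding error of acc + a
        acc = b
    return acc

x = np.random.rand(10**5).astype(np.float32)
print(kahan_sum(x), x.sum(dtype=np.float64))  # compensated float32 vs. float64 reference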
from keopscore.utils.code_gen_utils import infinity, c_if
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.utils.misc_utils import KeOps_Error


class Min_Reduction(Reduction):
    """Implements the min reduction operation : for each i or each j,
    find the minimal value of Fij.
    Operation is vectorized: if Fij is vector-valued, min is computed for each dimension."""

    string_id = "Min_Reduction"

    def __init__(self, formula, tagIJ):
        super().__init__(formula, tagIJ)
        self.dim = formula.dim  # dimension of final output of reduction
        self.dimred = self.dim  # dimension of inner reduction variables

    def InitializeReduction(self, acc):
        # Returns C++ code to be used at initialization phase of the reduction.
        # Here it consists in setting the output array to +infinity.
        return acc.assign(infinity(acc.dtype))

    def ReducePairScalar(self, acc, xi):
        # Subroutine of ReducePairShort and ReducePair methods.
        if xi.dtype == "half2":
            KeOps_Error("not implemented")
        return c_if(xi < acc, acc.assign(xi))
keops-main
keopscore/keopscore/formulas/reductions/Min_Reduction.py
from keopscore.utils.code_gen_utils import neg_infinity, c_if
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.utils.misc_utils import KeOps_Error


class Max_Reduction(Reduction):
    """Implements the max reduction operation : for each i or each j,
    find the maximal value of Fij.
    Operation is vectorized: if Fij is vector-valued, max is computed for each dimension."""

    string_id = "Max_Reduction"

    def __init__(self, formula, tagIJ):
        super().__init__(formula, tagIJ)
        self.dim = formula.dim  # dimension of final output of reduction
        self.dimred = self.dim  # dimension of inner reduction variables

    def InitializeReduction(self, acc):
        # Returns C++ code to be used at initialization phase of the reduction.
        # Here it consists in setting the output array to -infinity.
        return acc.assign(neg_infinity(acc.dtype))

    def ReducePairScalar(self, acc, xi):
        # Subroutine of ReducePairShort and ReducePair methods.
        if xi.dtype == "half2":
            KeOps_Error("not implemented")
        return c_if(xi > acc, acc.assign(xi))
keops-main
keopscore/keopscore/formulas/reductions/Max_Reduction.py
from keopscore.utils.code_gen_utils import (
    infinity,
    cast_to,
    c_zero_float,
    c_for_loop,
    c_variable,
    new_c_varname,
    c_if,
    c_array,
    use_pragma_unroll,
)
from keopscore.formulas.reductions.Reduction import Reduction
from keopscore.utils.misc_utils import KeOps_Error


class KMin_ArgKMin_Reduction(Reduction):
    """Implements the k-min-arg-k-min reduction operation : for each i or each j,
    find the values and indices of the k minimal values of Fij.
    Operation is vectorized: if Fij is vector-valued, arg-k-min is computed for each dimension."""

    string_id = "KMin_ArgKMin_Reduction"

    def __init__(self, formula, K, tagIJ):
        super().__init__(formula, tagIJ)
        self.K = K
        # dim is the dimension of the output of the reduction:
        # K (value, index) pairs for each coordinate of the formula
        self.dim = 2 * K * formula.dim
        # We work with a (values, indices) vector
        self.dimred = self.dim  # dimension of inner reduction variables

    def InitializeReduction(self, acc):
        # Returns C++ code to be used at initialization phase of the reduction.
        if acc.dtype == "half2":
            KeOps_Error("not implemented")
        fdim, K = self.formula.dim, self.K
        outer_loop, k = c_for_loop(0, fdim, 1, pragma_unroll=True)
        inner_loop, l = c_for_loop(k, k + (2 * K * fdim), 2 * fdim, pragma_unroll=True)
        return outer_loop(
            inner_loop(
                acc[l].assign(infinity(acc.dtype)) + acc[l + fdim].assign(c_zero_float)
            )
        )

    def ReducePair(self, acc, xi):
        # Returns C++ code that implements the update phase of the reduction.
        dtype = xi.dtype
        fdim = self.formula.dim
        out = c_array(dtype, self.dimred, new_c_varname("out"))
        outer_loop, k = c_for_loop(0, fdim, 1)
        p = c_variable("int", new_c_varname("p"))
        q = c_variable("int", new_c_varname("q"))
        inner_loop, l = c_for_loop(k, self.dimred, 2 * fdim)
        inner_body = c_if(
            xi[p] < acc[q],
            out[l].assign(xi[p])
            + out[l + fdim].assign(xi[p + fdim])
            + p.add_assign(2 * fdim),
            out[l].assign(acc[q])
            + out[l + fdim].assign(acc[q + fdim])
            + q.add_assign(2 * fdim),
        )
        outer_body = p.declare_assign(k) + q.declare_assign(k) + inner_loop(inner_body)
        final_loop, k = c_for_loop(0, self.dimred, 1)
        return (
            out.declare() + outer_loop(outer_body) + final_loop(acc[k].assign(out[k]))
        )

    def ReducePairShort(self, acc, xi, ind):
        fdim, K = self.formula.dim, self.K
        dtype = xi.dtype
        xik = c_variable(dtype, new_c_varname("xik"))
        l = c_variable("int", new_c_varname("l"))
        k = c_variable("int", new_c_varname("k"))
        tmpl = c_variable(dtype, new_c_varname("tmpl"))
        indtmpl = c_variable("int", new_c_varname("indtmpl"))
        return f"""
            {{
                {xik.declare()}
                {l.declare()}
                {use_pragma_unroll()}
                for(int {k.id}=0; {k.id}<{fdim}; {k.id}++) {{
                    {xik.assign(xi[k])}
                    {use_pragma_unroll()}
                    for({l.id}={(k+(K-1)*2*fdim).id}; {l.id}>={k.id} && {(xik<acc[l]).id}; {l.id}-={2*fdim}) {{
                        {tmpl.declare_assign(acc[l])}
                        {indtmpl.declare_assign(acc[l+fdim])}
                        {acc[l].assign(xik)}
                        {acc[l+fdim].assign(ind)}
                        if({l.id}<{(k+(2*fdim*(K-1))).id}) {{
                            {acc[l+2*fdim].assign(tmpl)}
                            {acc[l+2*fdim+fdim].assign(indtmpl)}
                        }}
                    }}
                }}
            }}
        """
keops-main
keopscore/keopscore/formulas/reductions/KMin_ArgKMin_Reduction.py
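ReducePairShort above emits C++ that inserts a new (value, index) pair into K sorted pairs kept interleaved in the accumulator, shifting larger entries down and letting the largest fall off the end. The same bookkeeping in plain Python on a list of pairs (illustrative only, not keopscore code):

def insert_kmin(pairs, value, index):
    # pairs: list of (value, index) sorted by value, length K
    l = len(pairs) - 1
    while l >= 0 and value < pairs[l][0]:
        old = pairs[l]             # tmpl / indtmpl in the generated C++ above
        pairs[l] = (value, index)
        if l + 1 < len(pairs):
            pairs[l + 1] = old     # shift the displaced entry down
        l -= 1

K = 3
pairs = [(float("inf"), 0)] * K    # InitializeReduction: (inf, 0) pairs
for i, v in enumerate([5.0, 2.0, 9.0, 1.0, 7.0]):
    insert_kmin(pairs, v, i)
print(pairs)  # [(1.0, 3), (2.0, 1), (5.0, 0)]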
""" PyTorch, on the GPU =========================== """ #################################### # Blabla # import torch import numpy as np from time import time nits = 100 Ns, D = [10000, 100000, 1000000], 3 def KP(x, y, b): D_xx = (x * x).sum(-1).unsqueeze(1) # (N,1) D_xy = torch.matmul(x, y.permute(1, 0)) # (N,D) @ (D,M) = (N,M) D_yy = (y * y).sum(-1).unsqueeze(0) # (1,M) D_xy = D_xx - 2 * D_xy + D_yy K_xy = (-D_xy).exp() return K_xy @ b for N in Ns: # Generate the data x = torch.randn(N, D).cuda() y = torch.randn(N, D).cuda() p = torch.randn(N, 1).cuda() # First run to warm-up the device... p = KP(x, y, p) # Actual timings: start = time() for _ in range(nits): p = KP(x, y, p) torch.cuda.synchronize() end = time() print("Timing with {} points: {} x {:.4f}s".format(N, nits, (end - start) / nits))
keops-main
benchmarks/PyTorch_GPU.py
""" PyTorch, on a TPU ============================ """ ################################################################ # This code should be run on a Google Colab session, with TPU acceleration. # import os assert os.environ[ "COLAB_TPU_ADDR" ], "Make sure to select TPU from Edit > Notebook settings > Hardware accelerator" ################################################### # DIST_BUCKET = "gs://tpu-pytorch/wheels" TORCH_WHEEL = "torch-1.15-cp36-cp36m-linux_x86_64.whl" TORCH_XLA_WHEEL = "torch_xla-1.15-cp36-cp36m-linux_x86_64.whl" TORCHVISION_WHEEL = "torchvision-0.3.0-cp36-cp36m-linux_x86_64.whl" # Install Colab TPU compat PyTorch/TPU wheels and dependencies """ !pip uninstall -y torch torchvision !gsutil cp "$DIST_BUCKET/$TORCH_WHEEL" . !gsutil cp "$DIST_BUCKET/$TORCH_XLA_WHEEL" . !gsutil cp "$DIST_BUCKET/$TORCHVISION_WHEEL" . !pip install "$TORCH_WHEEL" !pip install "$TORCH_XLA_WHEEL" !pip install "$TORCHVISION_WHEEL" !sudo apt-get install libomp5 """ ################################################### # import torch import torch_xla import torch_xla.core.xla_model as xm t = torch.randn(2, 2, device=xm.xla_device()) print(t.device) print(t) ################################################### # # Run the cell below two times: once for the compilation, once for profiling! # nits = 100 Ns, D = [10000, 100000, 1000000], 3 for N in Ns: x = torch.randn(N, D, device=xm.xla_device()) y = torch.randn(N, D, device=xm.xla_device()) p = torch.randn(N, 1, device=xm.xla_device()) def KP(x, y, p): D_xx = (x * x).sum(-1).unsqueeze(1) # (N,1) D_xy = torch.matmul(x, y.permute(1, 0)) # (N,D) @ (D,M) = (N,M) D_yy = (y * y).sum(-1).unsqueeze(0) # (1,M) D_xy = D_xx - 2 * D_xy + D_yy K_xy = (-D_xy).exp() return K_xy @ p import time start = time.time() for _ in range(nits): p = KP(x, y, p) print(p) end = time.time() print("Timing with {} points: {} x {:.4f}s".format(N, nits, (end - start) / nits))
keops-main
benchmarks/PyTorch_TPU.py
""" KeOps specific ======================================== """ ######################### # (N.B.: with data on device would be slightly better!) # import torch import numpy as np from time import time ###################################################################### # Benchmark specifications: # nits = 1 Ns, D = [10000, 100000, 1000000], 3 dtype = "float32" from pykeops.numpy import RadialKernelConv my_conv = RadialKernelConv(dtype) def KP(x, y, p): return my_conv(x, y, p, 1.0, kernel="gaussian") for N in Ns: # Generate the data x = np.random.randn(N, D).astype(dtype) y = np.random.randn(N, D).astype(dtype) p = np.random.randn(N, 1).astype(dtype) # First run just in case... p = KP(x, y, p) # Timings for KeOps specific start = time() for _ in range(nits): p = KP(x, y, p) end = time() print("Timing with {} points: {} x {:.4f}s".format(N, nits, (end - start) / nits))
keops-main
benchmarks/KeOps_specific.py
""" KeOps ===== """ import torch import numpy as np from time import time nits = 10 Ns, D = [10000, 100000, 1000000], 3 from pykeops.torch import generic_sum KP = generic_sum( "Exp(-SqDist(X,Y)) * B", # Formula "A = Vi(1)", # Output "X = Vi({})".format(D), # 1st argument "Y = Vj({})".format(D), # 2nd argument "B = Vj(1)", ) # 3rd argument for N in Ns: # Generate the data x = torch.randn(N, D).cuda() y = torch.randn(N, D).cuda() p = torch.randn(N, 1).cuda() # First run just in case... p = KP(x, y, p) # Timings for KeOps start = time() for _ in range(nits): p = KP(x, y, p) end = time() print("Timing with {} points: {} x {:.4f}s".format(N, nits, (end - start) / nits))
keops-main
benchmarks/KeOps.py
""" TVM =============== """ ########################## # If you're running this script on Google Colab, use the following lines to install TVM on your session: #%matplotlib inline # # try: # import google.colab # IN_COLAB = True # except: # IN_COLAB = False # # if IN_COLAB: # ! gsutil cp "gs://tvm-fcrc-binaries-7f775516ff9dfab922c304049f294cec/tvm.tar.gz" /tmp/tvm.tar.gz # ! mkdir -p /tvm # ! tar -xf /tmp/tvm.tar.gz --strip-components=4 --directory /tvm # ! ls -la /tvm # ! bash /tvm/package.sh # # Add TVM to the Python path. # import sys # sys.path.append('/tvm/python') # sys.path.append('/tvm/topi/python') # sys.path.append('/tvm/nnvm/python') # sys.path.append('/tvm/vta/python') # else: # print("Notebook executing locally, skipping Colab setup ...") ################################################################### # Actual benchmark: from __future__ import absolute_import, print_function import tvm import numpy as np from time import time # Global declarations of environment. tgt_host = "llvm" tgt = "cuda" ctx = tvm.context(tgt, 0) # Declare axis and reduction indices n = tvm.var("n") j = tvm.reduce_axis((0, n), "j") # Declare Variable A0 = tvm.placeholder((n,), name="A0", dtype="float32") A1 = tvm.placeholder((n,), name="A1", dtype="float32") A2 = tvm.placeholder((n,), name="A2", dtype="float32") B0 = tvm.placeholder((n,), name="B0", dtype="float32") B1 = tvm.placeholder((n,), name="B1", dtype="float32") B2 = tvm.placeholder((n,), name="B2", dtype="float32") D = tvm.placeholder((n,), name="D", dtype="float32") D_ij = ( lambda i: (A0[i] - B0[j]) * (B0[j] - A0[i]) + (A1[i] - B1[j]) * (B1[j] - A1[i]) + (A2[i] - B2[j]) * (B2[j] - A2[i]) ) K_ij = lambda i: tvm.call_pure_extern("float32", "__expf", D_ij(i)) C0 = tvm.compute((n,), lambda i: tvm.sum(K_ij(i) * D[j], axis=j), name="C0") # Scheduled the computation s0 = tvm.create_schedule(C0.op) bx, tx = s0[C0].split(C0.op.axis[0], factor=192) s0[C0].bind(bx, tvm.thread_axis("blockIdx.x")) s0[C0].bind(tx, tvm.thread_axis("threadIdx.x")) # Actually build the binary fconv0 = tvm.build( s0, [A0, A1, A2, B0, B1, B2, D, C0], tgt, target_host=tgt_host, name="myconv0" ) # Benchmark nits = 10 Ns = [10000, 100000, 1000000] for n in Ns: a_np = np.random.randn(n, 3).astype(A0.dtype) a0 = tvm.nd.array(a_np[:, 0], ctx) a1 = tvm.nd.array(a_np[:, 1], ctx) a2 = tvm.nd.array(a_np[:, 2], ctx) b_np = np.random.randn(n, 3).astype(B0.dtype) b0 = tvm.nd.array(b_np[:, 0], ctx) b1 = tvm.nd.array(b_np[:, 1], ctx) b2 = tvm.nd.array(b_np[:, 2], ctx) d_np = np.random.randn( n, ).astype(D.dtype) d = tvm.nd.array(d_np, ctx) c_np = np.random.randn(n, 3).astype(C0.dtype) c = tvm.nd.array(c_np[:, 0], ctx) start = time() for _ in range(nits): fconv0(a0, a1, a2, b0, b1, b2, d, c) # Evaluations ctx.sync() end = time() print("Timing with {} points: {} x {:.4f}s".format(n, nits, (end - start) / nits))
keops-main
benchmarks/TVM.py
""" TensorFlow, with an XLA backend ==================================== """ import tensorflow as tf # tf.config.optimizer.set_jit(True) from time import time # Make sure that we're using the v2.0.0 print(tf.__version__) # Our function, that XLA is going to compile def KP(x, y, p): D_ij = tf.math.reduce_sum((x - y) ** 2, axis=2) K_ij = tf.math.exp(-D_ij) return K_ij @ p nits = 100 Ns, D = [10000, 100000, 1000000], 3 ############################################# # # # First, test without XLA for N in Ns[:1]: # Generate the data x = tf.random.normal((N, 1, D)) y = tf.random.normal((1, N, D)) p = tf.random.normal((N, 1)) # First run just in case... out = KP(x, y, p) # Timings for TF vanilla start = time() for _ in range(nits): out = KP(x, y, p) # N.B.: we need some kind of "print" statement to make # sure that TF actually executes our code print("TF Vanilla : ", out[:10]) end = time() print("Timing with {} points: {} x {:.4f}s".format(N, nits, (end - start) / nits)) ############################################## # # Second, test with XLA for N in Ns: # Generate the data x = tf.random.normal((N, 1, D)) y = tf.random.normal((1, N, D)) p = tf.random.normal((N, 1)) # Precompile just in case... out = tf.xla.experimental.compile(KP, inputs=[x, y, p]) start = time() for _ in range(nits): out = tf.xla.experimental.compile(KP, inputs=[x, y, p])[0] # N.B.: we need some kind of "print" statement to make # sure that TF actually executes our code print("XLA : ", out[:10]) end = time() print("Timing with {} points: {} x {:.4f}s".format(N, nits, (end - start) / nits))
keops-main
benchmarks/TF_XLA.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# KeOps documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 13 14:50:06 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("sphinxext"))

try:
    import pykeops
except:
    sys.path.insert(0, os.path.join(os.path.abspath(os.pardir), "pykeops"))
    import pykeops

from pykeops import __version__

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "matplotlib.sphinxext.plot_directive",
    "sphinxcontrib.httpdomain",
    "sphinx_gallery.gen_gallery",
    "sphinx.ext.napoleon",
    # 'sphinx.ext.viewcode',
    "sphinx.ext.linkcode",
]


# sphinx.ext.linkcode
def linkcode_resolve(domain, info):
    def find_source():
        # try to find the file and line number, based on code from numpy:
        # https://github.com/numpy/numpy/blob/main/doc/source/conf.py#L286
        obj = sys.modules[info["module"]]
        for part in info["fullname"].split("."):
            obj = getattr(obj, part)
        import inspect
        import os

        fn = inspect.getsourcefile(obj)
        fn = os.path.relpath(fn, start=os.path.dirname(pykeops.__file__))
        source, lineno = inspect.getsourcelines(obj)
        return fn, lineno, lineno + len(source) - 1

    if domain != "py" or not info["module"]:
        return None
    try:
        filename = "pykeops/%s#L%d-L%d" % find_source()
    except Exception:
        filename = info["module"].replace(".", "/") + ".py"
    return "https://github.com/getkeops/keops/tree/main/%s" % filename


# def linkcode_resolve(domain, info):
#     from sphinx.util import get_full_modname
#     if domain != 'py':
#         return None
#     if not info['module']:
#         return None
#     filename = get_full_modname(info['module'], info['fullname']).replace('.', '/')
#     return "https://github.com/getkeops/keops/tree/main/%s.py" % filename

from sphinx_gallery.sorting import FileNameSortKey

sphinx_gallery_conf = {
    # path to your examples scripts
    "examples_dirs": [
        "../pykeops/pykeops/tutorials",
        "../pykeops/pykeops/benchmarks",
        "../pykeops/pykeops/examples",
    ],
    # path where to save gallery generated examples
    "gallery_dirs": ["_auto_tutorials", "_auto_benchmarks", "./_auto_examples"],
    # order of the Gallery
    "within_subsection_order": FileNameSortKey,
    # Add patterns
    # 'filename_pattern': r'../pykeops/pykeops/tutorials/*',
}

# Generate the API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False
autodoc_member_order = "bysource"


def skip(app, what, name, obj, would_skip, options):
    if name == "__call__":
        return False
    return would_skip


def setup(app):
    app.connect("autodoc-skip-member", skip)


# Include the example source for plots in API docs
# plot_include_source = True
# plot_formats = [("png", 90)]
# plot_html_show_formats = False
# plot_html_show_source_link = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

source_parsers = {
    ".md": "recommonmark.parser.CommonMarkParser",
}

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]

# The root toctree document.
root_doc = "index"

# General information about the project.
project = "KeOps"
# import time
# copyright = '2018-{}, Benjamin Charlier, Jean Feydy, Joan A. Glaunès'.format(time.strftime("%Y"))
copyright = "2018-2021, Benjamin Charlier, Jean Feydy, Joan A. Glaunès."
author = "Benjamin Charlier, Jean Feydy, Joan A. Glaunès."

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

exclude_patterns = ["readme_first.md"]

# display broken internal links
nitpicky = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "canonical_url": "",
    "analytics_id": "",
    "logo_only": False,
    "display_version": True,
    "prev_next_buttons_location": "bottom",
    "style_external_links": False,
    # Toc options
    "collapse_navigation": True,
    "sticky_navigation": True,
    "navigation_depth": 4,
    "includehidden": True,
    "titles_only": False,
}

html_context = {
    "display_github": True,  # Integrate Github
    "github_user": "getkeops",  # Username
    "github_repo": "keops",  # Repo name
    "github_version": "main",  # Version
    "conf_py_path": "/doc/",  # Path in the checkout to the docs root
}

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = "KeOps"

# A shorter title for the navigation bar.  Default is the same as html_title.
html_short_title = "KeOps documentation"

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo.png"

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/logo.ico"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%b %d, %Y"

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_show_sphinx = False

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "KeOpsdoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        root_doc,
        "KeOps.tex",
        "KeOps Documentation",
        "Benjamin Charlier, Jean Feydy, Joan A. Glaunès",
        "manual",
    ),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(root_doc, "keops", "KeOps Documentation", [author], 1)]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        root_doc,
        "KeOps",
        "KeOps Documentation",
        author,
        "KeOps",
        "One line description of project.",
        "Miscellaneous",
    ),
]


def setup(app):
    app.add_css_file("css/custom.css")
keops-main
doc/conf.py
#! /usr/bin/env python
"""
Convert empty IPython notebook to a sphinx doc page.
"""
import sys
from subprocess import check_call as sh


def convert_nb(nbname):
    # Execute the notebook
    sh(["jupyter", "nbconvert", "--to", "notebook", "--execute", "--inplace", nbname])

    # Convert to .rst for Sphinx
    sh(
        [
            "jupyter",
            "nbconvert",
            "--to",
            "rst",
            nbname,
            "--TagRemovePreprocessor.remove_cell_tags={'hide'}",
            "--TagRemovePreprocessor.remove_input_tags={'hide-input'}",
            "--TagRemovePreprocessor.remove_all_outputs_tags={'hide-output'}",
        ]
    )

    # Clear notebook output
    sh(
        [
            "jupyter",
            "nbconvert",
            "--to",
            "notebook",
            "--inplace",
            "--ClearOutputPreprocessor.enabled=True",
            nbname,
        ]
    )

    # Touch the .rst file so it has a later modify time than the source
    sh(["touch", nbname + ".rst"])


if __name__ == "__main__":
    for nbname in sys.argv[1:]:
        convert_nb(nbname)
keops-main
doc/tools/nb_to_doc.py
from setuptools import setup, find_packages

setup(
  name = 'res-mlp-pytorch',
  packages = find_packages(exclude=[]),
  version = '0.0.6',
  license='MIT',
  description = 'ResMLP - Pytorch',
  author = 'Phil Wang',
  author_email = '[email protected]',
  url = 'https://github.com/lucidrains/res-mlp-pytorch',
  keywords = [
    'artificial intelligence',
    'deep learning',
    'image recognition'
  ],
  install_requires=[
    'einops>=0.3',
    'torch>=1.6'
  ],
  classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
  ],
)
res-mlp-pytorch-main
setup.py
from res_mlp_pytorch.res_mlp_pytorch import ResMLP
res-mlp-pytorch-main
res_mlp_pytorch/__init__.py
import torch
from torch import nn, einsum
from einops.layers.torch import Rearrange, Reduce

# helpers

def pair(val):
    return (val, val) if not isinstance(val, tuple) else val

# classes

class Affine(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1, 1, dim))
        self.b = nn.Parameter(torch.zeros(1, 1, dim))

    def forward(self, x):
        return x * self.g + self.b

class PreAffinePostLayerScale(nn.Module): # https://arxiv.org/abs/2103.17239
    def __init__(self, dim, depth, fn):
        super().__init__()
        if depth <= 18:
            init_eps = 0.1
        elif depth > 18 and depth <= 24:
            init_eps = 1e-5
        else:
            init_eps = 1e-6

        scale = torch.zeros(1, 1, dim).fill_(init_eps)
        self.scale = nn.Parameter(scale)
        self.affine = Affine(dim)
        self.fn = fn

    def forward(self, x):
        return self.fn(self.affine(x)) * self.scale + x

def ResMLP(*, image_size, patch_size, dim, depth, num_classes, expansion_factor = 4):
    image_height, image_width = pair(image_size)
    assert (image_height % patch_size) == 0 and (image_width % patch_size) == 0, 'image height and width must be divisible by patch size'
    num_patches = (image_height // patch_size) * (image_width // patch_size)
    wrapper = lambda i, fn: PreAffinePostLayerScale(dim, i + 1, fn)

    return nn.Sequential(
        Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
        nn.Linear((patch_size ** 2) * 3, dim),
        *[nn.Sequential(
            wrapper(i, nn.Conv1d(num_patches, num_patches, 1)),
            wrapper(i, nn.Sequential(
                nn.Linear(dim, dim * expansion_factor),
                nn.GELU(),
                nn.Linear(dim * expansion_factor, dim)
            ))
        ) for i in range(depth)],
        Affine(dim),
        Reduce('b n c -> b c', 'mean'),
        nn.Linear(dim, num_classes)
    )
res-mlp-pytorch-main
res_mlp_pytorch/res_mlp_pytorch.py
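Since `ResMLP` returns a plain `nn.Sequential`, inference is a single forward call. A minimal sketch; the hyperparameters are illustrative, and note the patch embedding hard-codes 3 input channels:

import torch
from res_mlp_pytorch import ResMLP

model = ResMLP(
    image_size = 256,
    patch_size = 16,
    dim = 512,
    depth = 12,
    num_classes = 1000
)

img = torch.randn(1, 3, 256, 256)  # (batch, channels, height, width)
preds = model(img)                 # (1, 1000) class logits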
from setuptools import setup, find_packages

setup(
    name = 'BS-RoFormer',
    packages = find_packages(exclude=[]),
    version = '0.0.2',
    license='MIT',
    description = 'BS-RoFormer - Band-Split Rotary Transformer for SOTA Music Source Separation',
    author = 'Phil Wang',
    author_email = '[email protected]',
    long_description_content_type = 'text/markdown',
    url = 'https://github.com/lucidrains/BS-RoFormer',
    keywords = [
        'artificial intelligence',
        'deep learning',
        'transformers',
        'attention mechanism',
        'music source separation'
    ],
    install_requires=[
        'beartype',
        'einops>=0.6.1',
        'rotary-embedding-torch>=0.3.0',
        'torch>=2.0',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
)
BS-RoFormer-main
setup.py
from bs_roformer.bs_roformer import BSRoformer
BS-RoFormer-main
bs_roformer/__init__.py
from functools import wraps
from packaging import version
from collections import namedtuple

import torch
from torch import nn, einsum
import torch.nn.functional as F

from einops import rearrange, reduce

# constants

FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])

# helpers

def exists(val):
    return val is not None

def once(fn):
    called = False
    @wraps(fn)
    def inner(x):
        nonlocal called
        if called:
            return
        called = True
        return fn(x)
    return inner

print_once = once(print)

# main class

class Attend(nn.Module):
    def __init__(
        self,
        dropout = 0.,
        flash = False
    ):
        super().__init__()
        self.dropout = dropout
        self.attn_dropout = nn.Dropout(dropout)

        self.flash = flash
        assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'

        # determine efficient attention configs for cuda and cpu

        self.cpu_config = FlashAttentionConfig(True, True, True)
        self.cuda_config = None

        if not torch.cuda.is_available() or not flash:
            return

        device_properties = torch.cuda.get_device_properties(torch.device('cuda'))

        if device_properties.major == 8 and device_properties.minor == 0:
            print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
            self.cuda_config = FlashAttentionConfig(True, False, False)
        else:
            print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
            self.cuda_config = FlashAttentionConfig(False, True, True)

    def flash_attn(self, q, k, v):
        _, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device

        # Check if there is a compatible device for flash attention

        config = self.cuda_config if is_cuda else self.cpu_config

        # pytorch 2.0 flash attn: q, k, v, mask, dropout, softmax_scale

        with torch.backends.cuda.sdp_kernel(**config._asdict()):
            out = F.scaled_dot_product_attention(
                q, k, v,
                dropout_p = self.dropout if self.training else 0.
            )

        return out

    def forward(self, q, k, v):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """

        q_len, k_len, device = q.shape[-2], k.shape[-2], q.device

        scale = q.shape[-1] ** -0.5

        if self.flash:
            return self.flash_attn(q, k, v)

        # similarity

        sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale

        # attention

        attn = sim.softmax(dim=-1)
        attn = self.attn_dropout(attn)

        # aggregate values

        out = einsum("b h i j, b h j d -> b h i d", attn, v)

        return out
BS-RoFormer-main
bs_roformer/attend.py
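`Attend` expects queries, keys and values already split into heads, shaped `(batch, heads, seq, dim_head)`. A minimal sketch exercising the non-flash path (shapes are illustrative; `flash = True` additionally requires pytorch 2.0):

import torch
from bs_roformer.attend import Attend

attend = Attend(dropout = 0., flash = False)

q = torch.randn(1, 8, 1024, 64)  # (batch, heads, seq, dim_head)
k = torch.randn(1, 8, 1024, 64)
v = torch.randn(1, 8, 1024, 64)

out = attend(q, k, v)  # (1, 8, 1024, 64)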
import torch
from torch import nn, einsum, Tensor
from torch.nn import Module, ModuleList
import torch.nn.functional as F

from bs_roformer.attend import Attend

from beartype.typing import Tuple, Optional, List
from beartype import beartype

from rotary_embedding_torch import RotaryEmbedding

from einops import rearrange, pack, unpack

# helper functions

def exists(val):
    return val is not None

# norm

class RMSNorm(Module):
    def __init__(self, dim):
        super().__init__()
        self.scale = dim ** 0.5
        self.gamma = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        return F.normalize(x, dim = -1) * self.scale * self.gamma

# attention

class FeedForward(Module):
    def __init__(
        self,
        dim,
        mult = 4,
        dropout = 0.
    ):
        super().__init__()
        dim_inner = int(dim * mult)
        self.net = nn.Sequential(
            RMSNorm(dim),
            nn.Linear(dim, dim_inner),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(dim_inner, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        return self.net(x)

class Attention(Module):
    def __init__(
        self,
        dim,
        heads = 8,
        dim_head = 64,
        dropout = 0.,
        rotary_embed = None,
        flash = True
    ):
        super().__init__()
        self.heads = heads
        self.scale = dim_head ** -0.5
        dim_inner = heads * dim_head

        self.rotary_embed = rotary_embed

        self.attend = Attend(flash = flash, dropout = dropout)

        self.norm = RMSNorm(dim)
        self.to_qkv = nn.Linear(dim, dim_inner * 3, bias = False)

        self.to_out = nn.Sequential(
            nn.Linear(dim_inner, dim, bias = False),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        x = self.norm(x)

        q, k, v = rearrange(self.to_qkv(x), 'b n (qkv h d) -> qkv b h n d', qkv = 3, h = self.heads)

        if exists(self.rotary_embed):
            q = self.rotary_embed.rotate_queries_or_keys(q)
            k = self.rotary_embed.rotate_queries_or_keys(k)

        out = self.attend(q, k, v)

        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)

class Transformer(Module):
    def __init__(
        self,
        *,
        dim,
        depth,
        dim_head = 64,
        heads = 8,
        attn_dropout = 0.,
        ff_dropout = 0.,
        ff_mult = 4,
        norm_output = True,
        rotary_embed = None,
        flash_attn = True
    ):
        super().__init__()
        self.layers = ModuleList([])

        for _ in range(depth):
            self.layers.append(ModuleList([
                Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, rotary_embed = rotary_embed, flash = flash_attn),
                FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
            ]))

        self.norm = RMSNorm(dim) if norm_output else nn.Identity()

    def forward(self, x):
        for attn, ff in self.layers:
            x = attn(x) + x
            x = ff(x) + x

        return self.norm(x)

# bandsplit module

class BandSplit(Module):
    @beartype
    def __init__(
        self,
        dim,
        dim_inputs: Tuple[int, ...]
    ):
        super().__init__()
        self.dim_inputs = dim_inputs
        self.to_features = ModuleList([])

        for dim_in in dim_inputs:
            net = nn.Sequential(
                RMSNorm(dim_in),
                nn.Linear(dim_in, dim)
            )

            self.to_features.append(net)

    def forward(self, x):
        x = x.split(self.dim_inputs, dim = -1)

        outs = []
        for split_input, to_feature in zip(x, self.to_features):
            split_output = to_feature(split_input)
            outs.append(split_output)

        return torch.stack(outs, dim = -2)

class LinearGLUWithTanH(Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim = -1)
        return x.tanh() * gate.sigmoid()

class MaskEstimator(Module):
    @beartype
    def __init__(
        self,
        dim,
        dim_inputs: Tuple[int, ...],
        depth
    ):
        super().__init__()
        self.dim_inputs = dim_inputs
        self.to_freqs = ModuleList([])

        for dim_in in dim_inputs:
            net = []

            for ind in range(depth):
                is_last = ind == (depth - 1)
                dim_out = dim if not is_last else dim_in
                net.append(LinearGLUWithTanH(dim, dim_out))

            self.to_freqs.append(nn.Sequential(*net))

    def forward(self, x):
        x = x.unbind(dim = -2)

        outs = []

        for band_features, to_freq in zip(x, self.to_freqs):
            freq_out = to_freq(band_features)
            outs.append(freq_out)

        return torch.cat(outs, dim = -1)

# main class

class BSRoformer(Module):

    @beartype
    def __init__(
        self,
        dim,
        *,
        depth,
        time_transformer_depth = 2,
        freq_transformer_depth = 2,
        freqs_per_bands: Tuple[int, ...] = (256, 257),  # in the paper, they divide into ~60 bands, test with 1 for starters
        dim_head = 64,
        heads = 8,
        attn_dropout = 0.,
        ff_dropout = 0.,
        flash_attn = True,
        dim_freqs_in = 513,
        stft_n_fft = 1024,
        stft_hop_length = 256,
        stft_win_length = 1024,
        stft_normalized = False,
        mask_estimator_depth = 1,
        multi_stft_resolution_loss_weight = 1.,
        multi_stft_resolutions_window_sizes: Tuple[int, ...] = (4096, 2048, 1024, 512, 256),
        multi_stft_hop_size = 147,
        multi_stft_normalized = False
    ):
        super().__init__()

        self.layers = ModuleList([])

        transformer_kwargs = dict(
            dim = dim,
            heads = heads,
            dim_head = dim_head,
            attn_dropout = attn_dropout,
            ff_dropout = ff_dropout,
            flash_attn = flash_attn
        )

        time_rotary_embed = RotaryEmbedding(dim = dim_head)
        freq_rotary_embed = RotaryEmbedding(dim = dim_head)

        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                Transformer(depth = time_transformer_depth, rotary_embed = time_rotary_embed, **transformer_kwargs),
                Transformer(depth = freq_transformer_depth, rotary_embed = freq_rotary_embed, **transformer_kwargs)
            ]))

        self.stft_kwargs = dict(
            n_fft = stft_n_fft,
            hop_length = stft_hop_length,
            win_length = stft_win_length,
            normalized = stft_normalized
        )

        freqs = torch.stft(torch.randn(1, 1024), **self.stft_kwargs, return_complex = True).shape[1]

        assert len(freqs_per_bands) > 1
        assert sum(freqs_per_bands) == freqs, f'the number of freqs in the bands must equal {freqs} based on the STFT settings'

        freqs_per_bands_with_complex = tuple(2 * f for f in freqs_per_bands)

        self.band_split = BandSplit(
            dim = dim,
            dim_inputs = freqs_per_bands_with_complex
        )

        self.mask_estimator = MaskEstimator(
            dim = dim,
            dim_inputs = freqs_per_bands_with_complex,
            depth = mask_estimator_depth
        )

        # for the multi-resolution stft loss

        self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight
        self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes
        self.multi_stft_n_fft = stft_n_fft

        self.multi_stft_kwargs = dict(
            hop_length = multi_stft_hop_size,
            normalized = multi_stft_normalized
        )

    def forward(
        self,
        raw_audio,
        target = None,
        return_loss_breakdown = False
    ):
        """
        einops

        b - batch
        f - freq
        t - time
        c - complex (2)
        d - feature dimension
        """

        # to stft

        stft_repr = torch.stft(raw_audio, **self.stft_kwargs, return_complex = True)
        stft_repr = torch.view_as_real(stft_repr)

        x = rearrange(stft_repr, 'b f t c -> b t (f c)')

        x = self.band_split(x)

        # axial / hierarchical attention

        for time_transformer, freq_transformer in self.layers:

            x = rearrange(x, 'b t f d -> b f t d')
            x, ps = pack([x], 'b * d')

            x = time_transformer(x)

            x, = unpack(x, ps, 'b * d')
            x = rearrange(x, 'b f t d -> b t f d')
            x, ps = pack([x], 'b * d')

            x = freq_transformer(x)

            x, = unpack(x, ps, 'b * d')

        mask = self.mask_estimator(x)
        mask = rearrange(mask, 'b t (f c) -> b f t c', c = 2)

        # modulate frequency representation

        stft_repr = stft_repr * mask

        # istft

        stft_repr = torch.view_as_complex(stft_repr)

        recon_audio = torch.istft(stft_repr, **self.stft_kwargs, return_complex = False)

        # if a target is passed in, calculate loss for learning

        if not exists(target):
            return recon_audio

        target = target[..., :recon_audio.shape[-1]]  # protect against lost length on istft

        loss = F.l1_loss(recon_audio, target)

        multi_stft_resolution_loss = 0.

        for window_size in self.multi_stft_resolutions_window_sizes:

            res_stft_kwargs = dict(
                n_fft = max(window_size, self.multi_stft_n_fft),  # not sure what n_fft is across multi resolution stft
                win_length = window_size,
                return_complex = True,
                **self.multi_stft_kwargs,
            )

            recon_Y = torch.stft(recon_audio, **res_stft_kwargs)
            target_Y = torch.stft(target, **res_stft_kwargs)

            multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y)

        weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight

        total_loss = loss + weighted_multi_resolution_loss

        if not return_loss_breakdown:
            return total_loss

        return total_loss, (loss, multi_stft_resolution_loss)
BS-RoFormer-main
bs_roformer/bs_roformer.py
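End to end, the model maps raw waveforms to raw waveforms, computing the L1 + multi-resolution STFT loss internally when a target stem is supplied. A minimal sketch; the audio length and hyperparameters are illustrative, `flash_attn = False` sidesteps the pytorch 2.0 requirement, and the default band split (256, 257) matches the 513 frequency bins of the default n_fft = 1024:

import torch
from bs_roformer import BSRoformer

model = BSRoformer(
    dim = 512,
    depth = 2,
    time_transformer_depth = 1,
    freq_transformer_depth = 1,
    flash_attn = False
)

x = torch.randn(2, 352800)       # (batch, samples) of raw audio
target = torch.randn(2, 352800)  # isolated stem to learn

loss = model(x, target = target)
loss.backward()

out = model(x)  # at inference: separated audio, trimmed to the istft length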
from setuptools import setup, find_packages

setup(
    name = 'invariant-point-attention',
    packages = find_packages(),
    version = '0.2.2',
    license='MIT',
    description = 'Invariant Point Attention',
    long_description_content_type = 'text/markdown',
    author = 'Phil Wang',
    author_email = '[email protected]',
    url = 'https://github.com/lucidrains/invariant-point-attention',
    keywords = [
        'artificial intelligence',
        'deep learning',
        'protein folding'
    ],
    install_requires=[
        'einops>=0.3',
        'torch>=1.7'
    ],
    setup_requires=[
        'pytest-runner',
    ],
    tests_require=[
        'pytest'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
)
invariant-point-attention-main
setup.py
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.optim import Adam

from einops import rearrange, repeat

import sidechainnet as scn
from invariant_point_attention import IPATransformer

BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY = 16

def cycle(loader, len_thres = 200):
    while True:
        for data in loader:
            if data.seqs.shape[1] > len_thres:
                continue
            yield data

net = IPATransformer(
    dim = 16,
    num_tokens = 21,
    depth = 5,
    require_pairwise_repr = False,
    predict_points = True
).cuda()

data = scn.load(
    casp_version = 12,
    thinning = 30,
    with_pytorch = 'dataloaders',
    batch_size = BATCH_SIZE,
    dynamic_batching = False
)

dl = cycle(data['train'])

optim = Adam(net.parameters(), lr=1e-3)

for _ in range(10000):
    for _ in range(GRADIENT_ACCUMULATE_EVERY):
        batch = next(dl)
        seqs, coords, masks = batch.seqs, batch.crds, batch.msks

        seqs = seqs.cuda().argmax(dim = -1)
        coords = coords.cuda()
        masks = masks.cuda().bool()

        l = seqs.shape[1]
        coords = rearrange(coords, 'b (l s) c -> b l s c', s = 14)

        # Keeping only the Ca atom
        coords = coords[:, :, 1, :]

        noised_coords = coords + torch.randn_like(coords)

        denoised_coords = net(
            seqs,
            translations = noised_coords,
            mask = masks
        )

        loss = F.mse_loss(denoised_coords[masks], coords[masks])
        (loss / GRADIENT_ACCUMULATE_EVERY).backward()

    print('loss:', loss.item())
    optim.step()
    optim.zero_grad()
invariant-point-attention-main
denoise.py
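Stripped of the sidechainnet plumbing, the forward contract this script relies on is simple: token ids plus noised translations in, denoised coordinates out. A minimal sketch (requires pytorch3d; `require_pairwise_repr = False` because no pairwise features are used, and the sequence length here is illustrative):

import torch
from invariant_point_attention import IPATransformer

model = IPATransformer(
    dim = 16,
    num_tokens = 21,
    depth = 5,
    require_pairwise_repr = False,
    predict_points = True
)

seq = torch.randint(0, 21, (1, 128))   # residue tokens
translations = torch.randn(1, 128, 3)  # e.g. noised Ca coordinates
mask = torch.ones(1, 128).bool()

denoised = model(seq, translations = translations, mask = mask)  # (1, 128, 3)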
import torch
from torch import nn
from einops import repeat

from invariant_point_attention import InvariantPointAttention, IPABlock
from invariant_point_attention.utils import rot

def test_ipa_invariance():
    attn = InvariantPointAttention(
        dim = 64,
        heads = 8,
        scalar_key_dim = 16,
        scalar_value_dim = 16,
        point_key_dim = 4,
        point_value_dim = 4
    )

    seq = torch.randn(1, 256, 64)
    pairwise_repr = torch.randn(1, 256, 256, 64)
    mask = torch.ones(1, 256).bool()

    rotations = repeat(rot(*torch.randn(3)), 'r1 r2 -> b n r1 r2', b = 1, n = 256)
    translations = torch.randn(1, 256, 3)

    # random rotation, for testing invariance

    random_rotation = rot(*torch.randn(3))

    # get outputs of IPA

    attn_out = attn(
        seq,
        pairwise_repr = pairwise_repr,
        rotations = rotations,
        translations = translations,
        mask = mask
    )

    rotated_attn_out = attn(
        seq,
        pairwise_repr = pairwise_repr,
        rotations = rotations @ random_rotation,
        translations = translations @ random_rotation,
        mask = mask
    )

    # output must be invariant

    diff = (attn_out - rotated_attn_out).max()
    assert diff <= 1e-6, 'must be invariant to global rotation'

def test_ipa_block_invariance():
    attn = IPABlock(
        dim = 64,
        heads = 8,
        scalar_key_dim = 16,
        scalar_value_dim = 16,
        point_key_dim = 4,
        point_value_dim = 4
    )

    seq = torch.randn(1, 256, 64)
    pairwise_repr = torch.randn(1, 256, 256, 64)
    mask = torch.ones(1, 256).bool()

    rotations = repeat(rot(*torch.randn(3)), 'r1 r2 -> b n r1 r2', b = 1, n = 256)
    translations = torch.randn(1, 256, 3)

    # random rotation, for testing invariance

    random_rotation = rot(*torch.randn(3))

    # get outputs of IPA

    attn_out = attn(
        seq,
        pairwise_repr = pairwise_repr,
        rotations = rotations,
        translations = translations,
        mask = mask
    )

    rotated_attn_out = attn(
        seq,
        pairwise_repr = pairwise_repr,
        rotations = rotations @ random_rotation,
        translations = translations @ random_rotation,
        mask = mask
    )

    # output must be invariant

    diff = (attn_out - rotated_attn_out).max()
    assert diff <= 1e-6, 'must be invariant to global rotation'
invariant-point-attention-main
tests/invariance.py
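Both checks apply a shared random global rotation to the frames and assert the two outputs agree to within 1e-6. They can be collected by pytest, or run directly without a test harness; this sketch assumes the repository root is the working directory and on sys.path:

# run the invariance checks directly (pytest would also collect them if pointed at the file)
from tests.invariance import test_ipa_invariance, test_ipa_block_invariance

test_ipa_invariance()
test_ipa_block_invariance()
print('both invariance checks passed')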
import torch
import torch.nn.functional as F
from torch.cuda.amp import autocast
from contextlib import contextmanager
from torch import nn, einsum

from einops.layers.torch import Rearrange
from einops import rearrange, repeat

# helpers

def exists(val):
    return val is not None

def default(val, d):
    return val if exists(val) else d

def max_neg_value(t):
    return -torch.finfo(t.dtype).max

@contextmanager
def disable_tf32():
    orig_value = torch.backends.cuda.matmul.allow_tf32
    torch.backends.cuda.matmul.allow_tf32 = False
    yield
    torch.backends.cuda.matmul.allow_tf32 = orig_value

# classes

class InvariantPointAttention(nn.Module):
    def __init__(
        self,
        *,
        dim,
        heads = 8,
        scalar_key_dim = 16,
        scalar_value_dim = 16,
        point_key_dim = 4,
        point_value_dim = 4,
        pairwise_repr_dim = None,
        require_pairwise_repr = True,
        eps = 1e-8
    ):
        super().__init__()
        self.eps = eps
        self.heads = heads
        self.require_pairwise_repr = require_pairwise_repr

        # num attention contributions

        num_attn_logits = 3 if require_pairwise_repr else 2

        # qkv projection for scalar attention (normal)

        self.scalar_attn_logits_scale = (num_attn_logits * scalar_key_dim) ** -0.5

        self.to_scalar_q = nn.Linear(dim, scalar_key_dim * heads, bias = False)
        self.to_scalar_k = nn.Linear(dim, scalar_key_dim * heads, bias = False)
        self.to_scalar_v = nn.Linear(dim, scalar_value_dim * heads, bias = False)

        # qkv projection for point attention (coordinate and orientation aware)

        point_weight_init_value = torch.log(torch.exp(torch.full((heads,), 1.)) - 1.)
        self.point_weights = nn.Parameter(point_weight_init_value)

        self.point_attn_logits_scale = ((num_attn_logits * point_key_dim) * (9 / 2)) ** -0.5

        self.to_point_q = nn.Linear(dim, point_key_dim * heads * 3, bias = False)
        self.to_point_k = nn.Linear(dim, point_key_dim * heads * 3, bias = False)
        self.to_point_v = nn.Linear(dim, point_value_dim * heads * 3, bias = False)

        # pairwise representation projection to attention bias

        pairwise_repr_dim = default(pairwise_repr_dim, dim) if require_pairwise_repr else 0

        if require_pairwise_repr:
            self.pairwise_attn_logits_scale = num_attn_logits ** -0.5

            self.to_pairwise_attn_bias = nn.Sequential(
                nn.Linear(pairwise_repr_dim, heads),
                Rearrange('b ... h -> (b h) ...')
            )

        # combine out - scalar dim + pairwise dim + point dim * (3 for coordinates in R3 and then 1 for norm)

        self.to_out = nn.Linear(heads * (scalar_value_dim + pairwise_repr_dim + point_value_dim * (3 + 1)), dim)

    def forward(
        self,
        single_repr,
        pairwise_repr = None,
        *,
        rotations,
        translations,
        mask = None
    ):
        x, b, h, eps, require_pairwise_repr = single_repr, single_repr.shape[0], self.heads, self.eps, self.require_pairwise_repr

        assert not (require_pairwise_repr and not exists(pairwise_repr)), 'pairwise representation must be given as second argument'

        # get queries, keys, values for scalar and point (coordinate-aware) attention pathways

        q_scalar, k_scalar, v_scalar = self.to_scalar_q(x), self.to_scalar_k(x), self.to_scalar_v(x)
        q_point, k_point, v_point = self.to_point_q(x), self.to_point_k(x), self.to_point_v(x)

        # split out heads

        q_scalar, k_scalar, v_scalar = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q_scalar, k_scalar, v_scalar))
        q_point, k_point, v_point = map(lambda t: rearrange(t, 'b n (h d c) -> (b h) n d c', h = h, c = 3), (q_point, k_point, v_point))

        rotations = repeat(rotations, 'b n r1 r2 -> (b h) n r1 r2', h = h)
        translations = repeat(translations, 'b n c -> (b h) n () c', h = h)

        # rotate qkv points into global frame

        q_point = einsum('b n d c, b n c r -> b n d r', q_point, rotations) + translations
        k_point = einsum('b n d c, b n c r -> b n d r', k_point, rotations) + translations
        v_point = einsum('b n d c, b n c r -> b n d r', v_point, rotations) + translations

        # derive attn logits for scalar and pairwise

        attn_logits_scalar = einsum('b i d, b j d -> b i j', q_scalar, k_scalar) * self.scalar_attn_logits_scale

        if require_pairwise_repr:
            attn_logits_pairwise = self.to_pairwise_attn_bias(pairwise_repr) * self.pairwise_attn_logits_scale

        # derive attn logits for point attention

        point_qk_diff = rearrange(q_point, 'b i d c -> b i () d c') - rearrange(k_point, 'b j d c -> b () j d c')
        point_dist = (point_qk_diff ** 2).sum(dim = (-1, -2))

        point_weights = F.softplus(self.point_weights)
        point_weights = repeat(point_weights, 'h -> (b h) () ()', b = b)

        attn_logits_points = -0.5 * (point_dist * point_weights * self.point_attn_logits_scale)

        # combine attn logits

        attn_logits = attn_logits_scalar + attn_logits_points

        if require_pairwise_repr:
            attn_logits = attn_logits + attn_logits_pairwise

        # mask

        if exists(mask):
            mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')
            mask = repeat(mask, 'b i j -> (b h) i j', h = h)
            mask_value = max_neg_value(attn_logits)
            attn_logits = attn_logits.masked_fill(~mask, mask_value)

        # attention

        attn = attn_logits.softmax(dim = -1)

        with disable_tf32(), autocast(enabled = False):
            # disable TF32 for precision

            # aggregate values

            results_scalar = einsum('b i j, b j d -> b i d', attn, v_scalar)

            attn_with_heads = rearrange(attn, '(b h) i j -> b h i j', h = h)

            if require_pairwise_repr:
                results_pairwise = einsum('b h i j, b i j d -> b h i d', attn_with_heads, pairwise_repr)

            # aggregate point values

            results_points = einsum('b i j, b j d c -> b i d c', attn, v_point)

            # rotate aggregated point values back into local frame

            results_points = einsum('b n d c, b n c r -> b n d r', results_points - translations, rotations.transpose(-1, -2))
            results_points_norm = torch.sqrt(torch.square(results_points).sum(dim = -1) + eps)

        # merge back heads

        results_scalar = rearrange(results_scalar, '(b h) n d -> b n (h d)', h = h)
        results_points = rearrange(results_points, '(b h) n d c -> b n (h d c)', h = h)
        results_points_norm = rearrange(results_points_norm, '(b h) n d -> b n (h d)', h = h)

        results = (results_scalar, results_points, results_points_norm)

        if require_pairwise_repr:
            results_pairwise = rearrange(results_pairwise, 'b h n d -> b n (h d)', h = h)
            results = (*results, results_pairwise)

        # concat results and project out

        results = torch.cat(results, dim = -1)
        return self.to_out(results)

# one transformer block based on IPA

def FeedForward(dim, mult = 1., num_layers = 2, act = nn.ReLU):
    layers = []
    dim_hidden = dim * mult

    for ind in range(num_layers):
        is_first = ind == 0
        is_last = ind == (num_layers - 1)

        dim_in = dim if is_first else dim_hidden
        dim_out = dim if is_last else dim_hidden

        layers.append(nn.Linear(dim_in, dim_out))

        if is_last:
            continue

        layers.append(act())

    return nn.Sequential(*layers)

class IPABlock(nn.Module):
    def __init__(
        self,
        *,
        dim,
        ff_mult = 1,
        ff_num_layers = 3,   # in the paper, they used 3 layer transition (feedforward) block
        post_norm = True,    # in the paper, they used post-layernorm - offering pre-norm as well
        post_attn_dropout = 0.,
        post_ff_dropout = 0.,
        **kwargs
    ):
        super().__init__()
        self.post_norm = post_norm

        self.attn_norm = nn.LayerNorm(dim)
        self.attn = InvariantPointAttention(dim = dim, **kwargs)
        self.post_attn_dropout = nn.Dropout(post_attn_dropout)

        self.ff_norm = nn.LayerNorm(dim)
        self.ff = FeedForward(dim, mult = ff_mult, num_layers = ff_num_layers)
        self.post_ff_dropout = nn.Dropout(post_ff_dropout)

    def forward(self, x, **kwargs):
        post_norm = self.post_norm

        attn_input = x if post_norm else self.attn_norm(x)
        x = self.attn(attn_input, **kwargs) + x
        x = self.post_attn_dropout(x)
        x = self.attn_norm(x) if post_norm else x

        ff_input = x if post_norm else self.ff_norm(x)
        x = self.ff(ff_input) + x
        x = self.post_ff_dropout(x)
        x = self.ff_norm(x) if post_norm else x

        return x

# add an IPA Transformer - iteratively updating rotations and translations

# this portion is not accurate to AF2, as AF2 applies a FAPE auxiliary loss on each layer, as well as a stop gradient on the rotations
# just an attempt to see if this could evolve to something more generally usable

class IPATransformer(nn.Module):
    def __init__(
        self,
        *,
        dim,
        depth,
        num_tokens = None,
        predict_points = False,
        detach_rotations = True,
        **kwargs
    ):
        super().__init__()

        # using quaternion functions from pytorch3d

        try:
            from pytorch3d.transforms import quaternion_multiply, quaternion_to_matrix
            self.quaternion_to_matrix = quaternion_to_matrix
            self.quaternion_multiply = quaternion_multiply
        except (ImportError, ModuleNotFoundError) as err:
            print('unable to import pytorch3d - please install with `conda install pytorch3d -c pytorch3d`')
            raise err

        # embedding

        self.token_emb = nn.Embedding(num_tokens, dim) if exists(num_tokens) else None

        # layers

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                IPABlock(dim = dim, **kwargs),
                nn.Linear(dim, 6)
            ]))

        # whether to detach rotations or not, for stability during training

        self.detach_rotations = detach_rotations

        # output

        self.predict_points = predict_points

        if predict_points:
            self.to_points = nn.Linear(dim, 3)

    def forward(
        self,
        single_repr,
        *,
        translations = None,
        quaternions = None,
        pairwise_repr = None,
        mask = None
    ):
        x, device, quaternion_multiply, quaternion_to_matrix = single_repr, single_repr.device, self.quaternion_multiply, self.quaternion_to_matrix
        b, n, *_ = x.shape

        if exists(self.token_emb):
            x = self.token_emb(x)

        # if no initial quaternions passed in, start from identity

        if not exists(quaternions):
            quaternions = torch.tensor([1., 0., 0., 0.], device = device)  # initial rotations
            quaternions = repeat(quaternions, 'd -> b n d', b = b, n = n)

        # if no translations passed in, start from identity

        if not exists(translations):
            translations = torch.zeros((b, n, 3), device = device)

        # go through the layers and apply invariant point attention and feedforward

        for block, to_update in self.layers:
            rotations = quaternion_to_matrix(quaternions)

            if self.detach_rotations:
                rotations = rotations.detach()

            x = block(
                x,
                pairwise_repr = pairwise_repr,
                rotations = rotations,
                translations = translations,
                mask = mask  # forward the padding mask to the attention block
            )

            # update quaternion and translation

            quaternion_update, translation_update = to_update(x).chunk(2, dim = -1)
            quaternion_update = F.pad(quaternion_update, (1, 0), value = 1.)
            quaternion_update = quaternion_update / torch.linalg.norm(quaternion_update, dim = -1, keepdim = True)

            quaternions = quaternion_multiply(quaternions, quaternion_update)
            translations = translations + einsum('b n c, b n c r -> b n r', translation_update, rotations)

        if not self.predict_points:
            return x, translations, quaternions

        points_local = self.to_points(x)
        rotations = quaternion_to_matrix(quaternions)
        points_global = einsum('b n c, b n c d -> b n d', points_local, rotations) + translations
        return points_global
invariant-point-attention-main
invariant_point_attention/invariant_point_attention.py
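For reference, the raw attention module is driven the same way as in the tests above: scalar features, pairwise features, and per-residue frames. A minimal sketch mirroring the defaults:

import torch
from einops import repeat
from invariant_point_attention import InvariantPointAttention
from invariant_point_attention.utils import rot

attn = InvariantPointAttention(dim = 64, heads = 8)

single_repr = torch.randn(1, 256, 64)         # (batch, seq, dim)
pairwise_repr = torch.randn(1, 256, 256, 64)  # (batch, seq, seq, dim)
mask = torch.ones(1, 256).bool()

rotations = repeat(rot(*torch.randn(3)), 'r1 r2 -> b n r1 r2', b = 1, n = 256)
translations = torch.randn(1, 256, 3)

out = attn(single_repr, pairwise_repr, rotations = rotations, translations = translations, mask = mask)  # (1, 256, 64)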
from invariant_point_attention.invariant_point_attention import InvariantPointAttention, IPABlock, IPATransformer
invariant-point-attention-main
invariant_point_attention/__init__.py
import torch
from torch import sin, cos, atan2, acos
from functools import wraps

def cast_torch_tensor(fn):
    @wraps(fn)
    def inner(t):
        if not torch.is_tensor(t):
            t = torch.tensor(t, dtype = torch.get_default_dtype())
        return fn(t)
    return inner

@cast_torch_tensor
def rot_z(gamma):
    return torch.tensor([
        [cos(gamma), -sin(gamma), 0],
        [sin(gamma), cos(gamma), 0],
        [0, 0, 1]
    ], dtype=gamma.dtype)

@cast_torch_tensor
def rot_y(beta):
    return torch.tensor([
        [cos(beta), 0, sin(beta)],
        [0, 1, 0],
        [-sin(beta), 0, cos(beta)]
    ], dtype=beta.dtype)

def rot(alpha, beta, gamma):
    return rot_z(alpha) @ rot_y(beta) @ rot_z(gamma)
invariant-point-attention-main
invariant_point_attention/utils.py
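The ZYZ Euler composition in `rot` should always yield a proper rotation. A quick sanity check, sketched here and not part of the repo's own tests:

import torch
from invariant_point_attention.utils import rot

R = rot(*torch.randn(3))  # rotation from three random Euler angles (ZYZ)

assert torch.allclose(R @ R.t(), torch.eye(3), atol = 1e-5)         # orthogonal
assert torch.allclose(torch.det(R), torch.tensor(1.), atol = 1e-5)  # determinant +1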
from setuptools import setup, find_packages

setup(
    name = 'toolformer-pytorch',
    packages = find_packages(exclude=[]),
    version = '0.0.27',
    license='MIT',
    description = 'Toolformer - Pytorch',
    author = 'Phil Wang',
    author_email = '[email protected]',
    long_description_content_type = 'text/markdown',
    url = 'https://github.com/lucidrains/toolformer-pytorch',
    keywords = [
        'artificial intelligence',
        'deep learning',
        'transformers',
        'attention mechanism',
        'automated-tool-use'
    ],
    install_requires=[
        'beartype',
        'einops>=0.4',
        'torch>=1.6',
        'tqdm',
        'x-clip'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
)
toolformer-pytorch-main
setup.py
import torch
from torch import nn, einsum
from einops import rearrange

from x_clip.tokenizer import tokenizer

# helpers

def exists(val):
    return val is not None

# normalization

class RMSNorm(nn.Module):
    def __init__(self, dim, eps = 1e-8):
        super().__init__()
        self.scale = dim ** -0.5
        self.eps = eps
        self.g = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
        return x / norm.clamp(min = self.eps) * self.g

# rotary positional embedding
# https://arxiv.org/abs/2104.09864

class RotaryEmbedding(nn.Module):
    def __init__(self, dim):
        super().__init__()
        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, max_seq_len, *, device):
        seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
        freqs = einsum("i , j -> i j", seq, self.inv_freq)
        return torch.cat((freqs, freqs), dim=-1)

def rotate_half(x):
    x = rearrange(x, "... (j d) -> ... j d", j=2)
    x1, x2 = x.unbind(dim=-2)
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(pos, t):
    return (t * pos.cos()) + (rotate_half(t) * pos.sin())

# all we need

class ParallelTransformerBlock(nn.Module):
    def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
        super().__init__()
        self.norm = RMSNorm(dim)

        attn_inner_dim = dim_head * heads
        ff_inner_dim = dim * ff_mult
        self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim))

        self.heads = heads
        self.scale = dim_head**-0.5
        self.rotary_emb = RotaryEmbedding(dim_head)

        self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
        self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)

        self.ff_out = nn.Sequential(
            nn.GELU(),
            nn.Linear(ff_inner_dim, dim, bias=False)
        )

        # for caching causal mask and rotary embeddings

        self.register_buffer("mask", None, persistent=False)
        self.register_buffer("pos_emb", None, persistent=False)

    def get_mask(self, n, device):
        if self.mask is not None and self.mask.shape[-1] >= n:
            return self.mask[:n, :n]

        mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
        self.register_buffer("mask", mask, persistent=False)
        return mask

    def get_rotary_embedding(self, n, device):
        if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
            return self.pos_emb[:n]

        pos_emb = self.rotary_emb(n, device=device)
        self.register_buffer("pos_emb", pos_emb, persistent=False)
        return pos_emb

    def forward(self, x):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """

        n, device, h = x.shape[1], x.device, self.heads

        # pre layernorm

        x = self.norm(x)

        # attention queries, keys, values, and feedforward inner

        q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)

        # split heads
        # they use multi-query single-key-value attention, yet another Noam Shazeer paper
        # they found no performance loss past a certain scale, and more efficient decoding obviously
        # https://arxiv.org/abs/1911.02150

        q = rearrange(q, "b n (h d) -> b h n d", h=h)

        # rotary embeddings

        positions = self.get_rotary_embedding(n, device)
        q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))

        # scale

        q = q * self.scale

        # similarity

        sim = einsum("b h i d, b j d -> b h i j", q, k)

        # causal mask

        causal_mask = self.get_mask(n, device)
        sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)

        # attention

        attn = sim.softmax(dim=-1)

        # aggregate values

        out = einsum("b h i j, b j d -> b h i d", attn, v)

        # merge heads

        out = rearrange(out, "b h n d -> b n (h d)")
        return self.attn_out(out) + self.ff_out(ff)

# Transformer

class Transformer(nn.Module):
    def __init__(
        self,
        dim,
        depth,
        heads,
        dim_head,
        ff_mult = 4,
    ):
        super().__init__()
        self.layers = nn.ModuleList([])

        for _ in range(depth):
            self.layers.append(
                ParallelTransformerBlock(dim, dim_head, heads, ff_mult),
            )

    def forward(self, x):
        for block in self.layers:
            x = block(x) + x
        return x

# classes

class PaLM(nn.Module):
    def __init__(
        self,
        dim,
        depth,
        num_tokens=tokenizer.vocab_size,
        dim_head=64,
        heads=8,
        ff_mult=4,
    ):
        super().__init__()
        self.emb = nn.Embedding(num_tokens, dim)

        self.transformer = Transformer(dim, depth, heads, dim_head, ff_mult)

        self.to_logits = nn.Sequential(
            RMSNorm(dim),
            nn.Linear(dim, num_tokens)
        )

    def forward(self, x):
        x = self.emb(x)
        x = self.transformer(x)
        return self.to_logits(x)

if __name__ == "__main__":
    palm = PaLM(
        num_tokens = 20000,
        dim = 512,
        depth = 6,
        dim_head = 64,
        heads = 8,
        ff_mult = 4,
    )

    tokens = torch.randint(0, 20000, (1, 512))
    logits = palm(tokens)
    print(logits.shape)
toolformer-pytorch-main
toolformer_pytorch/palm.py
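The `__main__` block above only demonstrates a forward pass; decoding is handled by the sampling utilities further down in the package. For intuition, a naive greedy loop over the logits would look like the sketch below. This is illustrative only (it recomputes the full prefix every step), not the repo's `sample` function, which additionally manages per-sequence cursors and <api> tokens:

import torch
from toolformer_pytorch.palm import PaLM

palm = PaLM(num_tokens = 20000, dim = 512, depth = 6)

ids = torch.randint(0, 20000, (1, 1))  # arbitrary single-token prime

for _ in range(32):  # greedy, one token at a time
    logits = palm(ids)
    next_id = logits[:, -1].argmax(dim = -1, keepdim = True)
    ids = torch.cat((ids, next_id), dim = -1)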
import os

try:
    from dotenv import load_dotenv
    load_dotenv()

    import requests
    import calendar
    import wolframalpha
    import datetime
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    from operator import pow, truediv, mul, add, sub

    # Optional imports
    from googleapiclient.discovery import build

except ImportError:
    print('please run `pip install -r tools-requirements.txt` first at project directory')
    exit()

'''
Calendar

Uses Python's datetime and calendar libraries to retrieve the current date.

input - None

output - A string, the current date.
'''
def Calendar():
    now = datetime.datetime.now()
    return f'Today is {calendar.day_name[now.weekday()]}, {calendar.month_name[now.month]} {now.day}, {now.year}.'


'''
Wikipedia Search

Uses ColBERTv2 to retrieve Wikipedia documents.

input_query - A string, the input query (e.g. "what is a dog?")
k - The number of documents to retrieve

output - A list of strings, each string is a Wikipedia document

Adapted from Stanford's DSP: https://github.com/stanfordnlp/dsp/
Also see: https://github.com/lucabeetz/dsp
'''
class ColBERTv2:
    def __init__(self, url: str):
        self.url = url

    def __call__(self, query, k=10):
        topk = colbertv2_get_request(self.url, query, k)
        topk = [doc['text'] for doc in topk]
        return topk

def colbertv2_get_request(url: str, query: str, k: int):
    payload = {'query': query, 'k': k}
    res = requests.get(url, params=payload)
    topk = res.json()['topk'][:k]
    return topk

def WikiSearch(
    input_query: str,
    url: str = 'http://ec2-44-228-128-229.us-west-2.compute.amazonaws.com:8893/api/search',
    k: int = 10
):
    retrieval_model = ColBERTv2(url)
    output = retrieval_model(input_query, k)
    return output


'''
Machine Translation - NLLB-600M

Uses HuggingFace's transformers library to translate input query to English.

input_query - A string, the input query (e.g. "what is a dog?")

output - A string, the translated input query.
'''
def MT(input_query: str, model_name: str = "facebook/nllb-200-distilled-600M"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    input_ids = tokenizer(input_query, return_tensors='pt')
    outputs = model.generate(
        **input_ids,
        forced_bos_token_id=tokenizer.lang_code_to_id["eng_Latn"],
    )
    output = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    return output


'''
Calculator

Calculates the result of a mathematical expression.

input_query - A string, the input query (e.g. "400/1400")

output - A float, the result of the calculation

Adapted from: https://levelup.gitconnected.com/3-ways-to-write-a-calculator-in-python-61642f2e4a9a
'''
def Calculator(input_query: str):
    operators = {
        '+': add,
        '-': sub,
        '*': mul,
        '/': truediv
    }
    if input_query.isdigit():
        return float(input_query)
    for c in operators.keys():
        left, operator, right = input_query.partition(c)
        if operator in operators:
            return round(operators[operator](Calculator(left), Calculator(right)), 2)


# Other Optional Tools

'''
Wolfram Alpha Calculator

pip install wolframalpha

Uses Wolfram Alpha API to calculate input query.

input_query - A string, the input query (e.g. "what is 2 + 2?")

output - A string, the answer to the input query

wolfram_alpha_appid - your Wolfram Alpha API key
'''
def WolframAlphaCalculator(input_query: str):
    wolfram_alpha_appid = os.environ.get('WOLFRAM_ALPHA_APPID')
    wolfram_client = wolframalpha.Client(wolfram_alpha_appid)
    res = wolfram_client.query(input_query)
    assumption = next(res.pods).text
    answer = next(res.results).text
    return f'Assumption: {assumption} \nAnswer: {answer}'


'''
Google Search

Uses Google's Custom Search API to retrieve Google Search results.

input_query - The query to search for.
num_results - The number of results to return.
api_key - Your Google API key.
cse_id - Your Google Custom Search Engine ID.

output - A list of dictionaries, each dictionary is a Google Search result
'''
def custom_search(query, api_key, cse_id, **kwargs):
    service = build("customsearch", "v1", developerKey=api_key)
    res = service.cse().list(q=query, cx=cse_id, **kwargs).execute()
    return res['items']

def google_search(input_query: str, num_results: int = 10):
    api_key = os.environ.get('GOOGLE_API_KEY')
    cse_id = os.environ.get('GOOGLE_CSE_ID')

    metadata_results = []
    results = custom_search(input_query, num=num_results, api_key=api_key, cse_id=cse_id)
    for result in results:
        metadata_result = {
            "snippet": result["snippet"],
            "title": result["title"],
            "link": result["link"],
        }
        metadata_results.append(metadata_result)
    return metadata_results


'''
Bing Search

Uses Bing's Custom Search API to retrieve Bing Search results.

input_query: The query to search for.
bing_subscription_key: Your Bing API key.
num_results: The number of results to return.

output: A list of dictionaries, each dictionary is a Bing Search result
'''
def _bing_search_results(
    search_term: str,
    bing_subscription_key: str,
    count: int,
    url: str = "https://api.bing.microsoft.com/v7.0/search"
):
    headers = {"Ocp-Apim-Subscription-Key": bing_subscription_key}
    params = {
        "q": search_term,
        "count": count,
        "textDecorations": True,
        "textFormat": "HTML",
    }
    response = requests.get(
        url, headers=headers, params=params
    )
    response.raise_for_status()
    search_results = response.json()
    return search_results["webPages"]["value"]

def bing_search(
    input_query: str,
    num_results: int = 10
):
    bing_subscription_key = os.environ.get("BING_API_KEY")

    metadata_results = []
    results = _bing_search_results(input_query, bing_subscription_key, count=num_results)
    for result in results:
        metadata_result = {
            "snippet": result["snippet"],
            "title": result["name"],
            "link": result["url"],
        }
        metadata_results.append(metadata_result)
    return metadata_results


if __name__ == '__main__':
    print(Calendar())  # Outputs a string, the current date

    print(Calculator('400/1400'))  # For Optional Basic Calculator

    print(WikiSearch('What is a dog?'))  # Outputs a list of strings, each string is a Wikipedia document

    print(MT("Un chien c'est quoi?"))  # What is a dog?

    # Optional Tools

    print(WolframAlphaCalculator('What is 2 + 2?'))  # 4

    print(google_search('What is a dog?'))  # Outputs a list of dictionaries, each dictionary is a Google Search result

    print(bing_search('What is a dog?'))  # Outputs a list of dictionaries, each dictionary is a Bing Search result
toolformer-pytorch-main
toolformer_pytorch/tools.py
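Note that `Calculator` recursively splits on the first operator it finds in the fixed order `+ - * /`, so it handles simple chains but has no notion of parentheses. A short sketch, assuming the optional tool dependencies are installed (the module calls exit() on import otherwise):

from toolformer_pytorch.tools import Calculator

print(Calculator('400/1400'))  # 0.29
print(Calculator('18+12*3'))   # 54.0: '+' splits first, then '*' inside the right operand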
from toolformer_pytorch.palm import PaLM

from toolformer_pytorch.toolformer_pytorch import (
    Toolformer,
    filter_tokens_with_api_response,
    sample,
    sample_with_api_call,
    has_api_calls,
    invoke_tools,
    replace_all_but_first
)
toolformer-pytorch-main
toolformer_pytorch/__init__.py
DEFAULT_PROMPT_INPUT_TAG = '[input]'

calculator_prompt = f"""
Your task is to add calls to a Calculator API to a piece of text.
The calls should help you get information required to complete the text.
You can call the API by writing "[Calculator(expression)]" where "expression" is the expression to be computed.
Here are some examples of API calls:

Input: The number in the next term is 18 + 12 x 3 = 54.
Output: The number in the next term is 18 + 12 x 3 = [Calculator(18 + 12 * 3)] 54.

Input: The population is 658,893 people. This is 11.4% of the national average of 5,763,868 people.
Output: The population is 658,893 people. This is 11.4% of the national average of [Calculator(658,893 / 11.4%)] 5,763,868 people.

Input: A total of 252 qualifying matches were played, and 723 goals were scored (an average of 2.87 per match). This is three times less than the 2169 goals last year.
Output: A total of 252 qualifying matches were played, and 723 goals were scored (an average of [Calculator(723 / 252)] 2.87 per match). This is twenty goals more than the [Calculator(723 - 20)] 703 goals last year.

Input: I went to Paris in 1994 and stayed there until 2011, so in total, it was 17 years.
Output: I went to Paris in 1994 and stayed there until 2011, so in total, it was [Calculator(2011 - 1994)] 17 years.

Input: From this, we have 4 * 30 minutes = 120 minutes.
Output: From this, we have 4 * 30 minutes = [Calculator(4 * 30)] 120 minutes.

Input: {DEFAULT_PROMPT_INPUT_TAG}
Output:
"""

wikipedia_search_prompt = f"""
Your task is to complete a given piece of text.
You can use a Wikipedia Search API to look up information.
You can do so by writing "[WikiSearch(term)]" where "term" is the search term you want to look up.
Here are some examples of API calls:

Input: The colors on the flag of Ghana have the following meanings: red is for the blood of martyrs, green for forests, and gold for mineral wealth.
Output: The colors on the flag of Ghana have the following meanings: red is for [WikiSearch("Ghana flag red meaning")] the blood of martyrs, green for forests, and gold for mineral wealth.

Input: But what are the risks during production of nanomaterials? Some nanomaterials may give rise to various kinds of lung damage.
Output: But what are the risks during production of nanomaterials? [WikiSearch("nanomaterial production risks")] Some nanomaterials may give rise to various kinds of lung damage.

Input: Metformin is the first-line drug for patients with type 2 diabetes and obesity.
Output: Metformin is the first-line drug for [WikiSearch("Metformin first-line drug")] patients with type 2 diabetes and obesity.

Input: {DEFAULT_PROMPT_INPUT_TAG}
Output:
"""

machine_translation_prompt = f"""
Your task is to complete a given piece of text by using a Machine Translation API.
You can do so by writing "[MT(text)]" where text is the text to be translated into English.
Here are some examples:

Input: He has published one book: O homem suprimido (“The Supressed Man”)
Output: He has published one book: O homem suprimido [MT(O homem suprimido)] (“The Supressed Man”)

Input: In Morris de Jonge’s Jeschuah, der klassische jüdische Mann, there is a description of a Jewish writer
Output: In Morris de Jonge’s Jeschuah, der klassische jüdische Mann [MT(der klassische jüdische Mann)], there is a description of a Jewish writer

Input: 南 京 高 淳 县 住 房 和 城 乡 建 设 局 城 市 新 区 设 计 a plane of reference Gaochun is one of seven districts of the provincial capital Nanjing
Output: [MT(南京高淳县住房和城乡建设局 城市新 区 设 计)] a plane of reference Gaochun is one of seven districts of the provincial capital Nanjing

Input: {DEFAULT_PROMPT_INPUT_TAG}
Output:
"""

calendar_prompt = f"""
Your task is to add calls to a Calendar API to a piece of text.
The API calls should help you get information required to complete the text.
You can call the API by writing "[Calendar()]"
Here are some examples of API calls:

Input: Today is the first Friday of the year.
Output: Today is the first [Calendar()] Friday of the year.

Input: The president of the United States is Joe Biden.
Output: The president of the United States is [Calendar()] Joe Biden.

Input: The current day of the week is Wednesday.
Output: The current day of the week is [Calendar()] Wednesday.

Input: The number of days from now until Christmas is 30.
Output: The number of days from now until Christmas is [Calendar()] 30.

Input: The store is never open on the weekend, so today it is closed.
Output: The store is never open on the weekend, so today [Calendar()] it is closed.

Input: {DEFAULT_PROMPT_INPUT_TAG}
Output:
"""
toolformer-pytorch-main
toolformer_pytorch/prompts.py
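Each prompt ends with an `Input: [input]` slot; the dataset pipeline substitutes the raw passage for `DEFAULT_PROMPT_INPUT_TAG` before tokenizing (via `re.sub` in `PromptDataset` below). A minimal sketch of that substitution using plain string replacement:

from toolformer_pytorch.prompts import calculator_prompt, DEFAULT_PROMPT_INPUT_TAG

passage = 'The attendance rose from 1,000 to 3,000, a threefold increase.'
filled = calculator_prompt.replace(DEFAULT_PROMPT_INPUT_TAG, passage)

assert passage in filled  # the few-shot prompt now ends with the passage to annotate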
import re
import math
from functools import partial, wraps
from collections import namedtuple

import torch
from torch import nn
import torch.nn.functional as F

from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence

from einops import rearrange, reduce

from toolformer_pytorch.palm import PaLM
from toolformer_pytorch.optimizer import get_optimizer
from toolformer_pytorch.prompts import DEFAULT_PROMPT_INPUT_TAG

from beartype import beartype
from beartype.typing import Callable, Optional, Union, List, Tuple

from tqdm import tqdm
from x_clip.tokenizer import tokenizer

pad_sequence = partial(pad_sequence, batch_first = True)

# helpers

def exists(val):
    return val is not None

def default(val, d):
    return val if exists(val) else d

def identity(t):
    return t

def always(val):
    def inner(*args, **kwargs):
        return val
    return inner

def try_except(fn, callback = identity):
    @wraps(fn)
    def inner(*args):
        try:
            return fn(*args)
        except Exception as e:
            return callback(e)
    return inner

# tensor helpers

def log(t, eps = 1e-20):
    return t.clamp(min = eps).log()

def gumbel_noise(t):
    noise = torch.zeros_like(t).uniform_(0, 1)
    return -log(-log(noise))

def gumbel_sample(t, temperature = 1., dim = -1, eps = 1e-10):
    if temperature == 0:
        return t.argmax(dim = dim)

    return ((t / max(temperature, eps)) + gumbel_noise(t)).argmax(dim = dim)

def top_k(logits, thres = 0.9):
    k = math.ceil((1 - thres) * logits.shape[-1])
    val, indices = torch.topk(logits, k)
    probs = torch.full_like(logits, -torch.finfo(logits.dtype).max)
    probs.scatter_(1, indices, val)
    return probs

def all_contains_id(t: torch.Tensor, token_id: int):
    mask = t == token_id
    return mask.any(dim = -1).all()

def find_indices_of(t: torch.Tensor, token_id: int, occurrence = 1):
    assert occurrence > 0
    mask = (t == token_id)

    has_occurred = mask.cumsum(dim = -1)
    has_occurred = F.pad(has_occurred, (1, 0), value = 0.)

    return (has_occurred < occurrence).sum(dim = -1).long()

# invoking api call functions

def is_valid_string(s):
    return exists(re.fullmatch(r"'[^']*'|\"[^\"]*\"", s))

def is_valid_integer(s):
    return exists(re.fullmatch(r"[+-]?\d+", s))

def is_valid_float(s):
    return exists(re.fullmatch(r"[+-]?\d+(\.\d+)?", s))

def parse_param(s: str) -> Optional[Union[int, float, str]]:
    if is_valid_string(s):
        return str(s)
    elif is_valid_integer(s):
        return int(s)
    elif is_valid_float(s):
        return float(s)

    return None

@beartype
def replace_fn(
    registry: dict[str, Callable],
    matches,
    delimiter = '→'
):
    orig_text = matches.group(0)

    text_without_end_api_token = matches.group(1)
    end_api_token = matches.group(4)
    function_name = matches.group(2)

    # unable to find function in registry

    if function_name not in registry:
        return orig_text

    fn = registry[function_name]

    params = matches.group(3).split(',')
    params = list(map(lambda s: s.strip(), params))
    params = list(filter(len, params))
    params = list(map(parse_param, params))

    # if any of the parameters are not parseable, return

    if any([(not exists(p)) for p in params]):
        return orig_text

    # just return original text if there is some error with the function

    out = try_except(fn, always(None))(*params)

    # the api calling function can also arrest the process, by returning None

    if not exists(out):
        return orig_text

    # return original text with the output delimiter and the stringified output

    return f'{text_without_end_api_token} {delimiter} {str(out)} {end_api_token}'

# main function, which takes a registry of functions, the text in question, and makes all the appropriate api calls and append the output

def create_function_regex(
    api_start = ' [',
    api_stop = ']'
):
    api_start_regex, api_stop_regex = map(re.escape, (api_start, api_stop))
    return rf'({api_start_regex}(\w+)\(([^)]*)\))({api_stop_regex})'

def num_matches(substr: str, text: str):
    return len(re.findall(re.escape(substr), text))

def has_api_calls(
    text,
    api_start = ' [',
    api_stop = ']'
):
    regex = create_function_regex(api_start, api_stop)
    matches = re.findall(regex, text)
    return len(matches) > 0

def replace_all_but_first(
    text: str,
    api_start = ' [',
    api_stop = ']'
) -> str:
    regex = create_function_regex(api_start, api_stop)

    count = 0

    def replace_(matches):
        orig_text = matches.group(0)
        nonlocal count
        count += 1
        if count > 1:
            return ''

        return orig_text

    return re.sub(regex, replace_, text)

def invoke_tools(
    registry: dict[str, Callable],
    text: str,
    delimiter: str = '→',
    api_start = ' [',
    api_stop = ' ]'
) -> str:
    regex = create_function_regex(api_start, api_stop)
    replace_ = partial(replace_fn, registry, delimiter = delimiter)
    return re.sub(regex, replace_, text)

def invoke_tools_on_batch_sequences(
    registry: dict[str, Callable],
    token_ids: torch.Tensor,
    *,
    encode: Callable,
    decode: Callable,
    delimiter: str = '→',
    api_start = ' [',
    api_stop = ']'
) -> torch.Tensor:
    regex = create_function_regex(api_start, api_stop)
    all_texts = [decode(one_seq_token_ids) for one_seq_token_ids in token_ids]

    invoke_tools_ = partial(invoke_tools, api_start = api_start, api_stop = api_stop)
    all_texts_with_api_calls = [invoke_tools_(registry, text, delimiter) for text in all_texts]

    return encode(all_texts_with_api_calls)

# sampling api related functions
# they do greedy sampling, but encourage sampling api calls by auto-selecting <api> when that token is in the top k = 10

@beartype
@torch.no_grad()
def sample(
    model: nn.Module,
    *,
    seq_len,
    prime: Optional[torch.Tensor] = None,
    positions: Optional[torch.Tensor] = None,
    batch_size = 1,
    eos_token_id = None,
    sos_token_id = 1,
    temperature = 0.,
    pad_id = 0,
    call_api_only_once = False,
    api_start_token_id = None,
    auto_select_api_start_token_when_topk = False,
    select_api_start_id_top_k = 10,
):
    device = next(model.parameters()).device
    max_seq_len = seq_len + 1

    # validate

    if call_api_only_once:
        assert exists(api_start_token_id)

    # prime

    if exists(prime):
        batch_size, prime_length = prime.shape
    else:
        prime_length = 1
        prime = torch.full((batch_size, 1), sos_token_id, device = device, dtype = torch.long)

    prime = prime.to(device)

    # sampling positions - different sequences have different cursors

    if exists(positions):
        positions = positions.clone()
    else:
        positions = torch.zeros((batch_size,), device = device, dtype = torch.long)

    assert (positions <= (prime_length + 1)).all() and (positions <= max_seq_len).all(), 'all positions must be less than initial prime length as well as the total sequence length + 1 (plus one for noop if one sequence finished sampling before the other)'

    # eval model

    model.eval()

    # lengthen the prime to the entire sequence length

    remain_iterations = seq_len - prime_length
    output = F.pad(prime, (0, max_seq_len - prime_length), value = 0.)

    batch_indices = torch.arange(batch_size, device = device)
    batch_indices = rearrange(batch_indices, 'b -> b 1')
    position_indices = rearrange(positions, 'b -> b 1')

    # determine the <api> token mask, for making sure api is called only once, masking out logit to prevent it from being selected for those rows which already contains an <api> token

    api_token_mask = None  # lazily created, since do not know logit dimensions

    def create_api_token_mask(num_tokens, api_start_token_id):
        mask = torch.zeros((1, 1, num_tokens), dtype = torch.bool)
        assert api_start_token_id < num_tokens
        mask[..., api_start_token_id] = True
        return mask

    # start iterating

    for iteration in tqdm(range(remain_iterations)):
        logits = model(output)
        last_logits = logits[batch_indices, position_indices]

        # this will ensure that each batch token sequence will have at most one <api> token

        if call_api_only_once:
            if not exists(api_token_mask):
                num_tokens = last_logits.shape[-1]
                api_token_mask = create_api_token_mask(num_tokens, api_start_token_id)
                api_token_mask = api_token_mask.to(device)

            api_called = (output == api_start_token_id).any(dim = -1)

            logit_mask = api_token_mask & rearrange(api_called, 'b -> b 1 1')
            last_logits = last_logits.masked_fill(logit_mask, -torch.finfo(last_logits.dtype).max)

        # greedy sample (but could be made non-greedy)

        sampled = gumbel_sample(last_logits, temperature = temperature)

        # for those sequences without an api call, if the api_start_token_id is within top k (set to 10 in paper) of logits, just auto-select
        # seems to be an important hack in the paper
        # it seems like this paper will take a lot more follow up research to be viable

        if auto_select_api_start_token_when_topk:
            top_token_ids = last_logits.topk(select_api_start_id_top_k, dim = -1).indices
            has_api_token_in_topk = (top_token_ids == api_start_token_id).any(dim = -1)
            should_auto_select_api_token = has_api_token_in_topk & ~rearrange(api_called, 'b -> b 1')

            sampled = sampled.masked_fill(should_auto_select_api_token, api_start_token_id)

        # set the sampled tokens at the right cursor positions

        output[batch_indices, position_indices] = sampled

        # increment positions

        position_indices += 1
        position_indices.clamp_(max = seq_len)  # noop if one sequence is further along and near the end

        # if using <eos> tokens, look for all sequences having it and terminate, also anything after <eos> will be padded

        if exists(eos_token_id):
            eos_mask = (output == eos_token_id)
            all_rows_have_eos = eos_mask.any(dim = -1).all()

            if all_rows_have_eos:
                # shift the keep mask right by one so the <eos> token itself is kept
                keep_mask = eos_mask.cumsum(dim = -1) == 0
                keep_mask = F.pad(keep_mask, (1, 0), value = True)[:, :-1]
                output = output.masked_fill(~keep_mask, pad_id)
                break

    # remove the last token in output (use as noop placeholder)

    output = output[:, :-1]
    return output

@beartype
@torch.no_grad()
def sample_with_api_call(
    model: nn.Module,
    *,
    seq_len,
    call_apis: Callable,
    prime: torch.Tensor,
    api_end_token_id: int,
    occurrence = 1,
    **kwargs
):
    sampled = sample(
        model = model,
        prime = prime,
        seq_len = seq_len,
        **kwargs
    )

    sampled = call_apis(sampled)

    sampled_seq_len = sampled.shape[-1]

    null_positions = sampled_seq_len  # handle sequences that do not have api calls

    pos_starting_at_end_of_api = find_indices_of(
        sampled,
        api_end_token_id,
        occurrence = occurrence
    )

    resample_after_api_calls = sample(
        model = model,
        prime = sampled,
        seq_len = sampled_seq_len,
        positions = (pos_starting_at_end_of_api + 1).clamp(max = null_positions),  # start at the position right after the </api>
        **kwargs
    )

    return resample_after_api_calls

# the main contribution of the paper is simply the filtering equations presented in section 2

def default_weight_fn(t):
    # following the formula in section 4.1 - however, not sure what w_s is in the denominator
    # if t stands for each timestep, this would also mean within 5 tokens it would diminish to 0?
    return (1. - t * 0.2).clamp(min = 0.)

def get_pred_prob(token_ids, logits):
    logits = logits[:, :-1]       # logits of each token... (omit last logit)
    token_ids = token_ids[:, 1:]  # predicts the next token id (omit first token id)

    token_ids = rearrange(token_ids, 'b n -> b n 1')
    probs = logits.softmax(dim = -1)
    correct_token_id_pred_prob = probs.gather(-1, token_ids)
    return rearrange(correct_token_id_pred_prob, 'b n 1 -> b n')

def get_arange_start_at_token_id(
    token_ids: torch.Tensor,
    token_id: int,
    pad_id = -1
):
    is_token_id_mask = token_ids == token_id
    arange = (is_token_id_mask.cumsum(dim = -1) > 0).cumsum(dim = -1)
    before_token_mask = arange == 0
    arange = arange - 1
    arange = arange.masked_fill(before_token_mask, pad_id)
    return arange

def weight_and_mask(
    token_ids: torch.Tensor,
    token_id: int,
    pad_id = -1,
    weighting_fn: Callable = default_weight_fn
):
    t = get_arange_start_at_token_id(token_ids, token_id, pad_id)
    weights = weighting_fn(t)
    return weights.masked_fill(t == pad_id, 0.)  # zero out positions before the token of interest

FilteredResults = namedtuple('FilteredResults', [
    'num_passed',
    'num_failed',
    'selected_indices',
    'selected_mask',
    'filtered_tokens',
    'filtered_tokens_without_api_response',
    'filtered_tokens_with_api_response'
])

@beartype
def filter_tokens_with_api_response(
    model: nn.Module,                           # the language model should accept the token ids below and return the logits in shape (batch, seq, num tokens)
    *,
    tokens: torch.Tensor,                       # token ids (batch, seq) of the original passage, without api calls
    tokens_without_api_response: torch.Tensor,  # token ids (batch, seq) of the passage, but with the api call (but without a response filled in) - <api>tool1(x, y)</api>
    tokens_with_api_response: torch.Tensor,     # token ids (batch, seq) of the passage with api call and the response - <api>tool1(x, y) → {response}</api>
    api_start_token_id: int,                    # token id of the <api> tag
    api_end_token_id: int,                      # token id of the </api> tag
    filter_threshold: float = 1.,               # the threshold at which to accept the sampled api call (tokens_with_api_response) for fine-tuning
    weighting_fn: Callable = default_weight_fn  # weighting function
) -> FilteredResults:

    # validations

    assert all([*map(lambda t: t.dtype == torch.long, (tokens, tokens_with_api_response, tokens_without_api_response))])

    assert all_contains_id(tokens_without_api_response, api_start_token_id)
    assert all_contains_id(tokens_without_api_response, api_end_token_id)

    assert all_contains_id(tokens_with_api_response, api_start_token_id)
    assert all_contains_id(tokens_with_api_response, api_end_token_id)

    # auto set devices

    device = next(model.parameters()).device
    tokens, tokens_without_api_response, tokens_with_api_response = map(lambda t: t.to(device), (tokens, tokens_without_api_response, tokens_with_api_response))

    # get all the logits

    with torch.no_grad():
        model.eval()
        logits, logits_without_api_response, logits_with_api_response = map(model, (tokens, tokens_without_api_response, tokens_with_api_response))

    # derive all predicted prob of the actual next token id in sequence

    probs = get_pred_prob(tokens, logits)
    probs_without_api_response = get_pred_prob(tokens_without_api_response, logits_without_api_response)
    probs_with_api_response = get_pred_prob(tokens_with_api_response, logits_with_api_response)

    weight_and_mask_fn = partial(weight_and_mask, weighting_fn = weighting_fn)

    # derive the weighting

    weight_without_api_response = weight_and_mask_fn(tokens_without_api_response[:, :-1], api_end_token_id)
    weight_with_api_response = weight_and_mask_fn(tokens_with_api_response[:, :-1], api_end_token_id)

    # deriving the weighting for the original passage is more tricky
    # would need to start counting up from <api> start token location
    # this would also assume that the language model perfectly copied the passage over and that both token ids are aligned except for the inserted API call - but this can be done with the custom filtering functions eventually

    weight = weight_and_mask_fn(tokens_without_api_response[:, 1:], api_start_token_id)  # shift to the left by one since <api> does not exist in the original sequence
    weight = weight[:, :probs.shape[-1]]

    # get the loss L for all three types of sequences

    def loss_fn(weight, probs):
        return (weight * -log(probs)).sum(dim = -1)

    loss = loss_fn(weight, probs)
    loss_without_api_response = loss_fn(weight_without_api_response, probs_without_api_response)
    loss_with_api_response = loss_fn(weight_with_api_response, probs_with_api_response)

    # calculate the main formula in the paper

    # loss+ = loss with api response
    # loss- = min(loss without api response, loss without api at all)

    loss_plus = loss_with_api_response
    loss_minus = torch.minimum(loss_without_api_response, loss)

    selected_mask = (loss_minus - loss_plus) >= filter_threshold

    # now we can select and return the entries that survived the filtering stage
    # also returning the selected indices of the batch being processed
    # for finetuning the model into toolformer

    batch = tokens.shape[0]
    indices = torch.arange(batch, device = tokens.device)

    selected_indices = indices[selected_mask]

    ret = FilteredResults(
        selected_mask.sum().item(),
        (~selected_mask).sum().item(),
        selected_indices,
        selected_mask,
        tokens[selected_mask],
        tokens_without_api_response[selected_mask],
        tokens_with_api_response[selected_mask]
    )

    return ret

# datasets and dataloaders

# for bootstrapping the initial datasets with api calls
# as well as for the final finetuning

@beartype
class PromptDataset(Dataset):
    def __init__(
        self,
        prompt: str,
        prompt_input_tag: str,
        data: List[str],
        tokenizer_encode: Callable
    ):
        self.data = data
        self.prompt = prompt
        self.prompt_input_tag_regex = re.escape(prompt_input_tag)
        self.tokenizer_encode = tokenizer_encode

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        data_string = self.data[idx]
        data_with_prompt = re.sub(self.prompt_input_tag_regex, data_string, self.prompt)
        token_ids = self.tokenizer_encode(data_with_prompt)
        return torch.tensor(token_ids).long(), torch.tensor(len(token_ids)).long()

def prompt_collate_fn(data, padding_value = 0):
    prompts, prompt_lengths = zip(*data)
    prompts = pad_sequence(prompts, padding_value = padding_value)
    return prompts, torch.stack(prompt_lengths)

def PromptDataloader(ds: Dataset, *args, padding_value = 0, **kwargs):
    collate_fn = partial(prompt_collate_fn, padding_value = padding_value)
    return DataLoader(ds, *args, collate_fn = collate_fn, **kwargs)

class FinetuneDataset(Dataset):
    def __init__(
        self,
        tokens: torch.Tensor
    ):
        self.tokens = tokens

    def __len__(self):
        return len(self.tokens)

    def __getitem__(self, idx):
        return self.tokens[idx]

def FinetuneDataloader(ds: Dataset, *args, padding_value = 0, **kwargs):
    return DataLoader(ds, *args, collate_fn = partial(pad_sequence, padding_value = padding_value), **kwargs)

# classes

@beartype
class Toolformer(nn.Module):
    def __init__(
        self,
        model: nn.Module,
        *,
        tool_id: str,
        tool: Callable,
        api_start_str = ' [',
        api_stop_str = ']',
        api_response_delimiter = '→',
        api_start_id = None,
        api_stop_id = None,
        teach_tool_prompt: str,
        filter_threshold = 1.,
        pad_id = 0,
        prompt_batch_size = 4,
        model_seq_len = 2048,
        tokenizer_encode: Callable = tokenizer.encode,
        tokenizer_decode: Callable = tokenizer.decode,
        post_prompt_callback: Callable = identity,
        prompt_input_tag: str = DEFAULT_PROMPT_INPUT_TAG,
        exclude_filters: dict[str, Callable[[str], bool]] = dict(),
        finetune = False,
        finetune_lr = 1e-4,
        finetune_wd = 1e-2,
        finetune_betas = (0.9, 0.99),
        finetune_eps = 1e-8,
        finetune_epochs = 3,
        finetune_batch_size = 16
    ):
        super().__init__()
        self.model = model
        self.model_seq_len = model_seq_len

        self.teach_tool_prompt = teach_tool_prompt
        self.prompt_batch_size = prompt_batch_size
        self.prompt_input_tag = prompt_input_tag

        self.post_prompt_callback = post_prompt_callback  # for easy mocking

        self.tokenizer_encode = tokenizer_encode
        self.tokenizer_decode = tokenizer_decode
        self.tokenizer_encode_to_tensor = lambda s: torch.tensor(tokenizer_encode(s)).long()

        self.filter_threshold = filter_threshold

        self.api_start_str = api_start_str
        self.api_stop_str = api_stop_str
        self.api_response_delimiter = api_response_delimiter

        if not exists(api_start_id):
            api_start_id = tokenizer_encode(api_start_str)
assert len(api_start_id) == 1 api_start_id = api_start_id[0] self.api_start_id = api_start_id if not exists(api_stop_id): api_stop_id = tokenizer_encode(api_stop_str) assert len(api_stop_id) == 1 api_stop_id = api_stop_id[0] self.api_stop_id = api_stop_id self.pad_id = pad_id self.tool_id = tool_id self.tool = tool self.registry = {tool_id: tool} assert num_matches(prompt_input_tag, teach_tool_prompt) == 1, f'there must be exactly one prompt input tag `{prompt_input_tag}` in your prompt to encourage the language model to use the designated tool' self.teach_tool_prompt = teach_tool_prompt self.exclude_filters = exclude_filters self.should_finetune = finetune if not finetune: return self.finetune_batch_size = finetune_batch_size self.finetune_epochs = finetune_epochs self.optimizer = get_optimizer( model.parameters(), lr = finetune_lr, wd = finetune_wd, betas = finetune_betas, eps = finetune_eps ) def generate_data_with_api_calls( self, data: List[str], temperature: float = 0.9 ) -> List[str]: dataset = PromptDataset( data = data, prompt_input_tag = self.prompt_input_tag, prompt = self.teach_tool_prompt, tokenizer_encode = self.tokenizer_encode ) dl = PromptDataloader( dataset, batch_size = self.prompt_batch_size ) prompted_outputs = [] for prime, positions in dl: sampled_outputs = sample( model = self.model, prime = prime, positions = positions, seq_len = self.model_seq_len, pad_id = self.pad_id, temperature = temperature ) for sample_output, position in zip(sampled_outputs, positions): start_position = position.item() prompted_output = self.tokenizer_decode(sample_output[start_position:]) prompted_outputs.append(prompted_output) return self.post_prompt_callback(prompted_outputs) def filter_and_keep_only_first_api_call( self, data, data_with_api_calls: List[str], return_excluded = False ): included_data = [] included_data_with_api_calls = [] included = (included_data, included_data_with_api_calls) excluded_data = [] excluded_data_with_api_calls = [] excluded = (excluded_data, excluded_data_with_api_calls) api_start_stop_kwargs = dict(api_start = self.api_start_str, api_stop = self.api_stop_str) has_api_calls_ = partial(has_api_calls, **api_start_stop_kwargs) replace_all_but_first_ = partial(replace_all_but_first, **api_start_stop_kwargs) for datum, data_with_api_call in zip(data, data_with_api_calls): if has_api_calls_(data_with_api_call): data_with_api_call = replace_all_but_first_(data_with_api_call) included_data.append(datum) included_data_with_api_calls.append(data_with_api_call) else: excluded_data.append(datum) excluded_data_with_api_calls.append(data_with_api_call) if not return_excluded: return included return included, excluded @torch.no_grad() def sample_model_with_api_calls( self, prime: Union[torch.Tensor, str], occurrence = 1, **kwargs ): self.model.eval() prime_is_str = isinstance(prime, str) if prime_is_str: prime = self.tokenizer_encode(prime) prime = torch.tensor(prime).long() prime = rearrange(prime, 'n -> 1 n') assert prime.shape[0] == 1, 'only one at a time for now' invoke_tools_ = partial(invoke_tools, self.registry) def call_apis(t: torch.Tensor): t = self.tokenizer_decode(t[0]) t = invoke_tools_(t) t = self.tokenizer_encode_to_tensor(t) return rearrange(t, 'n -> 1 n') output = sample_with_api_call( model = self.model, prime = prime, seq_len = self.model_seq_len, call_apis = call_apis, api_end_token_id = self.api_stop_id, occurrence = occurrence, **kwargs ) if not prime_is_str: return output return self.tokenizer_decode(output[0]) def make_api_calls( self, 
filtered_data_with_api_calls: List[str] ): invoke_tools_ = partial( invoke_tools, self.registry, api_start = self.api_start_str, api_stop = self.api_stop_str, delimiter = self.api_response_delimiter ) data_with_api_responses = [] for data in filtered_data_with_api_calls: output = invoke_tools_(data) data_with_api_responses.append(output) return data_with_api_responses def filter_by_api_responses( self, data: List[str], data_with_api_calls: List[str], data_with_api_responses: List[str] ) -> FilteredResults: to_token_ids = lambda l: pad_sequence([*map(self.tokenizer_encode_to_tensor, l)], padding_value = self.pad_id) tokens, tokens_without_api_response, tokens_with_api_response = map(to_token_ids, (data, data_with_api_calls, data_with_api_responses)) filtered_results = filter_tokens_with_api_response( model = self.model, tokens = tokens, tokens_with_api_response = tokens_with_api_response, tokens_without_api_response = tokens_without_api_response, filter_threshold = self.filter_threshold, api_start_token_id = self.api_start_id, api_end_token_id = self.api_stop_id ) return filtered_results def finetune( self, filtered_results: Union[FilteredResults, torch.Tensor] ): self.model.train() if isinstance(filtered_results, FilteredResults): filtered_results = filtered_results.filtered_tokens_without_api_response dataset = FinetuneDataset(tokens = filtered_results) dl = FinetuneDataloader(dataset, batch_size = self.finetune_batch_size, shuffle = True) for epoch in tqdm(range(self.finetune_epochs), desc = 'finetune epochs'): for batch in dl: inp, labels = batch[:, :-1], batch[:, 1:] logits = self.model(inp) logits = rearrange(logits, 'b n c -> b c n') loss = F.cross_entropy(logits, labels, ignore_index = self.pad_id) loss.backward() print(f'loss: {loss.item()}') self.optimizer.step() self.optimizer.zero_grad() print(f'finished finetuning on {len(dataset)} filtered samples') def forward( self, data: List[str], return_after_generating_api_calls = False, return_after_making_api_calls = False, return_after_filtering_api_calls = False, return_after_filtering_by_api_response = False ): data_with_api_calls = self.generate_data_with_api_calls(data) if return_after_generating_api_calls: return data_with_api_calls filtered_data, filtered_data_with_api_calls = self.filter_and_keep_only_first_api_call(data, data_with_api_calls) if return_after_filtering_api_calls: return filtered_data, filtered_data_with_api_calls assert len(filtered_data_with_api_calls) > 0, 'your model failed to follow instructions and make API calls. please try a better model or do some better prompt engineering' data_with_responses = self.make_api_calls(filtered_data_with_api_calls) if return_after_making_api_calls: return filtered_data, filtered_data_with_api_calls, data_with_responses filtered_results = self.filter_by_api_responses(filtered_data, filtered_data_with_api_calls, data_with_responses) if return_after_filtering_by_api_response: return filtered_results if self.should_finetune: assert filtered_results.num_passed > 0, f'none of the sequences with API calls passed the filtering criteria with threshold {self.filter_threshold}' self.finetune(filtered_results) return filtered_results
toolformer-pytorch-main
toolformer_pytorch/toolformer_pytorch.py
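# A minimal sketch (not from the repository above; all tensors are toy numbers) of the
# filtering criterion the file implements: an API call is kept for finetuning only when
# conditioning on the API response lowers the weighted language-modeling loss by at
# least `filter_threshold`, compared to the better of the plain passage and the passage
# with an unanswered call.

import torch

def weighted_loss(weights, probs):
    # same reduction as filter_tokens_with_api_response: weighted -log p, summed per sequence
    return (weights * -torch.log(probs)).sum(dim = -1)

weights = torch.tensor([[1.0, 0.8, 0.6, 0.4]]) # decaying weights after the call site

probs_plain       = torch.tensor([[0.20, 0.25, 0.30, 0.35]]) # original passage
probs_with_call   = torch.tensor([[0.22, 0.26, 0.31, 0.36]]) # <api>...</api>, no response
probs_with_answer = torch.tensor([[0.60, 0.65, 0.70, 0.75]]) # <api>... -> response</api>

loss_plus  = weighted_loss(weights, probs_with_answer)
loss_minus = torch.minimum(
    weighted_loss(weights, probs_with_call),
    weighted_loss(weights, probs_plain)
)

filter_threshold = 1.0
keep = (loss_minus - loss_plus) >= filter_threshold
print(keep) # tensor([True]) for these toy numbers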
from torch.optim import AdamW, Adam

def separate_weight_decayable_params(params):
    wd_params, no_wd_params = [], []
    for param in params:
        param_list = no_wd_params if param.ndim < 2 else wd_params
        param_list.append(param)
    return wd_params, no_wd_params

def get_optimizer(
    params,
    lr = 1e-4,
    wd = 1e-2,
    betas = (0.9, 0.99),
    eps = 1e-8,
    filter_by_requires_grad = False,
    group_wd_params = True,
    **kwargs
):
    has_weight_decay = wd > 0

    if filter_by_requires_grad:
        params = list(filter(lambda t: t.requires_grad, params))

    if group_wd_params and has_weight_decay:
        wd_params, no_wd_params = separate_weight_decayable_params(params)

        params = [
            {'params': wd_params},
            {'params': no_wd_params, 'weight_decay': 0},
        ]

    adam_kwargs = dict(lr = lr, betas = betas, eps = eps)

    if not has_weight_decay:
        return Adam(params, **adam_kwargs)

    return AdamW(params, weight_decay = wd, **adam_kwargs)
toolformer-pytorch-main
toolformer_pytorch/optimizer.py
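# Usage sketch for get_optimizer above (toy model, purely illustrative; assumes the
# definitions above are in scope): parameters with ndim < 2 - biases and LayerNorm
# weights - land in a zero-weight-decay group, everything else gets the requested
# decay via AdamW.

import torch.nn as nn

model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 4))
opt = get_optimizer(model.parameters(), lr = 3e-4, wd = 1e-2)

for i, group in enumerate(opt.param_groups):
    # group 0: weight matrices with decay 1e-2; group 1: biases / LayerNorm with decay 0
    print(i, len(group['params']), group.get('weight_decay'))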
from setuptools import setup, find_packages

setup(
  name = 'electra-pytorch',
  packages = find_packages(),
  version = '0.1.2',
  license='MIT',
  description = 'Electra - Pytorch',
  author = 'Erik Nijkamp, Phil Wang',
  author_email = '[email protected], [email protected]',
  url = 'https://github.com/lucidrains/electra-pytorch',
  keywords = [
    'transformers',
    'artificial intelligence',
    'pretraining'
  ],
  install_requires=[
    'torch>=1.6.0',
    'transformers==3.0.2',
    'scipy',
    'scikit-learn' # the bare 'sklearn' name is a deprecated alias on PyPI
  ],
  setup_requires=[
    'pytest-runner'
  ],
  tests_require=[
    'pytest',
    'reformer-pytorch'
  ],
  classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.7',
  ],
)
electra-pytorch-master
setup.py
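# Standard setuptools workflow for the package defined above (illustrative):
#
#   pip install -e .         # editable install from a source checkout
#   python setup.py test     # runs pytest via the pytest-runner hook declared above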
import torch
from torch import nn
from reformer_pytorch import ReformerLM

from electra_pytorch import Electra

def test_electra():
    generator = ReformerLM(
        num_tokens = 20000,
        dim = 512,
        depth = 1,
        max_seq_len = 1024
    )

    discriminator = ReformerLM(
        num_tokens = 20000,
        dim = 512,
        depth = 2,
        max_seq_len = 1024
    )

    generator.token_emb = discriminator.token_emb
    generator.pos_emb = discriminator.pos_emb

    trainer = Electra(
        generator,
        discriminator,
        num_tokens = 20000,
        discr_dim = 512,
        discr_layer = 'reformer',
        pad_token_id = 1,
        mask_ignore_token_ids = [2, 3]
    )

    data = torch.randint(0, 20000, (1, 1024))

    results = trainer(data)
    results.loss.backward()

def test_electra_without_magic():
    generator = ReformerLM(
        num_tokens = 20000,
        dim = 512,
        depth = 1,
        max_seq_len = 1024
    )

    discriminator = ReformerLM(
        num_tokens = 20000,
        dim = 512,
        depth = 2,
        max_seq_len = 1024,
        return_embeddings = True
    )

    generator.token_emb = discriminator.token_emb
    generator.pos_emb = discriminator.pos_emb

    discriminator_with_adapter = nn.Sequential(
        discriminator,
        nn.Linear(512, 1),
        nn.Sigmoid()
    )

    trainer = Electra(
        generator,
        discriminator_with_adapter,
        num_tokens = 20000,
        pad_token_id = 1,
        mask_ignore_token_ids = [2, 3]
    )

    data = torch.randint(0, 20000, (1, 1024))

    results = trainer(data)
    results.loss.backward()
electra-pytorch-master
tests/test_electra_pytorch.py
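# A note on the embedding tying used in both tests above (illustrative sketch, not part
# of the test suite): assigning the same nn.Embedding module to generator and
# discriminator means both backward passes accumulate into one weight tensor - the
# ELECTRA recipe of sharing input embeddings between the two networks.

import torch
import torch.nn as nn

shared = nn.Embedding(10, 4)
a, b = nn.Module(), nn.Module()
a.token_emb = shared
b.token_emb = shared

out = a.token_emb(torch.tensor([1])).sum() + b.token_emb(torch.tensor([1])).sum()
out.backward()
print(a.token_emb.weight.grad is b.token_emb.weight.grad) # True - one shared tensor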
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import matthews_corrcoef, f1_score

    _has_sklearn = True
except (AttributeError, ImportError):
    _has_sklearn = False


def is_sklearn_available():
    return _has_sklearn


if _has_sklearn:

    def simple_accuracy(preds, labels):
        return (preds == labels).mean()

    def acc_and_f1(preds, labels):
        acc = simple_accuracy(preds, labels)
        f1 = f1_score(y_true=labels, y_pred=preds)
        return {
            "acc": acc,
            "f1": f1,
            "acc_and_f1": (acc + f1) / 2,
        }

    def pearson_and_spearman(preds, labels):
        pearson_corr = pearsonr(preds, labels)[0]
        spearman_corr = spearmanr(preds, labels)[0]
        return {
            "pearson": pearson_corr,
            "spearmanr": spearman_corr,
            "corr": (pearson_corr + spearman_corr) / 2,
        }

    def glue_compute_metrics(task_name, preds, labels):
        assert len(preds) == len(labels)
        if task_name == "cola":
            return {"mcc": matthews_corrcoef(labels, preds)}
        elif task_name == "sst-2":
            return {"acc": simple_accuracy(preds, labels)}
        elif task_name in ("mrpc", "qqp"):
            return acc_and_f1(preds, labels)
        elif task_name == "sts-b":
            return pearson_and_spearman(preds, labels)
        elif task_name in ("mnli", "mnli-mm", "qnli", "rte", "wnli", "hans"):
            return {"acc": simple_accuracy(preds, labels)}
        else:
            raise KeyError(task_name)

    def xnli_compute_metrics(task_name, preds, labels):
        assert len(preds) == len(labels)
        if task_name == "xnli":
            return {"acc": simple_accuracy(preds, labels)}
        else:
            raise KeyError(task_name)
electra-pytorch-master
examples/glue/metrics.py
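# Quick sanity check for the metrics above (toy arrays, illustrative only; assumes the
# definitions above are in scope and scipy/scikit-learn are installed):

import numpy as np

preds  = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])

print(glue_compute_metrics("mrpc", preds, labels))   # acc, f1, and their mean
print(glue_compute_metrics("sts-b", preds.astype(float), labels.astype(float)))  # pearson/spearman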
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa).""" import argparse import glob import json import logging import os import random import numpy as np import torch import torch.nn as nn from torch.optim import AdamW from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from metrics import glue_compute_metrics as compute_metrics from processors import glue_convert_examples_to_features as convert_examples_to_features from processors import glue_output_modes as output_modes from processors import glue_processors as processors from processors import glue_tasks_num_labels as task_num_labels logger = logging.getLogger(__name__) ################################################## # adapters for Google-like GLUE code class TokenizerAdapter: def __init__(self, tokenizer, pad_token, cls_token="[CLS]", sep_token="[SEP]"): self.tokenizer = tokenizer self.pad_token = pad_token self.cls_token = cls_token self.sep_token = sep_token def convert_tokens_to_ids(self, tokens): return self.tokenizer.convert_tokens_to_ids(tokens) def truncate_sequences( self, ids, pair_ids, num_tokens_to_remove, truncation_strategy, stride, ): assert len(ids) > num_tokens_to_remove window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] ids = ids[:-num_tokens_to_remove] return (ids, pair_ids, overflowing_tokens) def encode_plus(self, text, text_pair, add_special_tokens, max_length, return_token_type_ids): # Tokenization token_ids_0 = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text)) len_ids = len(token_ids_0) if text_pair: token_ids_1 = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text_pair)) len_pair_ids = len(token_ids_1) else: token_ids_1 = None len_pair_ids = 0 # Truncation assert add_special_tokens num_special_tokens_to_add = (2 if not text_pair else 3) total_len = len_ids + len_pair_ids + num_special_tokens_to_add if max_length and total_len > max_length: token_ids_0, token_ids_1, overflowing_tokens = self.truncate_sequences( token_ids_0, pair_ids=token_ids_1, num_tokens_to_remove=total_len - max_length, truncation_strategy='only_first', # TODO(nijkamp): is this the correct truncation strategy for all GLUE tasks? 
stride=0, ) # Add special tokens cls = [self.tokenizer.vocab[self.cls_token]] sep = [self.tokenizer.vocab[self.sep_token]] if not text_pair: input_ids = cls + token_ids_0 + sep token_type_ids = len(cls + token_ids_0 + sep) * [0] else: input_ids = cls + token_ids_0 + sep + token_ids_1 + sep token_type_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] assert len(input_ids) <= max_length return {"input_ids": input_ids, "token_type_ids": token_type_ids} def __len__(self): return len(self.tokenizer.vocab) def save_pretrained(self, outputdir): pass def wrap_tokenizer(tokenizer, pad_token): return TokenizerAdapter(tokenizer, pad_token) ################################################## # distilled Google-like/HF glue code def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): """ Create a schedule with a learning rate that decreases linearly after linearly increasing during a warmup period. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch) def train(args, train_dataset, model, tokenizer): """ Train the model """ args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = 
torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to global_step of last saved checkpoint from model path try: global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) except ValueError: global_step = 0 epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) # Added here for reproductibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() if step % 10 == 0: print(step, loss.item()) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps len(epoch_iterator) <= args.gradient_accumulation_steps and (step + 1) == len(epoch_iterator) ): if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: logs = {} if ( args.local_rank 
== -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss print(json.dumps({**logs, **{"step": global_step}})) if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) print(preds) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]: # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] examples = ( processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) ) features = convert_examples_to_features( examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, pad_on_left=False, # pad on the left for xlnet pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=0, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = 
torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset # python run_glue.py \ # --model_name_or_path bert-base-uncased \ # --task_name $TASK_NAME \ # --do_train \ # --do_eval \ # --data_dir $GLUE_DIR/$TASK_NAME \ # --max_seq_length 128 \ # --per_gpu_train_batch_size 32 \ # --learning_rate 2e-5 \ # --num_train_epochs 3.0 \ # --output_dir /tmp/$TASK_NAME \ # --overwrite_output_dir \ # --cache_dir cache_glue_bert def main(task='MRPC', seed=42, ckpt='output/pretrain/2020-08-28-02-41-37/ckpt/60000'): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=f'data/glue_data/{task}', type=str, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", ) parser.add_argument( "--model_type", default="bert", type=str, ) parser.add_argument( "--model_name_or_path", default=ckpt, type=str, ) parser.add_argument( "--vocab_path", default='data/vocab.txt', type=str, ) parser.add_argument( "--task_name", default=task, type=str, help="The name of the task to train selected in the list: " + ", ".join(processors.keys()), ) parser.add_argument( "--output_dir", default='output/glue', type=str, help="The output directory where the model predictions and checkpoints will be written.", ) # Other parameters parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--do_train", default=True, help="Whether to run training.") parser.add_argument("--do_eval", default=True, help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.", ) parser.add_argument( "--do_lower_case", default=True, help="Set this flag if you are using an uncased model.", ) parser.add_argument( "--per_gpu_train_batch_size", default=32, type=int, help="Batch size per GPU/CPU for training.", ) parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--learning_rate", default=2e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.", ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument( "--overwrite_output_dir", default=True, help="Overwrite the content of the output directory", ) parser.add_argument( "--overwrite_cache", default=True, help="Overwrite the cached training and evaluation sets", ) parser.add_argument("--seed", type=int, default=seed, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab from transformers import AutoConfig, AutoModelForSequenceClassification args.model_type = args.model_type.lower() config = AutoConfig.from_pretrained( args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None, ) model = AutoModelForSequenceClassification.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) from pretraining.openwebtext.dataset import new_tokenizer tokenizer = wrap_tokenizer(new_tokenizer(args.vocab_path), pad_token='[PAD]') if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_to_save # TODO(nijkamp): we ignore model serialization # model = AutoModelForSequenceClassification.from_pretrained(args.output_dir) # tokenizer = AutoTokenizer.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: # TODO(nijkamp): we ignore model serialization # tokenizer = AutoTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" # TODO(nijkamp): we ignore model serialization # model = AutoModelForSequenceClassification.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + "_{}".format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
electra-pytorch-master
examples/glue/run.py
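# Illustration (toy numbers, assumes get_linear_schedule_with_warmup from run.py above
# is in scope) of the warmup schedule used for GLUE finetuning: the multiplier climbs
# linearly to 1.0 over num_warmup_steps, then decays linearly to 0.

import torch

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr = 2e-5)
sched = get_linear_schedule_with_warmup(opt, num_warmup_steps = 4, num_training_steps = 10)

lrs = []
for _ in range(10):
    lrs.append(sched.get_last_lr()[0])
    opt.step()
    sched.step()
print(lrs) # ramps from 0 to 2e-5 over 4 steps, then decays toward 0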
''' Script for downloading all GLUE data. Note: for legal reasons, we are unable to host MRPC. You can either use the version hosted by the SentEval team, which is already tokenized, or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually. For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example). You should then rename and place specific files in a folder (see below for an example). mkdir MRPC cabextract MSRParaphraseCorpus.msi -d MRPC cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt rm MRPC/_* rm MSRParaphraseCorpus.msi 1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now. 2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray! ''' import os import sys import shutil import argparse import tempfile import urllib.request import zipfile TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"] TASK2PATH = {"CoLA":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4', "SST":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8', "MRPC":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc', "QQP":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5', "STS":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5', "MNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce', "SNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df', "QNLI": 'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601', "RTE":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb', "WNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf', 
"diagnostic":'https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D'} MRPC_TRAIN = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt' MRPC_TEST = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt' def download_and_extract(task, data_dir): print("Downloading and extracting %s..." % task) data_file = "%s.zip" % task urllib.request.urlretrieve(TASK2PATH[task], data_file) with zipfile.ZipFile(data_file) as zip_ref: zip_ref.extractall(data_dir) os.remove(data_file) print("\tCompleted!") def format_mrpc(data_dir, path_to_data): print("Processing MRPC...") mrpc_dir = os.path.join(data_dir, "MRPC") if not os.path.isdir(mrpc_dir): os.mkdir(mrpc_dir) if path_to_data: mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt") mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt") else: print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN) mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt") mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt") urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file) urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file) assert os.path.isfile(mrpc_train_file), "Train data not found at %s" % mrpc_train_file assert os.path.isfile(mrpc_test_file), "Test data not found at %s" % mrpc_test_file urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv")) dev_ids = [] with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh: for row in ids_fh: dev_ids.append(row.strip().split('\t')) with open(mrpc_train_file, encoding="utf8") as data_fh, \ open(os.path.join(mrpc_dir, "train.tsv"), 'w', encoding="utf8") as train_fh, \ open(os.path.join(mrpc_dir, "dev.tsv"), 'w', encoding="utf8") as dev_fh: header = data_fh.readline() train_fh.write(header) dev_fh.write(header) for row in data_fh: label, id1, id2, s1, s2 = row.strip().split('\t') if [id1, id2] in dev_ids: dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) else: train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) with open(mrpc_test_file, encoding="utf8") as data_fh, \ open(os.path.join(mrpc_dir, "test.tsv"), 'w', encoding="utf8") as test_fh: header = data_fh.readline() test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n") for idx, row in enumerate(data_fh): label, id1, id2, s1, s2 = row.strip().split('\t') test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2)) print("\tCompleted!") def download_diagnostic(data_dir): print("Downloading and extracting diagnostic...") if not os.path.isdir(os.path.join(data_dir, "diagnostic")): os.mkdir(os.path.join(data_dir, "diagnostic")) data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv") urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file) print("\tCompleted!") return def get_tasks(task_names): task_names = task_names.split(',') if "all" in task_names: tasks = TASKS else: tasks = [] for task_name in task_names: 
assert task_name in TASKS, "Task %s not found!" % task_name tasks.append(task_name) return tasks def main(arguments): parser = argparse.ArgumentParser() parser.add_argument('--data_dir', help='directory to save data to', type=str, default='./data/glue_data') parser.add_argument('--tasks', help='tasks to download data for as a comma separated string', type=str, default='all') parser.add_argument('--path_to_mrpc', help='path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_text.txt', type=str, default='') args = parser.parse_args(arguments) if not os.path.exists(args.data_dir): os.makedirs(args.data_dir) tasks = get_tasks(args.tasks) for task in tasks: if task == 'MRPC': format_mrpc(args.data_dir, args.path_to_mrpc) elif task == 'diagnostic': download_diagnostic(args.data_dir) else: download_and_extract(task, args.data_dir) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
electra-pytorch-master
examples/glue/download.py
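# Typical invocations of the download script above (illustrative):
#
#   python examples/glue/download.py --data_dir ./data/glue_data --tasks all
#   python examples/glue/download.py --data_dir ./data/glue_data --tasks MRPC,SST
#
# or programmatically:
#
#   import sys
#   sys.exit(main(['--data_dir', './data/glue_data', '--tasks', 'MRPC']))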
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import csv
import dataclasses
import json
import logging
from dataclasses import dataclass
from typing import Optional

# from ...file_utils import is_tf_available, is_torch_available
is_torch_available = lambda: True
is_tf_available = lambda: False

logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """
    A single training/test example for simple sequence classification.

    Args:
        guid: Unique id for the example.
        text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
        text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
        label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n"


class InputFeatures(object):
    """
    A single set of features of data.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``: Usually ``1`` for tokens that are NOT MASKED,
            ``0`` for MASKED (padded) tokens.
        token_type_ids: Segment token indices to indicate first and second portions of the inputs.
        label: Label corresponding to the input
    """

    def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.label = label

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"


class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """Gets an example from a dict with tensorflow tensors.

        Args:
            tensor_dict: Keys and values should match the corresponding Glue
                tensorflow_dataset examples.
        """
        raise NotImplementedError()

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    def tfds_map(self, example):
        """Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are.
        This method converts examples to the correct format."""
        if len(self.get_labels()) > 1:
            example.label = self.get_labels()[int(example.label)]
        return example

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r", encoding="utf-8-sig") as f:
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))


class SingleSentenceClassificationProcessor(DataProcessor):
    """Generic processor for a single sentence classification data set."""

    def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
        self.labels = [] if labels is None else labels
        self.examples = [] if examples is None else examples
        self.mode = mode
        self.verbose = verbose

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
        return self.examples[idx]

    @classmethod
    def create_from_csv(
        cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
    ):
        processor = cls(**kwargs)
        processor.add_examples_from_csv(
            file_name,
            split_name=split_name,
            column_label=column_label,
            column_text=column_text,
            column_id=column_id,
            skip_first_row=skip_first_row,
            overwrite_labels=True,
            overwrite_examples=True,
        )
        return processor

    @classmethod
    def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
        processor = cls(**kwargs)
        processor.add_examples(texts_or_text_and_labels, labels=labels)
        return processor

    def add_examples_from_csv(
        self,
        file_name,
        split_name="",
        column_label=0,
        column_text=1,
        column_id=None,
        skip_first_row=False,
        overwrite_labels=False,
        overwrite_examples=False,
    ):
        lines = self._read_tsv(file_name)
        if skip_first_row:
            lines = lines[1:]
        texts = []
        labels = []
        ids = []
        for (i, line) in enumerate(lines):
            texts.append(line[column_text])
            labels.append(line[column_label])
            if column_id is not None:
                ids.append(line[column_id])
            else:
                guid = "%s-%s" % (split_name, i) if split_name else "%s" % i
                ids.append(guid)

        return self.add_examples(
            texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
        )

    def add_examples(
        self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
    ):
        assert labels is None or len(texts_or_text_and_labels) == len(labels)
        assert ids is None or len(texts_or_text_and_labels) == len(ids)
        if ids is None:
            ids = [None] * len(texts_or_text_and_labels)
        if labels is None:
            labels = [None] * len(texts_or_text_and_labels)
        examples = []
        added_labels = set()
        for (text_or_text_and_label, label, guid) in zip(texts_or_text_and_labels, labels, ids):
            if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
                text, label = text_or_text_and_label
            else:
                text = text_or_text_and_label
            added_labels.add(label)
            examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))

        # Update examples
        if overwrite_examples:
            self.examples = examples
        else:
            self.examples.extend(examples)

        # Update labels
        if overwrite_labels:
            self.labels = list(added_labels)
        else:
            self.labels = list(set(self.labels).union(added_labels))

        return self.examples

    def get_features(
        self,
        tokenizer,
        max_length=None,
        pad_on_left=False,
        pad_token=0,
        mask_padding_with_zero=True,
        return_tensors=None,
    ):
        """
        Converts the stored examples into a list of ``InputFeatures``.

        Args:
            tokenizer: Instance of a tokenizer that will tokenize the examples
            max_length: Maximum example length
            pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
            pad_token: Padding token
            mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
                and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
                actual values)
            return_tensors: If set to ``"tf"`` or ``"pt"``, returns a ``tf.data.Dataset`` or a torch
                ``TensorDataset`` instead of a plain list of features.

        Returns:
            A list of ``InputFeatures`` which can be fed to the model, or the dataset type selected by
            ``return_tensors``.
        """
        if max_length is None:
            max_length = tokenizer.max_len

        label_map = {label: i for i, label in enumerate(self.labels)}

        all_input_ids = []
        for (ex_index, example) in enumerate(self.examples):
            if ex_index % 10000 == 0:
                logger.info("Tokenizing example %d", ex_index)

            input_ids = tokenizer.encode(
                example.text_a, add_special_tokens=True, max_length=min(max_length, tokenizer.max_len),
            )
            all_input_ids.append(input_ids)

        batch_length = max(len(input_ids) for input_ids in all_input_ids)

        features = []
        for (ex_index, (input_ids, example)) in enumerate(zip(all_input_ids, self.examples)):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d/%d" % (ex_index, len(self.examples)))
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = batch_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)

            assert len(input_ids) == batch_length, "Error with input length {} vs {}".format(
                len(input_ids), batch_length
            )
            assert len(attention_mask) == batch_length, "Error with input length {} vs {}".format(
                len(attention_mask), batch_length
            )

            if self.mode == "classification":
                label = label_map[example.label]
            elif self.mode == "regression":
                label = float(example.label)
            else:
                raise ValueError(self.mode)

            if ex_index < 5 and self.verbose:
                logger.info("*** Example ***")
                logger.info("guid: %s" % (example.guid))
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
                logger.info("label: %s (id = %d)" % (example.label, label))

            features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))

        if return_tensors is None:
            return features
        elif return_tensors == "tf":
            if not is_tf_available():
                raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
            import tensorflow as tf

            def gen():
                for ex in features:
                    yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)

            dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
            )
            return dataset
        elif return_tensors == "pt":
            if not is_torch_available():
                raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
            import torch
            from torch.utils.data import TensorDataset

            all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
            all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
            if self.mode == "classification":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
            elif self.mode == "regression":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

            dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
            return dataset
        else:
            raise ValueError("return_tensors should be one of 'tf' or 'pt'")
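

# --- Editor's note: a hedged usage sketch, not part of the original file. ---
# It shows the intended flow of the processor above: build it from (text, label)
# pairs, then convert everything to a TensorDataset. `BertTokenizer` and the
# checkpoint name are assumptions for illustration; any tokenizer exposing
# `encode(...)` and `max_len` (i.e. transformers < 4) should work the same way.
if __name__ == "__main__":
    from transformers import BertTokenizer  # assumed available, needs network on first use

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = SingleSentenceClassificationProcessor.create_from_examples(
        [("a great movie", "1"), ("a dull movie", "0")]
    )
    # returns a torch TensorDataset of (input_ids, attention_mask, label)
    dataset = processor.get_features(tokenizer, max_length=32, return_tensors="pt")
    print(len(dataset))  # -> 2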
electra-pytorch-master
examples/glue/utils.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLUE processors and helpers """

import logging
import os

# from ...file_utils import is_tf_available
from utils import DataProcessor, InputExample, InputFeatures

is_tf_available = lambda: False

if is_tf_available():
    import tensorflow as tf

logger = logging.getLogger(__name__)


def glue_convert_examples_to_features(
    examples,
    tokenizer,
    max_length=512,
    task=None,
    label_list=None,
    output_mode=None,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    mask_padding_with_zero=True,
):
    """
    Loads a data file into a list of ``InputFeatures``

    Args:
        examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
        tokenizer: Instance of a tokenizer that will tokenize the examples
        max_length: Maximum example length
        task: GLUE task
        label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
        output_mode: String indicating the output mode. Either ``regression`` or ``classification``
        pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
        pad_token: Padding token
        pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for
            XLNet where it is 4)
        mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
            and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
            actual values)

    Returns:
        If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
        containing the task-specific features. If the input is a list of ``InputExamples``, will return
        a list of task-specific ``InputFeatures`` which can be fed to the model.

    """
    is_tf_dataset = False
    if is_tf_available() and isinstance(examples, tf.data.Dataset):
        is_tf_dataset = True

    if task is not None:
        processor = glue_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info("Using label list %s for task %s" % (label_list, task))
        if output_mode is None:
            output_mode = glue_output_modes[task]
            logger.info("Using output mode %s for task %s" % (output_mode, task))

    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        len_examples = 0
        if is_tf_dataset:
            example = processor.get_example_from_tensor_dict(example)
            example = processor.tfds_map(example)
            len_examples = tf.data.experimental.cardinality(examples)
        else:
            len_examples = len(examples)
        if ex_index % 10000 == 0:
            logger.info("Writing example %d/%d" % (ex_index, len_examples))

        inputs = tokenizer.encode_plus(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, return_token_type_ids=True,
        )
        input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)

        assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
        assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
            len(attention_mask), max_length
        )
        assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
            len(token_type_ids), max_length
        )

        if output_mode == "classification":
            label = label_map[example.label]
        elif output_mode == "regression":
            label = float(example.label)
        else:
            raise KeyError(output_mode)

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
            logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label))

        features.append(
            InputFeatures(
                input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label
            )
        )

    if is_tf_available() and is_tf_dataset:

        def gen():
            for ex in features:
                yield (
                    {
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        return tf.data.Dataset.from_generator(
            gen,
            ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
            (
                {
                    "input_ids": tf.TensorShape([None]),
                    "attention_mask": tf.TensorShape([None]),
                    "token_type_ids": tf.TensorShape([None]),
                },
                tf.TensorShape([]),
            ),
        )

    return features


class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence1"].numpy().decode("utf-8"),
            tensor_dict["sentence2"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = line[3]
            text_b = line[4]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["premise"].numpy().decode("utf-8"),
            tensor_dict["hypothesis"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[8]
            text_b = line[9]
            label = line[-1]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class MnliMismatchedProcessor(MnliProcessor):
    """Processor for the MultiNLI Mismatched data set (GLUE version)."""

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_matched")


class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence"].numpy().decode("utf-8"),
            None,
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            text_a = line[3]
            label = line[1]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples


class Sst2Processor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence"].numpy().decode("utf-8"),
            None,
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = line[0]
            label = line[1]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples


class StsbProcessor(DataProcessor):
    """Processor for the STS-B data set (GLUE version)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence1"].numpy().decode("utf-8"),
            tensor_dict["sentence2"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return [None]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[7]
            text_b = line[8]
            label = line[-1]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class QqpProcessor(DataProcessor):
    """Processor for the QQP data set (GLUE version)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["question1"].numpy().decode("utf-8"),
            tensor_dict["question2"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            try:
                text_a = line[3]
                text_b = line[4]
                label = line[5]
            except IndexError:
                continue
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class QnliProcessor(DataProcessor):
    """Processor for the QNLI data set (GLUE version)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["question"].numpy().decode("utf-8"),
            tensor_dict["sentence"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[1]
            text_b = line[2]
            label = line[-1]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class RteProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE version)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence1"].numpy().decode("utf-8"),
            tensor_dict["sentence2"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[1]
            text_b = line[2]
            label = line[-1]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class WnliProcessor(DataProcessor):
    """Processor for the WNLI data set (GLUE version)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence1"].numpy().decode("utf-8"),
            tensor_dict["sentence2"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[1]
            text_b = line[2]
            label = line[-1]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


glue_tasks_num_labels = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

glue_processors = {
    "cola": ColaProcessor,
    "mnli": MnliProcessor,
    "mnli-mm": MnliMismatchedProcessor,
    "mrpc": MrpcProcessor,
    "sst-2": Sst2Processor,
    "sts-b": StsbProcessor,
    "qqp": QqpProcessor,
    "qnli": QnliProcessor,
    "rte": RteProcessor,
    "wnli": WnliProcessor,
}

glue_output_modes = {
    "cola": "classification",
    "mnli": "classification",
    "mnli-mm": "classification",
    "mrpc": "classification",
    "sst-2": "classification",
    "sts-b": "regression",
    "qqp": "classification",
    "qnli": "classification",
    "rte": "classification",
    "wnli": "classification",
}
electra-pytorch-master
examples/glue/processors.py
from electra_pytorch.electra_pytorch import Electra
electra-pytorch-master
electra_pytorch/__init__.py
import math
from functools import reduce
from collections import namedtuple

import torch
from torch import nn
import torch.nn.functional as F

# constants

Results = namedtuple('Results', [
    'loss',
    'mlm_loss',
    'disc_loss',
    'gen_acc',
    'disc_acc',
    'disc_labels',
    'disc_predictions'
])

# helpers

def log(t, eps=1e-9):
    return torch.log(t + eps)

def gumbel_noise(t):
    noise = torch.zeros_like(t).uniform_(0, 1)
    return -log(-log(noise))

def gumbel_sample(t, temperature = 1.):
    return ((t / temperature) + gumbel_noise(t)).argmax(dim=-1)

def prob_mask_like(t, prob):
    return torch.zeros_like(t).float().uniform_(0, 1) < prob

def mask_with_tokens(t, token_ids):
    init_no_mask = torch.full_like(t, False, dtype=torch.bool)
    mask = reduce(lambda acc, el: acc | (t == el), token_ids, init_no_mask)
    return mask

def get_mask_subset_with_prob(mask, prob):
    batch, seq_len, device = *mask.shape, mask.device
    max_masked = math.ceil(prob * seq_len)

    num_tokens = mask.sum(dim=-1, keepdim=True)
    mask_excess = (mask.cumsum(dim=-1) > (num_tokens * prob).ceil())
    mask_excess = mask_excess[:, :max_masked]

    rand = torch.rand((batch, seq_len), device=device).masked_fill(~mask, -1e9)
    _, sampled_indices = rand.topk(max_masked, dim=-1)
    sampled_indices = (sampled_indices + 1).masked_fill_(mask_excess, 0)

    new_mask = torch.zeros((batch, seq_len + 1), device=device)
    new_mask.scatter_(-1, sampled_indices, 1)
    return new_mask[:, 1:].bool()

# hidden layer extractor class, for magically adding adapter to language model to be pretrained

class HiddenLayerExtractor(nn.Module):
    def __init__(self, net, layer = -2):
        super().__init__()
        self.net = net
        self.layer = layer

        self.hidden = None
        self.hook_registered = False

    def _find_layer(self):
        if type(self.layer) == str:
            modules = dict([*self.net.named_modules()])
            return modules.get(self.layer, None)
        elif type(self.layer) == int:
            children = [*self.net.children()]
            return children[self.layer]
        return None

    def _hook(self, _, __, output):
        self.hidden = output

    def _register_hook(self):
        layer = self._find_layer()
        assert layer is not None, f'hidden layer ({self.layer}) not found'
        handle = layer.register_forward_hook(self._hook)
        self.hook_registered = True

    def forward(self, x):
        if self.layer == -1:
            return self.net(x)

        if not self.hook_registered:
            self._register_hook()

        _ = self.net(x)
        hidden = self.hidden
        self.hidden = None
        assert hidden is not None, f'hidden layer {self.layer} never emitted an output'
        return hidden

# main electra class

class Electra(nn.Module):
    def __init__(
        self,
        generator,
        discriminator,
        *,
        num_tokens = None,
        discr_dim = -1,
        discr_layer = -1,
        mask_prob = 0.15,
        replace_prob = 0.85,
        random_token_prob = 0.,
        mask_token_id = 2,
        pad_token_id = 0,
        mask_ignore_token_ids = [],
        disc_weight = 50.,
        gen_weight = 1.,
        temperature = 1.):
        super().__init__()

        self.generator = generator
        self.discriminator = discriminator

        if discr_dim > 0:
            self.discriminator = nn.Sequential(
                HiddenLayerExtractor(discriminator, layer = discr_layer),
                nn.Linear(discr_dim, 1)
            )

        # mlm related probabilities
        self.mask_prob = mask_prob
        self.replace_prob = replace_prob

        self.num_tokens = num_tokens
        self.random_token_prob = random_token_prob

        # token ids
        self.pad_token_id = pad_token_id
        self.mask_token_id = mask_token_id
        self.mask_ignore_token_ids = set([*mask_ignore_token_ids, pad_token_id])

        # sampling temperature
        self.temperature = temperature

        # loss weights
        self.disc_weight = disc_weight
        self.gen_weight = gen_weight

    def forward(self, input, **kwargs):
        b, t = input.shape

        replace_prob = prob_mask_like(input, self.replace_prob)

        # do not mask [pad] tokens, or any other tokens in the tokens designated to be excluded ([cls], [sep])
        # also do not include these special tokens in the tokens chosen at random
        no_mask = mask_with_tokens(input, self.mask_ignore_token_ids)
        mask = get_mask_subset_with_prob(~no_mask, self.mask_prob)

        # get mask indices
        mask_indices = torch.nonzero(mask, as_tuple=True)

        # mask input with mask tokens with probability of `replace_prob` (keep tokens the same with probability 1 - replace_prob)
        masked_input = input.clone().detach()

        # set inverse of mask to padding tokens for labels
        gen_labels = input.masked_fill(~mask, self.pad_token_id)

        # clone the mask, for potential modification if random tokens are involved
        # not to be mistaken for the mask above, which covers all masked tokens,
        # whether they end up replaced with [MASK] or with random tokens
        masking_mask = mask.clone()

        # if random token probability > 0 for mlm
        if self.random_token_prob > 0:
            assert self.num_tokens is not None, 'Number of tokens (num_tokens) must be passed to Electra for randomizing tokens during masked language modeling'

            random_token_prob = prob_mask_like(input, self.random_token_prob)
            random_tokens = torch.randint(0, self.num_tokens, input.shape, device=input.device)
            random_no_mask = mask_with_tokens(random_tokens, self.mask_ignore_token_ids)
            random_token_prob &= ~random_no_mask
            masked_input = torch.where(random_token_prob, random_tokens, masked_input)

            # remove random token prob mask from masking mask
            masking_mask = masking_mask & ~random_token_prob

        # [mask] input
        masked_input = masked_input.masked_fill(masking_mask * replace_prob, self.mask_token_id)

        # get generator output and get mlm loss
        logits = self.generator(masked_input, **kwargs)

        mlm_loss = F.cross_entropy(
            logits.transpose(1, 2),
            gen_labels,
            ignore_index = self.pad_token_id
        )

        # use mask from before to select logits that need sampling
        sample_logits = logits[mask_indices]

        # sample
        sampled = gumbel_sample(sample_logits, temperature = self.temperature)

        # scatter the sampled values back to the input
        disc_input = input.clone()
        disc_input[mask_indices] = sampled.detach()

        # generate discriminator labels, with replaced as True and original as False
        disc_labels = (input != disc_input).float().detach()

        # get discriminator predictions of replaced / original
        non_padded_indices = torch.nonzero(input != self.pad_token_id, as_tuple=True)

        # get discriminator output and binary cross entropy loss
        disc_logits = self.discriminator(disc_input, **kwargs)
        disc_logits = disc_logits.reshape_as(disc_labels)

        disc_loss = F.binary_cross_entropy_with_logits(
            disc_logits[non_padded_indices],
            disc_labels[non_padded_indices]
        )

        # gather metrics
        with torch.no_grad():
            gen_predictions = torch.argmax(logits, dim=-1)
            disc_predictions = torch.round((torch.sign(disc_logits) + 1.0) * 0.5)
            gen_acc = (gen_labels[mask] == gen_predictions[mask]).float().mean()
            disc_acc = 0.5 * (disc_labels[mask] == disc_predictions[mask]).float().mean() + 0.5 * (disc_labels[~mask] == disc_predictions[~mask]).float().mean()

        # return weighted sum of losses
        return Results(self.gen_weight * mlm_loss + self.disc_weight * disc_loss, mlm_loss, disc_loss, gen_acc, disc_acc, disc_labels, disc_predictions)
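

# --- Editor's note: a hedged smoke-test sketch, not part of the original file. ---
# The only contract Electra imposes on the two networks is their output shapes:
# generator(input) -> (b, n, num_tokens) token logits, discriminator(input) ->
# per-token logits reshapeable to (b, n). `ToyLM` below is a made-up stand-in
# used purely to exercise those shapes; real usage pairs it with transformer LMs.
if __name__ == "__main__":
    class ToyLM(nn.Module):
        def __init__(self, num_tokens, dim, out_dim):
            super().__init__()
            self.embed = nn.Embedding(num_tokens, dim)
            self.head = nn.Linear(dim, out_dim)

        def forward(self, x):
            return self.head(self.embed(x))  # (b, n, out_dim)

    num_tokens = 100
    generator = ToyLM(num_tokens, 32, num_tokens)   # token logits per position
    discriminator = ToyLM(num_tokens, 32, 1)        # one replaced/original logit per position

    electra = Electra(
        generator,
        discriminator,
        num_tokens = num_tokens,
        mask_token_id = 2,
        pad_token_id = 0,
        mask_ignore_token_ids = [1])

    # ids >= 3 so no special tokens appear in the toy batch
    data = torch.randint(3, num_tokens, (2, 64))
    results = electra(data)
    results.loss.backward()
    print(results.loss.item(), results.disc_acc.item())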
electra-pytorch-master
electra_pytorch/electra_pytorch.py
import os
import sys

dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0, parent_dir_path)

import random
import logging
from time import time
from dataclasses import dataclass

import numpy as np
import torch
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data.dataloader import DataLoader

from electra_pytorch import Electra

from openwebtext import arg
from openwebtext.dataset import load_owt, new_tokenizer, wrap_example_builder

logger = logging.getLogger(__name__)


########################################################################################################
## args

@dataclass
class Args:
    data_dir: arg.Str = 'data/openwebtext_features'
    data_vocab_file: arg.Str = 'data/vocab.txt'
    data_n_tensors_per_file: arg.Int = 2048
    data_max_seq_length: arg.Int = 128

    gpu: arg.Int = 0
    gpu_enabled: arg.Bool = True
    gpu_deterministic: arg.Bool = False
    gpu_mixed_precision: arg.Bool = False
    distributed_port: arg.Int = 8888
    distributed_enabled: arg.Bool = True
    distributed_world_size: arg.Int = 4

    model_generator: arg.Str = 'pretraining/openwebtext/small_generator.json'
    model_discriminator: arg.Str = 'pretraining/openwebtext/small_discriminator.json'
    model_mask_prob: arg.Float = 0.15

    opt_lr: arg.Float = 5e-4
    opt_batch_size: arg.Int = 128 // (distributed_world_size if distributed_enabled else 1)
    opt_warmup_steps: arg.Int = 10_000
    opt_num_training_steps: arg.Int = 200_000

    step_log: arg.Int = 10
    step_ckpt: arg.Int = 10_000


########################################################################################################
## train

def train(rank, args):

    #######################
    ## distributed

    if args.distributed_enabled:
        torch.distributed.init_process_group(
            backend='nccl',
            init_method='env://',
            world_size=args.distributed_world_size,
            rank=rank)
    if args.gpu_enabled:
        device = torch.device('cuda:{}'.format(rank))
    else:
        device = torch.device('cpu')

    is_master = True if not args.distributed_enabled else args.distributed_enabled and rank == 0


    #######################
    ## preamble

    set_gpus(rank)
    set_seed(rank)
    set_cuda(deterministic=args.gpu_deterministic)

    output_dir = f'{args.output_dir}/{rank}'
    os.makedirs(output_dir, exist_ok=False)

    setup_logging(filename=f'{output_dir}/output.log', console=is_master)


    #######################
    ## dataset

    tokenizer = new_tokenizer(vocab_file=args.data_vocab_file)
    vocab_size = len(tokenizer.vocab)
    ds_train = wrap_example_builder(dataset=load_owt(owt_dir=args.data_dir, n_tensors_per_file=args.data_n_tensors_per_file), vocab=tokenizer.vocab, max_length=args.data_max_seq_length)

    pad_token_id = tokenizer.vocab['[PAD]']
    mask_token_id = tokenizer.vocab['[MASK]']
    cls_token_id = tokenizer.vocab['[CLS]']
    sep_token_id = tokenizer.vocab['[SEP]']

    assert pad_token_id == 0
    assert cls_token_id == 101
    assert sep_token_id == 102
    assert mask_token_id == 103

    def collate_batch(examples):
        input_ids = torch.nn.utils.rnn.pad_sequence([example['input_ids'] for example in examples], batch_first=True, padding_value=pad_token_id)
        input_mask = torch.nn.utils.rnn.pad_sequence([example['input_mask'] for example in examples], batch_first=True, padding_value=pad_token_id)
        segment_ids = torch.nn.utils.rnn.pad_sequence([example['segment_ids'] for example in examples], batch_first=True, padding_value=pad_token_id)
        return input_ids, input_mask, segment_ids

    def cycle(iterable):
        while True:
            for x in iterable:
                yield x

    ds_train_loader = iter(cycle(DataLoader(ds_train, batch_size=args.opt_batch_size, collate_fn=collate_batch)))


    #######################
    ## model

    def to_distributed_model(model):
        return model if not args.distributed_enabled else torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank], find_unused_parameters=True)

    def tie_weights(generator, discriminator):
        generator.electra.embeddings.word_embeddings = discriminator.electra.embeddings.word_embeddings
        generator.electra.embeddings.position_embeddings = discriminator.electra.embeddings.position_embeddings
        generator.electra.embeddings.token_type_embeddings = discriminator.electra.embeddings.token_type_embeddings

    class LogitsAdapter(torch.nn.Module):
        def __init__(self, adaptee):
            super().__init__()
            self.adaptee = adaptee

        def forward(self, *args, **kwargs):
            return self.adaptee(*args, **kwargs)[0]

    from transformers import AutoConfig, ElectraForMaskedLM, ElectraForPreTraining

    generator = ElectraForMaskedLM(AutoConfig.from_pretrained(args.model_generator))
    discriminator = ElectraForPreTraining(AutoConfig.from_pretrained(args.model_discriminator))

    tie_weights(generator, discriminator)

    model = to_distributed_model(Electra(
        LogitsAdapter(generator),
        LogitsAdapter(discriminator),
        num_tokens = vocab_size,
        mask_token_id = mask_token_id,
        pad_token_id = pad_token_id,
        mask_prob = args.model_mask_prob,
        mask_ignore_token_ids = [tokenizer.vocab['[CLS]'], tokenizer.vocab['[SEP]']],
        random_token_prob = 0.0).to(device))


    #######################
    ## optimizer

    def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
        def lr_lambda(current_step):
            learning_rate = max(0.0, 1. - (float(current_step) / float(num_training_steps)))
            learning_rate *= min(1.0, float(current_step) / float(num_warmup_steps))
            return learning_rate
        return LambdaLR(optimizer, lr_lambda, last_epoch)

    def get_params_without_weight_decay_ln(named_params, weight_decay):
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in named_params if not any(nd in n for nd in no_decay)],
                'weight_decay': weight_decay,
            },
            {
                'params': [p for n, p in named_params if any(nd in n for nd in no_decay)],
                'weight_decay': 0.0,
            },
        ]
        return optimizer_grouped_parameters

    optimizer = torch.optim.AdamW(get_params_without_weight_decay_ln(model.named_parameters(), weight_decay=0.1), lr=args.opt_lr, betas=(0.9, 0.999), eps=1e-08)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.opt_warmup_steps, num_training_steps=args.opt_num_training_steps)
    scaler = torch.cuda.amp.GradScaler(enabled=args.gpu_mixed_precision)


    #######################
    ## train

    t, steps_s, eta_m = time(), 0., 0

    for step in range(args.opt_num_training_steps+1):
        input_ids, input_mask, segment_ids = next(ds_train_loader)

        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)

        assert input_ids.shape[1] <= args.data_max_seq_length

        optimizer.zero_grad()

        with torch.cuda.amp.autocast(enabled=args.gpu_mixed_precision):
            loss, loss_mlm, loss_disc, acc_gen, acc_disc, disc_labels, disc_pred = model(input_ids, attention_mask=input_mask, token_type_ids=segment_ids)

        scaler.scale(loss).backward()
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        scaler.step(optimizer)
        scaler.update()
        scheduler.step()

        metrics = {
            'step': (step, '{:8d}'),
            'loss': (loss.item(), '{:8.5f}'),
            'loss_mlm': (loss_mlm.item(), '{:8.5f}'),
            'loss_disc': (loss_disc.item(), '{:8.5f}'),
            'acc_gen': (acc_gen.item(), '{:5.3f}'),
            'acc_disc': (acc_disc.item(), '{:5.3f}'),
            'lr': (scheduler.get_last_lr()[0], '{:8.7f}'),
            'steps': (steps_s, '{:4.1f}/s'),
            'eta': (eta_m, '{:4d}m'),
        }

        if step % args.step_log == 0:
            sep = ' ' * 2
            logger.info(sep.join([f'{k}: {v[1].format(v[0])}' for (k, v) in metrics.items()]))

        if step > 0 and step % 100 == 0:
            t2 = time()
            steps_s = 100. / (t2 - t)
            eta_m = int(((args.opt_num_training_steps - step) / steps_s) // 60)
            t = t2

        if step % 200 == 0:
            logger.info(np.array2string(disc_labels[0].cpu().numpy(), threshold=sys.maxsize, max_line_width=sys.maxsize))
            logger.info(np.array2string(disc_pred[0].cpu().numpy(), threshold=sys.maxsize, max_line_width=sys.maxsize))

        if step > 0 and step % args.step_ckpt == 0 and is_master:
            discriminator.electra.save_pretrained(f'{args.output_dir}/ckpt/{step}')


########################################################################################################
## preamble

def set_gpus(gpu):
    torch.cuda.set_device(gpu)


def set_seed(seed):
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)


def set_cuda(deterministic=True):
    if torch.cuda.is_available():
        torch.backends.cudnn.deterministic = deterministic
        torch.backends.cudnn.benchmark = not deterministic


def get_exp_id(file):
    return os.path.splitext(os.path.basename(file))[0]


def get_output_dir(exp_id):
    import datetime
    t = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    output_dir = os.path.join('output/' + exp_id, t)
    os.makedirs(output_dir, exist_ok=True)
    return output_dir


def setup_logging(filename, console=True):
    log_format = logging.Formatter("%(asctime)s : %(message)s")
    logger = logging.getLogger()
    logger.handlers = []
    file_handler = logging.FileHandler(filename)
    file_handler.setFormatter(log_format)
    logger.addHandler(file_handler)
    if console:
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(log_format)
        logger.addHandler(console_handler)
    logger.setLevel(logging.INFO)
    return logger


def copy_source(file, output_dir):
    import shutil
    shutil.copyfile(file, os.path.join(output_dir, os.path.basename(file)))


########################################################################################################
## main

def main():

    # preamble
    exp_id = get_exp_id(__file__)
    output_dir = get_output_dir(exp_id)
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(f'{output_dir}/ckpt', exist_ok=False)
    copy_source(__file__, output_dir)

    # args
    args = arg.parse_to(Args)
    args.output_dir = output_dir
    args.exp_id = exp_id

    # distributed
    if args.distributed_enabled:
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = str(args.distributed_port)
        torch.multiprocessing.spawn(train, nprocs=args.distributed_world_size, args=(args,))
    else:
        train(rank=args.gpu, args=args)


if __name__ == '__main__':
    main()
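

# --- Editor's note: a hedged usage note, not part of the original file. ---
# `arg.parse_to` turns every field of `Args` into a `--kebab-case` flag, so a
# run might look like the command below; the data paths are assumptions and
# must point at the output of preprocess.py. One caveat of this arg scheme:
# boolean flags are declared with `type=bool`, so any non-empty string
# (including "False") parses as True; only an empty string turns a flag off,
# e.g. `--distributed-enabled ''`.
#
#   python pretraining/openwebtext/pretrain.py \
#       --data-dir data/openwebtext_features \
#       --data-vocab-file data/vocab.txt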
electra-pytorch-master
pretraining/openwebtext/pretrain.py
import logging
import math
import multiprocessing
import os
import random
import tarfile
from dataclasses import dataclass
from itertools import chain
from functools import partial
from pathlib import Path

import numpy as np

import torch
import torch.utils.data

from pretraining.openwebtext import arg
from pretraining.openwebtext import tokenization

logger = logging.getLogger(__name__)


def parse_tokenizer(tokenizer, text):
    return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))


def create_tokenizer(vocab_file, do_lower_case=True):
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)
    return partial(parse_tokenizer, tokenizer)


def preprocess_owt(tokenizer, src_dir, tmp_dir, trg_dir, n_dataset_building_processes, n_tensors_per_file, max_seq_length=None):
    # Preamble
    logger.info(f'Writing features to {trg_dir}.')
    os.makedirs(trg_dir, exist_ok=False)

    # Crunch files
    trg_dir = Path(trg_dir)
    src_dir = Path(src_dir)
    tmp_dir = Path(tmp_dir)
    archives = os.listdir(src_dir)
    n_archives_per_job = math.ceil(len(archives) / n_dataset_building_processes)
    job_archives = [
        archives[i * n_archives_per_job : (i + 1) * n_archives_per_job]
        for i in range(n_dataset_building_processes)
    ]

    logger.info(f'Processing {len(archives)} archives.')
    assert len(archives) > 0

    if n_dataset_building_processes == 1:
        feature_set_paths = preprocess_owt_job(tokenizer, src_dir, tmp_dir, trg_dir, job_archives, n_tensors_per_file, max_seq_length, job_id=0)
    else:
        pool = multiprocessing.Pool(processes=n_dataset_building_processes)
        preprocess_owt_job_partial = partial(preprocess_owt_job, tokenizer, src_dir, tmp_dir, trg_dir, job_archives, n_tensors_per_file, max_seq_length)
        feature_sets = pool.map(preprocess_owt_job_partial, range(n_dataset_building_processes))
        feature_set_paths = [file_path for feature_set in feature_sets for file_path in feature_set]

    return feature_set_paths


def preprocess_owt_job(tokenizer, src_dir, tmp_dir, trg_dir, job_archives, n_tensors_per_file, max_seq_length, job_id=0):
    '''
    OpenWebText is saved under the following format:
    openwebtext.zip
        |-> archive_xxx.zip
            |-> file_xxx.txt
            |-> file_xxz.txt
            ...
        |-> archive_xxz.zip
            |-> file_xxy.txt
            ...
        ...
    '''

    # Preamble
    os.makedirs(tmp_dir, exist_ok=True)

    # Process
    feature_index = 0
    feature_set_paths = []
    features = []
    for archive_id, archive in enumerate(job_archives[job_id]):
        if os.path.isdir(src_dir / archive):
            logger.info(f'Ignoring rogue directory {src_dir / archive}.')
            continue

        logger.info(f'Job {job_id}: Processing {archive_id}/{len(job_archives[job_id])} {src_dir / archive}.')

        with tarfile.open(src_dir / archive) as t:
            extracted_archive = tmp_dir / f'{archive}-extracted'
            t.extractall(extracted_archive)

        for file in os.listdir(extracted_archive):
            file_path = extracted_archive / file
            with open(file_path, 'r') as f:
                for line in f.readlines():
                    line = line.strip()
                    if len(line) > 2:
                        encoding = tokenizer(line)
                        features.append(torch.tensor(encoding))

        while len(features) > n_tensors_per_file:
            feature_set_path = trg_dir / f'feature_set_{job_id}_{feature_index}.pt'
            torch.save(features[:n_tensors_per_file], feature_set_path)
            features = features[n_tensors_per_file:]
            feature_index += 1
            feature_set_paths.append(feature_set_path)

    # Serialize
    if len(features) > 0:
        feature_set_path = trg_dir / f'feature_set_{job_id}_{feature_index}.pt'
        torch.save(features, feature_set_path)
        feature_set_paths.append(feature_set_path)

    return feature_set_paths


@dataclass(frozen=True)
class Args:
    src_dir: arg.Str = 'data/openwebtext'
    trg_dir: arg.Str = 'data/openwebtext_features'
    tmp_dir: arg.Str = '/tmp/owt'
    vocab_file: arg.Str = 'data/vocab.txt'
    n_dataset_building_processes: arg.Int = 32
    n_tensors_per_file: arg.Int = 2048
    max_seq_length: arg.Int = 128


def main():
    args = arg.parse_to(Args)

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO
    )

    tokenizer = create_tokenizer(args.vocab_file)
    preprocess_owt(tokenizer=tokenizer, src_dir=args.src_dir, tmp_dir=args.tmp_dir, trg_dir=args.trg_dir, n_dataset_building_processes=args.n_dataset_building_processes, n_tensors_per_file=args.n_tensors_per_file, max_seq_length=args.max_seq_length)


if __name__ == '__main__':
    main()
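

# --- Editor's note: a hedged usage note, not part of the original file. ---
# Typical invocation, assuming the OpenWebText tar archives sit under
# data/openwebtext and a BERT WordPiece vocab is available (both paths are
# assumptions); the flag names follow the `--kebab-case` mangling in arg.py:
#
#   python -m pretraining.openwebtext.preprocess \
#       --src-dir data/openwebtext \
#       --trg-dir data/openwebtext_features \
#       --vocab-file data/vocab.txt \
#       --n-dataset-building-processes 8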
electra-pytorch-master
pretraining/openwebtext/preprocess.py
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tokenization classes, the same as used for BERT."""

import collections
import unicodedata


def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if isinstance(text, str):
        return text
    elif isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    else:
        raise ValueError("Unsupported string type: %s" % (type(text)))


def printable_text(text):
    """Returns text encoded in a way suitable for print."""
    # These functions want `str` for both Python2 and Python3, but in one case
    # it's a Unicode string and in the other it's a byte string.
    if isinstance(text, str):
        return text
    elif isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    else:
        raise ValueError("Unsupported string type: %s" % (type(text)))


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    index = 0
    with open(vocab_file, "r") as reader:
        while True:
            token = convert_to_unicode(reader.readline())
            if not token:
                break
            token = token.strip()
            vocab[token] = index
            index += 1
    return vocab


def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab."""
    output = []
    for item in items:
        output.append(vocab[item])
    return output


def convert_tokens_to_ids(vocab, tokens):
    return convert_by_vocab(vocab, tokens)


def convert_ids_to_tokens(inv_vocab, ids):
    return convert_by_vocab(inv_vocab, ids)


def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens


class FullTokenizer(object):
    """Runs end-to-end tokenization."""

    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        return convert_by_vocab(self.inv_vocab, ids)


class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)

        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """

        text = convert_to_unicode(text)

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens


def _is_whitespace(char):
    """Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
    # as whitespace since they are generally considered as such.
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    cat = unicodedata.category(char)
    if cat == "Zs":
        return True
    return False


def _is_control(char):
    """Checks whether `chars` is a control character."""
    # These are technically control characters but we count them as whitespace
    # characters.
    if char == "\t" or char == "\n" or char == "\r":
        return False
    cat = unicodedata.category(char)
    if cat.startswith("C"):
        return True
    return False


def _is_punctuation(char):
    """Checks whether `chars` is a punctuation character."""
    cp = ord(char)
    # We treat all non-letter/number ASCII as punctuation.
    # Characters such as "^", "$", and "`" are not in the Unicode
    # Punctuation class but we treat them as punctuation anyways, for
    # consistency.
    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
            (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
electra-pytorch-master
pretraining/openwebtext/tokenization.py
import math
import os
import random
from dataclasses import dataclass
from itertools import chain
from functools import partial
from pathlib import Path

import numpy as np

import torch
import torch.utils.data

from openwebtext import tokenization


class ExampleBuilder:
    """Given a stream of input text, creates pretraining examples."""

    def __init__(self, vocab, max_length):
        self._vocab = vocab
        self._current_sentences = []
        self._current_length = 0
        self._max_length = max_length
        self._target_length = max_length

    def add_line(self, bert_tokids):
        """Adds a line of text to the current example being built."""
        # line = line.strip().replace("\n", " ")
        # if (not line) and self._current_length != 0:  # empty lines separate docs
        #     return self._create_example()
        # bert_tokens = self._tokenizer.tokenize(line)
        # bert_tokids = self._tokenizer.convert_tokens_to_ids(bert_tokens)
        self._current_sentences.append(bert_tokids)
        self._current_length += len(bert_tokids)
        if self._current_length >= self._target_length:
            return self._create_example()
        return None

    def _create_example(self):
        """Creates a pre-training example from the current list of sentences."""
        # small chance to only have one segment as in classification tasks
        if random.random() < 0.1:
            first_segment_target_length = 100000
        else:
            # -3 due to not yet having [CLS]/[SEP] tokens in the input text
            first_segment_target_length = (self._target_length - 3) // 2

        first_segment = []
        second_segment = []
        for sentence in self._current_sentences:
            # the sentence goes to the first segment if (1) the first segment is
            # empty, (2) the sentence doesn't put the first segment over length or
            # (3) 50% of the time when it does put the first segment over length
            if (len(first_segment) == 0
                    or len(first_segment) + len(sentence) < first_segment_target_length
                    or (len(second_segment) == 0
                        and len(first_segment) < first_segment_target_length
                        and random.random() < 0.5)):
                first_segment += sentence
            else:
                second_segment += sentence

        # trim to max_length while accounting for not-yet-added [CLS]/[SEP] tokens
        first_segment = first_segment[:self._max_length - 2]
        second_segment = second_segment[:max(0, self._max_length - len(first_segment) - 3)]

        # prepare to start building the next example
        self._current_sentences = []
        self._current_length = 0
        # small chance for random-length instead of max_length-length example
        if random.random() < 0.05:
            self._target_length = random.randint(5, self._max_length)
        else:
            self._target_length = self._max_length

        return self._make_tf_example(first_segment, second_segment)

    def _make_tf_example(self, first_segment, second_segment):
        """Converts two "segments" of text into a tf.train.Example."""
        vocab = self._vocab
        input_ids = [vocab["[CLS]"]] + first_segment + [vocab["[SEP]"]]
        segment_ids = [0] * len(input_ids)
        if second_segment:
            input_ids += second_segment + [vocab["[SEP]"]]
            segment_ids += [1] * (len(second_segment) + 1)
        input_mask = [1] * len(input_ids)
        input_ids += [0] * (self._max_length - len(input_ids))
        input_mask += [0] * (self._max_length - len(input_mask))
        segment_ids += [0] * (self._max_length - len(segment_ids))

        def create_int_feature(tensors):
            return torch.tensor(tensors)

        tf_example = {
            "input_ids": create_int_feature(input_ids),
            "input_mask": create_int_feature(input_mask),
            "segment_ids": create_int_feature(segment_ids)
        }
        return tf_example


class OpenWebTextDataset(torch.utils.data.IterableDataset):
    def __init__(self, feature_set_paths, n_tensors_per_file):
        self.feature_set_paths = feature_set_paths
        self.n_tensors_per_file = n_tensors_per_file

    @staticmethod
    def parse_file(file_index):
        try:
            features = torch.load(str(file_index))
            yield from features
        except RuntimeError:
            raise RuntimeError(f'Corrupted file {file_index}')

    def __len__(self):
        return len(self.feature_set_paths) * self.n_tensors_per_file

    def __iter__(self):
        return chain.from_iterable(map(self.parse_file, self.feature_set_paths))


class ExampleBuilderDataset(torch.utils.data.IterableDataset):
    def __init__(self, dataset, builder):
        self.dataset = dataset
        self.builder = builder

    def __len__(self):
        return len(self.dataset)

    def __iter__(self):
        def create_example():
            while True:
                token_ids = list(next(self.dataset).cpu().numpy())
                example = self.builder.add_line(token_ids)
                if example:
                    return example

        while True:
            yield create_example()


def cycle(iterable):
    while True:
        for x in iterable:
            yield x


def new_tokenizer(vocab_file, do_lower_case=True):
    return tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)


def parse_tokenizer(tokenizer, text):
    return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))


def create_tokenizer(vocab_file, do_lower_case=True):
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)
    return partial(parse_tokenizer, tokenizer)


def load_owt(owt_dir, n_tensors_per_file):
    owt_dir_path = Path(owt_dir)
    feature_set_paths = [owt_dir_path / feature_set_path for feature_set_path in os.listdir(owt_dir_path)]
    np.random.shuffle(feature_set_paths)
    assert len(feature_set_paths) > 0
    return OpenWebTextDataset(feature_set_paths, n_tensors_per_file=n_tensors_per_file)


def wrap_example_builder(dataset, vocab, max_length):
    return ExampleBuilderDataset(cycle(iter(dataset)), ExampleBuilder(vocab, max_length))
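

# --- Editor's note: a hedged usage sketch, not part of the original file. ---
# It shows the intended chain: serialized token tensors -> example builder ->
# fixed-length pretraining examples. The feature directory and vocab path are
# assumptions (they correspond to the outputs of preprocess.py).
if __name__ == "__main__":
    tokenizer = new_tokenizer(vocab_file="data/vocab.txt")  # assumed path
    ds = wrap_example_builder(
        load_owt(owt_dir="data/openwebtext_features", n_tensors_per_file=2048),  # assumed path
        vocab=tokenizer.vocab,
        max_length=128,
    )
    example = next(iter(ds))
    print(example["input_ids"].shape)  # -> torch.Size([128])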
electra-pytorch-master
pretraining/openwebtext/dataset.py
import argparse
import dataclasses

__all__ = ('Arg', 'Int', 'Float', 'Bool', 'Str', 'Choice', 'parse_to')


class Arg:
    def __init__(self, **kwargs):
        super().__init__()
        self.kwargs = kwargs


class Int(Arg):
    def __init__(self, **kwargs):
        super().__init__(type=int, **kwargs)


class Float(Arg):
    def __init__(self, **kwargs):
        super().__init__(type=float, **kwargs)


class Bool(Arg):
    def __init__(self, **kwargs):
        super().__init__(type=bool, **kwargs)


class Str(Arg):
    def __init__(self, **kwargs):
        super().__init__(type=str, **kwargs)


class _MetaChoice(type):
    def __getitem__(self, item):
        return self(choices=list(item), type=item)


class Choice(Arg, metaclass=_MetaChoice):
    def __init__(self, choices, **kwargs):
        super().__init__(choices=choices, **kwargs)


def parse_to(container_class, **kwargs):
    def mangle_name(name):
        return '--' + name.replace('_', '-')

    parser = argparse.ArgumentParser(description=container_class.__doc__)
    for field in dataclasses.fields(container_class):
        name = field.name
        default = field.default
        value_or_class = field.type
        if isinstance(value_or_class, type):
            value = value_or_class(default=default)
        else:
            value = value_or_class
            value.kwargs['default'] = default
        parser.add_argument(mangle_name(name), **value.kwargs)

    arg_dict = parser.parse_args(**kwargs)
    return container_class(**vars(arg_dict))
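

# --- Editor's note: a hedged usage sketch, not part of the original file. ---
# `parse_to` maps each annotated dataclass field to an argparse flag: the field
# name becomes `--kebab-case`, the annotation (Int, Float, ...) supplies the
# argparse `type`, and the field default becomes the flag default. `Demo` is a
# made-up container for illustration.
if __name__ == "__main__":
    @dataclasses.dataclass
    class Demo:
        """demo of parse_to"""
        lr: Float = 1e-3
        steps: Int = 100

    # `python arg.py --lr 0.01 --steps 10` -> Demo(lr=0.01, steps=10)
    print(parse_to(Demo))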
electra-pytorch-master
pretraining/openwebtext/arg.py
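# Minimal sketch of how parse_to is meant to be driven: annotate a dataclass
# with the Arg subclasses above and let argparse fill it in. Field names and
# defaults here are made up for illustration.
from dataclasses import dataclass

@dataclass
class TrainConfig:
    """Toy training configuration."""
    lr: Float() = 1e-4
    steps: Int() = 1000
    run_name: Str() = 'debug'

# config = parse_to(TrainConfig, args=['--lr', '3e-4'])
# -> TrainConfig(lr=0.0003, steps=1000, run_name='debug')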
from setuptools import setup, find_packages setup( name = 'flash-genomics-model', packages = find_packages(exclude=[]), version = '0.0.1', license='MIT', description = 'Flash Genomics Model (FGM)', author = 'Phil Wang', author_email = '[email protected]', long_description_content_type = 'text/markdown', url = 'https://github.com/lucidrains/flash-genomics-model', keywords = [ 'artificial intelligence', 'deep learning', 'transformers', 'attention mechanism', 'long context', 'genomics', 'pre-training' ], install_requires=[ 'einops>=0.6.1', 'MEGABYTE-pytorch', 'torch>=1.6', ], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', ], )
flash-genomics-model-main
setup.py
from flash_genomics_model.flash_genomics_model import FlashGenomicsModel
flash-genomics-model-main
flash_genomics_model/__init__.py
from collections import namedtuple from functools import wraps from packaging import version import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange # constants EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) # helpers def exists(val): return val is not None def once(fn): called = False @wraps(fn) def inner(x): nonlocal called if called: return called = True return fn(x) return inner print_once = once(print) # main class class Attend(nn.Module): def __init__( self, causal = False, dropout = 0., flash = False ): super().__init__() self.dropout = dropout self.attn_dropout = nn.Dropout(dropout) self.causal = causal self.flash = flash assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above' # determine efficient attention configs for cuda and cpu self.cpu_config = EfficientAttentionConfig(True, True, True) self.cuda_config = None if not torch.cuda.is_available() or not flash: return device_properties = torch.cuda.get_device_properties(torch.device('cuda')) if device_properties.major == 8 and device_properties.minor == 0: print_once('A100 GPU detected, using flash attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(True, False, False) else: print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(False, True, True) def get_mask(self, i, j, device): return torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 1) def flash_attn(self, q, k, v, mask = None, attn_bias = None): _, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device # single headed key / values if k.ndim == 3: k = rearrange(k, 'b n d -> b 1 n d') if v.ndim == 3: v = rearrange(v, 'b n d -> b 1 n d') # Check if mask exists and expand to compatible shape # The mask is B L, so it would have to be expanded to B H N L if exists(mask) and mask.ndim != 4: mask = rearrange(mask, 'b j -> b 1 1 j') mask = mask.expand(-1, heads, q_len, -1) # Check if there is a compatible device for flash attention config = self.cuda_config if is_cuda else self.cpu_config causal = self.causal # handle attention bias if exists(attn_bias): mask_value = -torch.finfo(q.dtype).max // 2 causal_mask = self.get_mask(q_len, k_len, device) attn_bias = attn_bias.masked_fill(causal_mask, mask_value) if exists(mask): attn_bias = attn_bias.masked_fill(~mask, mask_value) mask = attn_bias causal = False # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale with torch.backends.cuda.sdp_kernel(**config._asdict()): out = F.scaled_dot_product_attention( q, k, v, attn_mask = mask, dropout_p = self.dropout if self.training else 0., is_causal = causal ) return out def forward(self, q, k, v, mask = None, attn_bias = None): """ einstein notation b - batch h - heads n, i, j - sequence length (base sequence length, source, target) d - feature dimension """ q_len, k_len, device = q.shape[-2], k.shape[-2], q.device scale = q.shape[-1] ** -0.5 kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d' if self.flash: return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias) # similarity sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale # attention bias if exists(attn_bias): sim = sim + attn_bias # causal mask if self.causal: causal_mask = self.get_mask(q_len, k_len, device) sim 
= sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)

        # key padding mask (the flash path handles this inside flash_attn)

        if exists(mask):
            if mask.ndim != 4:
                mask = rearrange(mask, 'b j -> b 1 1 j')
            sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)

        # attention

        attn = sim.softmax(dim=-1)
        attn = self.attn_dropout(attn)

        # aggregate values

        out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v)

        return out
flash-genomics-model-main
flash_genomics_model/attend.py
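# Illustrative smoke test for Attend; shapes follow the einstein notation in
# forward() ((batch, heads, seq, dim_head) for q, while k / v may drop the
# heads dimension for single-headed key / values). flash=False keeps it
# CPU-friendly; all sizes are arbitrary.
import torch

attend = Attend(causal = True, dropout = 0.1, flash = False)

q = torch.randn(2, 8, 1024, 64)
k = torch.randn(2, 8, 1024, 64)
v = torch.randn(2, 8, 1024, 64)

out = attend(q, k, v)   # (2, 8, 1024, 64)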
import torch import torch.nn.functional as F from torch import nn, einsum, Tensor from einops import rearrange, reduce from flash_genomics_model.attend import Attend # functions # attention class Attention(nn.Module): def __init__( self, dim, dim_head = 64, heads = 8, flash = True ): super().__init__() self.heads = heads dim_inner = heads * dim_head self.attend = Attend(flash = flash) self.to_qkv = nn.Linear(dim, dim_inner * 3, bias = False) self.to_out = nn.Linear(dim_inner, dim, bias = False) def forward( self, x, mask = None ): h = self.heads q, k, v = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v)) out = self.attend(q, k, v, mask = mask) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) # main class class FlashGenomicsModel(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x
flash-genomics-model-main
flash_genomics_model/flash_genomics_model.py
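# Illustrative shape check for the Attention block above; dimensions are arbitrary.
import torch

attn = Attention(dim = 512, dim_head = 64, heads = 8, flash = False)

x = torch.randn(2, 1024, 512)       # (batch, seq, dim)
mask = torch.ones(2, 1024).bool()   # True = attend to this position

out = attn(x, mask = mask)          # (2, 1024, 512)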
from setuptools import setup, find_packages setup( name = 'bioseq-clasp', packages = find_packages(), version = '0.0.1', license='MIT', description = 'CLASP - CLIP for biosequences and their annotation data', author = 'MicPie', author_email = '', url = 'https://github.com/MicPie/clasp', keywords = [ 'artificial intelligence', 'deep learning', 'contrastive learning', 'proteomics' ], install_requires=[ 'einops>=0.3', 'torch>=1.6' ], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', ], )
clasp-main
setup.py
import torch import torch.nn as nn from operator import itemgetter from torch.autograd.function import Function from torch.utils.checkpoint import get_device_states, set_device_states # for routing arguments into the functions of the reversible layer def route_args(router, args, depth): routed_args = [(dict(), dict()) for _ in range(depth)] matched_keys = [key for key in args.keys() if key in router] for key in matched_keys: val = args[key] for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])): new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes) routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args}) return routed_args # following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html class Deterministic(nn.Module): def __init__(self, net): super().__init__() self.net = net self.cpu_state = None self.cuda_in_fwd = None self.gpu_devices = None self.gpu_states = None def record_rng(self, *args): self.cpu_state = torch.get_rng_state() if torch.cuda._initialized: self.cuda_in_fwd = True self.gpu_devices, self.gpu_states = get_device_states(*args) def forward(self, *args, record_rng = False, set_rng = False, **kwargs): if record_rng: self.record_rng(*args) if not set_rng: return self.net(*args, **kwargs) rng_devices = [] if self.cuda_in_fwd: rng_devices = self.gpu_devices with torch.random.fork_rng(devices=rng_devices, enabled=True): torch.set_rng_state(self.cpu_state) if self.cuda_in_fwd: set_device_states(self.gpu_devices, self.gpu_states) return self.net(*args, **kwargs) # heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py # once multi-GPU is confirmed working, refactor and send PR back to source class ReversibleBlock(nn.Module): def __init__(self, f, g): super().__init__() self.f = Deterministic(f) self.g = Deterministic(g) def forward(self, x, f_args = {}, g_args = {}): x1, x2 = torch.chunk(x, 2, dim=2) y1, y2 = None, None with torch.no_grad(): y1 = x1 + self.f(x2, record_rng=self.training, **f_args) y2 = x2 + self.g(y1, record_rng=self.training, **g_args) return torch.cat([y1, y2], dim=2) def backward_pass(self, y, dy, f_args = {}, g_args = {}): y1, y2 = torch.chunk(y, 2, dim=2) del y dy1, dy2 = torch.chunk(dy, 2, dim=2) del dy with torch.enable_grad(): y1.requires_grad = True gy1 = self.g(y1, set_rng=True, **g_args) torch.autograd.backward(gy1, dy2) with torch.no_grad(): x2 = y2 - gy1 del y2, gy1 dx1 = dy1 + y1.grad del dy1 y1.grad = None with torch.enable_grad(): x2.requires_grad = True fx2 = self.f(x2, set_rng=True, **f_args) torch.autograd.backward(fx2, dx1, retain_graph=True) with torch.no_grad(): x1 = y1 - fx2 del y1, fx2 dx2 = dy2 + x2.grad del dy2 x2.grad = None x = torch.cat([x1, x2.detach()], dim=2) dx = torch.cat([dx1, dx2], dim=2) return x, dx class _ReversibleFunction(Function): @staticmethod def forward(ctx, x, blocks, args): ctx.args = args for block, kwarg in zip(blocks, args): x = block(x, **kwarg) ctx.y = x.detach() ctx.blocks = blocks return x @staticmethod def backward(ctx, dy): y = ctx.y args = ctx.args for block, kwargs in zip(ctx.blocks[::-1], args[::-1]): y, dy = block.backward_pass(y, dy, **kwargs) return dy, None, None class SequentialSequence(nn.Module): def __init__(self, layers, args_route = {}): super().__init__() assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers' self.layers = 
layers self.args_route = args_route def forward(self, x, **kwargs): args = route_args(self.args_route, kwargs, len(self.layers)) layers_and_args = list(zip(self.layers, args)) for (f, g), (f_args, g_args) in layers_and_args: x = x + f(x, **f_args) x = x + g(x, **g_args) return x class ReversibleSequence(nn.Module): def __init__(self, blocks, args_route = {}): super().__init__() self.args_route = args_route self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks]) def forward(self, x, **kwargs): x = torch.cat([x, x], dim=-1) blocks = self.blocks args = route_args(self.args_route, kwargs, len(blocks)) args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args)) out = _ReversibleFunction.apply(x, blocks, args) return torch.stack(out.chunk(2, dim=-1)).sum(dim=0)
clasp-main
clasp/reversible.py
import torch from torch import nn, einsum import torch.nn.functional as F class CLASP(nn.Module): def __init__( self, *, text_encoder, bioseq_encoder ): super().__init__() self.text_encoder = text_encoder self.bioseq_encoder = bioseq_encoder self.temperature = nn.Parameter(torch.tensor(1.)) def forward( self, text, bioseq, text_mask = None, bioseq_mask = None, return_loss = False ): b, device = text.shape[0], text.device text_latents = self.text_encoder(text, mask = text_mask) bioseq_latents = self.bioseq_encoder(bioseq, mask = bioseq_mask) text_latents, bioseq_latents = map(lambda t: F.normalize(t, p = 2, dim = -1), (text_latents, bioseq_latents)) temp = self.temperature.exp() if not return_loss: sim = einsum('n d, n d -> n', text_latents, bioseq_latents) * temp return sim sim = einsum('i d, j d -> i j', text_latents, bioseq_latents) * temp labels = torch.arange(b, device = device) loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2 return loss.mean()
clasp-main
clasp/clasp.py
from clasp.clasp import CLASP from clasp.transformer import Transformer from clasp.simple_tokenizer import tokenize, VOCAB_SIZE from clasp.utils import basic_rand_sampler, basic_aa_tokenizer, CLASPDataset
clasp-main
clasp/__init__.py
import torch
from torch.utils.data import Dataset, DataLoader

import random
import time


def basic_rand_sampler(seq, sample_len):
    """
    Basic random text sampler.
    If sample_len is greater than the length of the seq, the seq is returned.
    """
    seq_len = len(seq)
    if seq_len > sample_len:
        start_idx = random.randint(0, seq_len - sample_len)
        end_idx = start_idx + sample_len
        return seq[start_idx:end_idx]
    else:
        return seq


identity_sampler = lambda x: x


def basic_aa_tokenizer(seq, context_length, return_mask=True):
    """
    Maps each of the 22 amino acid characters in `aa` (the 20 standard
    proteinogenic amino acids plus O and U) to an integer between 0 and 21.
    Unknown characters get mapped to 22.
    """
    aa = "ACDEFGHIKLMNOPQRSTUVWY"
    d = {a: i for i, a in enumerate(aa)}
    seq_len = len(seq)
    seq_empty = torch.zeros(context_length - len(seq), dtype=torch.long)
    seq_tok = torch.tensor([d[a] if a in aa else 22 for a in seq], dtype=torch.long)
    seq = torch.cat([seq_tok, seq_empty], dim=0)
    if return_mask:
        mask = torch.zeros_like(seq).bool()
        mask[0:seq_len+1] = True  # marks the tokenized prefix (plus one padding position) as valid
        return seq, mask
    else:
        return seq


class CLASPDataset(Dataset):
    """
    Basic CLASP dataset that loads the preprocessed csv file into RAM.
    path: path to the csv file
    """
    def __init__(self, path, text_sampler, bioseq_sampler, text_tok, bioseq_tok):
        super().__init__()
        self.path = path

        tp = time.time()
        with open(path, "r") as reader:
            self.data = reader.readlines()
        print(f"Load data time: {time.time() - tp:.3f} s")

        self.cols = self.data.pop(0).split(",")
        self.len = len(self.data)

        self.text_sampler = text_sampler
        self.bioseq_sampler = bioseq_sampler
        self.text_tok = text_tok
        self.bioseq_tok = bioseq_tok

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        sample = self.data[idx].rstrip("\n")  # strip the trailing newline
        sample = sample.split(",")
        sample = [x for x in sample if len(x) > 0]

        text = " ".join(sample[:-2])
        bioseq = sample[-1]

        text = self.text_sampler(text)
        bioseq = self.bioseq_sampler(bioseq)

        text, text_mask = self.text_tok(text)
        bioseq, bioseq_mask = self.bioseq_tok(bioseq)

        return text, text_mask, bioseq, bioseq_mask
clasp-main
clasp/utils.py
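# Quick illustration of the samplers and tokenizer above; the sequence and
# lengths are arbitrary, and the commented dataset wiring assumes the csv
# produced by preproc/preprocess_data.py.
from functools import partial

seq = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"

sampled = basic_rand_sampler(seq, 20)                         # random 20-char window
toks, mask = basic_aa_tokenizer(sampled, context_length=64)   # (64,) LongTensor, zero padded

# dataset = CLASPDataset(
#     path = "data/uniprot_full.csv",
#     text_sampler = partial(basic_rand_sampler, sample_len=1024),
#     bioseq_sampler = partial(basic_rand_sampler, sample_len=512),
#     text_tok = ...,   # e.g. tokenize from clasp.simple_tokenizer with return_mask=True
#     bioseq_tok = partial(basic_aa_tokenizer, context_length=512),
# )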
from functools import partial from itertools import islice, cycle from inspect import isfunction from math import ceil import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat from clasp.positional import SinuEmb, apply_rotary_pos_emb from clasp.reversible import ReversibleSequence, SequentialSequence # helpers def exists(val): return val is not None def uniq(arr): return{el: True for el in arr}.keys() def cast_tuple(val, depth = 1): if isinstance(val, list): val = tuple(val) return val if isinstance(val, tuple) else (val,) * depth def default(val, d): if exists(val): return val return d() if isfunction(d) else d def max_neg_value(t): return -torch.finfo(t.dtype).max # attention class Attention(nn.Module): def __init__(self, dim, seq_len, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.seq_len = seq_len self.scale = dim_head ** -0.5 self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def apply_rel_pos_emb(self, q, k, rel_pos_emb = None): if not exists(rel_pos_emb): return q, k (cls_q, q), (cls_k, k) = map(lambda t: (t[:, :, :1], t[:, :, 1:]), (q, k)) q, k = apply_rotary_pos_emb(q, k, rel_pos_emb) q, k = map(lambda t: torch.cat(t, dim = -2), ((cls_q, q), (cls_k, k))) return q, k def forward(self, x, mask = None, rel_pos_emb = None): b, n, _, h, device = *x.shape, self.heads, x.device qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) if exists(rel_pos_emb): q, k = self.apply_rel_pos_emb(q, k, rel_pos_emb) dots = torch.einsum('b h i d, b h j d -> b h i j', q, k) * self.scale mask_value = max_neg_value(dots) if exists(mask): mask = rearrange(mask, 'b j -> b () () j') dots.masked_fill_(~mask, mask_value) del mask attn = dots.softmax(dim=-1) out = torch.einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') out = self.to_out(out) return out # microsoft sparse attention CUDA kernel class SparseAttention(Attention): def __init__( self, *args, block_size = 16, num_random_blocks = None, **kwargs ): super().__init__(*args, **kwargs) from deepspeed.ops.sparse_attention import SparseSelfAttention, VariableSparsityConfig self.block_size = block_size num_random_blocks = default(num_random_blocks, self.seq_len // block_size // 4) self.attn_fn = SparseSelfAttention( sparsity_config = VariableSparsityConfig( num_heads = self.heads, block = self.block_size, num_random_blocks = num_random_blocks, attention = 'unidirectional' if self.causal else 'bidirectional' ), max_seq_length = self.seq_len, attn_mask_mode = 'add' ) def forward(self, x, mask = None, rel_pos_emb = None): b, n, _, h, device = *x.shape, self.heads, x.device remainder = n % self.block_size mask = default(mask, lambda: torch.ones(b, n, device = device).bool()) if remainder > 0: padding = self.block_size - remainder x = F.pad(x, (0, 0, 0, padding), value = 0) mask = F.pad(mask, (0, padding), value = False) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) q, k = self.apply_rel_pos_emb(q, k, rel_pos_emb) key_pad_mask = None if exists(mask): key_pad_mask = ~mask out = self.attn_fn(q, k, v, key_padding_mask = key_pad_mask) out = rearrange(out, 'b h n d -> b n (h d)') out = self.to_out(out) return out[:, :n] # classes # https://arxiv.org/abs/2103.17239 class LayerScale(nn.Module): def 
__init__(self, dim, depth, fn): super().__init__() if depth <= 18: init_eps = 0.1 elif depth > 18 and depth <= 24: init_eps = 1e-5 else: init_eps = 1e-6 scale = torch.zeros(1, 1, dim).fill_(init_eps) self.scale = nn.Parameter(scale) self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) * self.scale class PreNorm(nn.Module): def __init__(self, dim, fn): super().__init__() self.norm = nn.LayerNorm(dim) self.fn = fn def forward(self, x, **kwargs): return self.fn(self.norm(x), **kwargs) class GEGLU(nn.Module): def forward(self, x): x, gates = x.chunk(2, dim = -1) return x * F.gelu(gates) class FeedForward(nn.Module): def __init__(self, dim, dropout = 0., mult = 4.): super().__init__() self.net = nn.Sequential( nn.Linear(dim, dim * mult * 2), GEGLU(), nn.Dropout(dropout), nn.Linear(dim * mult, dim) ) def forward(self, x): return self.net(x) class Transformer(nn.Module): def __init__( self, *, num_tokens, dim, depth, seq_len, heads = 8, dim_head = 64, ff_mult = 4, attn_dropout = 0., ff_dropout = 0., attn_types = None, image_fmap_size = None, sparse_attn = False, rel_pos_emb = True, reversible = False ): super().__init__() self.token_emb = nn.Embedding(num_tokens, dim) self.cls_token = nn.Parameter(torch.randn(dim)) # positional embeddings self.pos_emb = SinuEmb(dim, seq_len + 1) self.rel_pos_emb = SinuEmb(dim_head, seq_len) if rel_pos_emb else None # layers layers = nn.ModuleList([]) sparse_layer = cast_tuple(sparse_attn, depth) attn_types = default(attn_types, ('full',)) attn_types = cast_tuple(attn_types) attn_type_layer = islice(cycle(attn_types), depth) for ind, sparse_attn, attn_type in zip(range(depth), sparse_layer, attn_type_layer): if attn_type == 'full': attn_class = Attention elif attn_type == 'sparse': attn_class = SparseAttention else: raise ValueError(f'attention type "{attn_type}" is not valid') layers.append(nn.ModuleList([ LayerScale(dim, ind + 1, PreNorm(dim, attn_class(dim, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout))), LayerScale(dim, ind + 1, PreNorm(dim, FeedForward(dim, mult = ff_mult, dropout = ff_dropout))) ])) execute_type = ReversibleSequence if reversible else SequentialSequence route_attn = ((True, False),) * depth attn_route_map = {'mask': route_attn, 'rel_pos_emb': route_attn} self.net = execute_type(layers, args_route = attn_route_map) self.layers = execute_type(layers) self.norm = nn.LayerNorm(dim) def forward(self, x, mask = None): b, n, device = *x.shape, x.device x = self.token_emb(x) rel_pos_emb = self.rel_pos_emb(x) if exists(self.rel_pos_emb) else None cls_tokens = repeat(self.cls_token, 'd -> b () d', b = b) x = torch.cat((cls_tokens, x), dim = 1) if exists(mask): mask = F.pad(mask, (1, 0), value = True) pos_emb = self.pos_emb(x) x += rearrange(pos_emb, 'n d -> () n d') x = self.net(x) return self.norm(x[:, 0])
clasp-main
clasp/transformer.py
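# Standalone shape check for the Transformer above; hyperparameters are toy values.
import torch

enc = Transformer(num_tokens = 1000, dim = 128, depth = 2, seq_len = 64, heads = 4, dim_head = 32)

tokens = torch.randint(0, 1000, (2, 64))
mask = torch.ones(2, 64).bool()

latent = enc(tokens, mask = mask)   # (2, 128): layer-normed CLS embedding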
# take from https://github.com/openai/CLIP/blob/main/clip/simple_tokenizer.py

import torch

import html
import os
from functools import lru_cache
from pathlib import Path

import ftfy
import regex as re

VOCAB_SIZE = 49408

@lru_cache()
def default_bpe():
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/bpe_simple_vocab_16e6.txt")

@lru_cache()
def bytes_to_unicode():
    bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    cs = bs[:]
    n = 0
    for b in range(2 ** 8):
        if b not in bs:
            bs.append(b)
            cs.append(2 ** 8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))

def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

def basic_clean(text):
    text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()

def whitespace_clean(text):
    text = re.sub(r'\s+', ' ', text)
    text = text.strip()
    return text

class SimpleTokenizer(object):
    def __init__(self, bpe_path = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}

        merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
        merges = merges[1:49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]

        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + '</w>' for v in vocab]

        for merge in merges:
            vocab.append(''.join(merge))

        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]

        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token + '</w>'

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break

            first, second = bigram
            new_word = []
            i = 0

            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1

            new_word = tuple(new_word)
            word = new_word

            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()

        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens, remove_start_end = True):
        if remove_start_end:
            # drop <|startoftext|> (49406), <|endoftext|> (49407) and padding (0)
            tokens = [token for token in tokens if token not in (49406, 49407, 0)]
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text

tokenizer = SimpleTokenizer()

def tokenize(texts, context_length = 256, add_start = False, add_end = False, truncate_text = False, return_mask = False):
    if isinstance(texts, str):
        texts = [texts]

    sot_tokens = 
[tokenizer.encoder["<|startoftext|>"]] if add_start else [] eot_tokens = [tokenizer.encoder["<|endoftext|>"]] if add_end else [] all_tokens = [sot_tokens + tokenizer.encode(text) + eot_tokens for text in texts] result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) for i, tokens in enumerate(all_tokens): if len(tokens) > context_length: if truncate_text: tokens = tokens[:context_length] else: raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") result[i, :len(tokens)] = torch.tensor(tokens) if return_mask: return result, result != 0 return result
clasp-main
clasp/simple_tokenizer.py
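# Example call for the tokenize helper above (assumes the bundled
# data/bpe_simple_vocab_16e6.txt merges file is present).
toks, mask = tokenize(
    ["a protein that binds ATP"],
    context_length = 64,
    truncate_text = True,
    return_mask = True,
)
# toks: (1, 64) LongTensor, zero padded; mask is simply toks != 0
# tokenizer.decode(toks[0].tolist()) recovers the lower-cased text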
import torch from torch import nn, einsum from einops import rearrange, repeat # rotary positional embedding helpers def rotate_every_two(x): x = rearrange(x, '... (d j) -> ... d j', j = 2) x1, x2 = x.unbind(dim = -1) x = torch.stack((-x2, x1), dim = -1) return rearrange(x, '... d j -> ... (d j)') def apply_rotary_pos_emb(q, k, sinu_pos): sinu_pos = rearrange(sinu_pos, 'n (j d) -> n j d', j = 2) sin, cos = sinu_pos.unbind(dim = -2) sin, cos = map(lambda t: repeat(t, 'b n -> b (n j)', j = 2), (sin, cos)) q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k)) return q, k class SinuEmb(nn.Module): def __init__(self, dim, max_seq_len): super().__init__() inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim)) t = torch.arange(max_seq_len).type_as(inv_freq) sinusoid_inp = torch.einsum('i , j -> i j', t, inv_freq) emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim = -1) self.register_buffer('emb', emb) def forward(self, x): n = x.shape[1] return self.emb[:n]
clasp-main
clasp/positional.py
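# Sketch of how the rotary pieces above combine; this mirrors the call in
# clasp/transformer.py, where sinu_pos comes from SinuEmb(dim_head, seq_len).
import torch

dim_head, seq_len = 32, 16
sinu = SinuEmb(dim_head, seq_len)

# SinuEmb only inspects x.shape[1], so any (batch, seq, *) tensor selects the rows
sinu_pos = sinu(torch.zeros(1, seq_len, 1))   # (seq_len, dim_head)

q = torch.randn(2, 4, seq_len, dim_head)      # (batch, heads, seq, dim_head)
k = torch.randn(2, 4, seq_len, dim_head)
q, k = apply_rotary_pos_emb(q, k, sinu_pos)   # positions encoded by pairwise rotation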
from datetime import datetime import os from pathlib import Path import subprocess import copy # Note: Run preprocess_data.py file in the main repository directory or the preproc directory of the repository. urls_download = ["https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.dat.gz", "https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_trembl.dat.gz"] print(f"{datetime.now()} - Start downloading files.") path_current_dir = os.path.abspath(os.path.dirname(__file__)) path_data_dir = path_current_dir.split("/preproc")[0]+"/data" paths_raw_data = [] for url in urls_download: print(f"{datetime.now()} - Start downloading from: {url}") Path(path_data_dir).mkdir(exist_ok=True) subprocess.run(["wget", url, "-P", path_data_dir]) paths_raw_data.append(path_data_dir+"/"+url.split("/")[-1]) print(f"{datetime.now()} - Download finished.") print(f"{datetime.now()} - Decompress downloaded files.") paths_data = [] for path_raw in paths_raw_data: path_decomp = path_raw.split(".gz")[0] print(f"{datetime.now()} - Decompress: {path_raw}") subprocess.run(["gunzip", path_raw, path_decomp]) paths_data.append(path_decomp) print(f"{datetime.now()} - Decompressed to: {path_decomp}") print(f"{datetime.now()} - Preprocessing files and saving to csv.") # Raw data setup see user manual: https://web.expasy.org/docs/userman.html linetype_conversion = { "ID": "id", "AC": "accn", # accession number "DT": "date", "DE": "desc", # DEscription "GN": "gene", # Gene Name "OS": "spec", # Organism Species "OG": "orga", # OrGanelle "OC": "clas", # Organism Classification "OX": "taxo", # Organism taxonomy cross-reference "OH": "host", # Organism Host "RN": "refn", # Reference Number "RP": "refp", # Reference Position "RC": "refc", # Reference Comment "RX": "refx", # Reference cross-reference "RG": "refg", # Reference Group "RA": "refa", # Reference Author "RT": "reft", # Reference Title "RL": "refl", # Reference Location "CC": "text", # free text comments "DR": "xdb", # Database cross-Reference "FT": "xns", # Cross-references to the nucleotide sequence database # RECHECK "PE": "exist", # Protein existence "KW": "kw", # KeyWord "FT": "ft", # Feature Table "SQ": "seqh", # SeQuence header) " ": "seq", } preprocessing_fields = ["id","accn","date","desc","gene","spec","orga","clas","taxo","host","refn", "refp", "refc", "refx", "refg", "refa", "reft", "refl", "text","xdb","ft","exist","kw","seqh","seq"] def get_csv(path, fields): path_out = path.split(".")[0]+".csv" print(f"{datetime.now()} - Processing: {path}") print(f"{datetime.now()} - Saving to: {path_out}") print("Processing file line:") i = 0 data = {k: "" for k in fields} with open(path, 'r') as rf, open(path_out, 'w') as wf: while True: if i == 0: # write header to csv header = ",".join(fields)+"\n" wf.write(header) if i % 1_000_000 == 0: print(i, end=", ") i += 1 rline = rf.readline() if rline.startswith("CC ----") or \ rline.startswith("CC Copy") or \ rline.startswith("CC Dist"): continue elif rline == "": # EOF is empty string print(f"\n{datetime.now()} - Processing complete.") break elif rline.startswith("//"): # end of entry, save line to csv file for key in data.keys(): if key == "seq": data[key] = data[key].replace(" ","") # remove spaces in AA sequence wline = ",".join([x.replace(",",";") for x in data.values()])+"\n" wf.write(wline) data = {k: "" for k in fields} # create new empty data dict continue key = linetype_conversion[rline[:2]] # get line key content = " 
".join(rline[5:].split()) # get line content data[key] += content if data[key] == "" else " "+content return path_out paths_csv = [] for path in paths_data: path_out = get_csv(path, fields=preprocessing_fields) paths_csv.append(path_out) print(f"{datetime.now()} - Preprocessed file saved to: {path_out}") #print(f"{datetime.now()} - Getting string lengths for every column.") #cols = copy.deepcopy(preprocessing_fields) #cols.append("text_all") # #def get_cols_len_csv(path, cols): # path_out = path.split(".")[0]+"_len.csv" # print(f"Processing: {path}") # print(f"Saving to: {path_out}") # i = 0 # with open(path, 'r') as rf, open(path_out, 'w') as wf: # while True: # if i % 1_000_000 == 0: # print(i, end=", ") # i += 1 # # line = rf.readline() # if line == "": # EOF is an empty string # break # # line = line.replace("\n","").split(",") # # if i == 1: # get index values for the wanted columns # idx = dict() # for c in cols: # if c == "text_all": # continue # idx[c] = line.index(c) # # wline = ",".join(cols)+"\n" # write header # wf.write(wline) # continue # # out = [] # text_all = 0 # for c in cols: # if c == "id": # out.append(line[idx[c]].split(" ")[0]) # elif c == "text_all": # out.append(str(text_all)) # else: # length = len(line[idx[c]]) # text_all += length # out.append(str(length)) # # wline = ",".join(out)+"\n" # wf.write(wline) # return path_out # #for path in paths_csv: # path_out = get_cols_len_csv(path, cols) # print(f"{datetime.now()} - String lengths data saved to: {path_out}") print(f"{datetime.now()} - Merging preprocessed csv files into one csv file.") path_csv_full = path_data_dir+"/uniprot_full.csv" subprocess.run(["cat", paths_csv[0], f"<(tail +2 {paths_csv[1]})", ">", path_csv_full]) print(f"{datetime.now()} - Merged files saved to: {path_csv_full}") print(f"{datetime.now()} - Data preprocessing done.")
clasp-main
preproc/preprocess_data.py
from datetime import datetime import os from pathlib import Path import subprocess import copy import re # Note: Run preprocess_data.py file in the main repository directory or the preproc directory of the repository. urls_download = ["https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.dat.gz", "https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_trembl.dat.gz"] print(f"{datetime.now()} - Start downloading files.") path_current_dir = os.path.abspath(os.path.dirname(__file__)) path_data_dir = path_current_dir.split("/preproc")[0]+"/data" paths_raw_data = [] for url in urls_download: print(f"{datetime.now()} - Start downloading from: {url}") Path(path_data_dir).mkdir(exist_ok=True) subprocess.run(["wget", url, "-P", path_data_dir]) paths_raw_data.append(path_data_dir+"/"+url.split("/")[-1]) print(f"{datetime.now()} - Download finished.") print(f"{datetime.now()} - Decompress downloaded files.") paths_data = [] for path_raw in paths_raw_data: path_decomp = path_raw.split(".gz")[0] print(f"{datetime.now()} - Decompress: {path_raw}") subprocess.run(["gunzip", path_raw, path_decomp]) paths_data.append(path_decomp) print(f"{datetime.now()} - Decompressed to: {path_decomp}") print(f"{datetime.now()} - Preprocessing files and saving to csv.") # Raw data setup see user manual: https://web.expasy.org/docs/userman.html linetype_conversion = { "ID": "id", "AC": "accn", # accession number "DT": "date", "DE": "desc", # DEscription "GN": "gene", # Gene Name "OS": "spec", # Organism Species "OG": "orga", # OrGanelle "OC": "clas", # Organism Classification "OX": "taxo", # Organism taxonomy cross-reference "OH": "host", # Organism Host "RN": "refn", # Reference Number "RP": "refp", # Reference Position "RC": "refc", # Reference Comment "RX": "refx", # Reference cross-reference "RG": "refg", # Reference Group "RA": "refa", # Reference Author "RT": "reft", # Reference Title "RL": "refl", # Reference Location "CC": "text", # free text comments "DR": "xdb", # Database cross-Reference "FT": "xns", # Cross-references to the nucleotide sequence database # RECHECK "PE": "exist", # Protein existence "KW": "kw", # KeyWord "FT": "ft", # Feature Table "SQ": "seqh", # SeQuence header) " ": "seq", } preprocessing_fields = ["id","accn","date","desc","gene","spec","orga","clas","taxo","host","refn", "refp", "refc", "refx", "refg", "refa", "reft", "refl", "text","xdb","ft","exist","kw","seqh","seq"] def get_csv(path, fields): path_out = path.split(".")[0]+".csv" print(f"{datetime.now()} - Processing: {path}") print(f"{datetime.now()} - Saving to: {path_out}") print("Processing file line:") i = 0 data = {k: "" for k in fields} with open(path, 'r') as rf, open(path_out, 'w') as wf: while True: if i == 0: # write header to csv header = ",".join(fields)+"\n" wf.write(header) if i % 1_000_000 == 0: print(i, end=", ") i += 1 rline = rf.readline() if rline.startswith("CC ----") or \ rline.startswith("CC Copy") or \ rline.startswith("CC Dist") or \ rline.startswith("CC -!- CAUTION: The sequence shown here is derived from an EMBL/GenBank/DDBJ") or \ rline.startswith("CC whole genome shotgun (WGS) entry which is preliminary data.") or \ rline.startswith("DR") or \ rline.startswith("DT") or \ rline.startswith("RX") or \ rline.startswith("RL") or \ rline.startswith("OX") or \ rline.startswith("RN"): continue elif rline == "": # EOF is empty string print(f"\n{datetime.now()} - Processing complete.") break elif rline.startswith("//"): # end of entry, 
save line to csv file
                for key in data.keys():
                    data[key] = re.sub(r"\s*{.*}\s*", " ", data[key]) # Remove curly braces incl. their content
                    if key == "seq":
                        data[key] = data[key].replace(" ","") # remove spaces in AA sequence
                    if key == "seqh":
                        data[key] = ";".join(data[key].split(";")[:-2]) # Remove CRC64
                wline = ",".join([x.replace(",",";") for x in data.values()])+"\n"
                wf.write(wline)
                data = {k: "" for k in fields} # create new empty data dict
                continue

            key = linetype_conversion[rline[:2]] # get line key
            content = " ".join(rline[5:].split()) # get line content
            data[key] += content if data[key] == "" else " "+content
    return path_out


paths_csv = []
for path in paths_data:
    path_out = get_csv(path, fields=preprocessing_fields)
    paths_csv.append(path_out)
    print(f"{datetime.now()} - Preprocessed file saved to: {path_out}")


#print(f"{datetime.now()} - Getting string lengths for every column.")
#cols = copy.deepcopy(preprocessing_fields)
#cols.append("text_all")
#
#def get_cols_len_csv(path, cols):
#    path_out = path.split(".")[0]+"_len.csv"
#    print(f"Processing: {path}")
#    print(f"Saving to: {path_out}")
#    i = 0
#    with open(path, 'r') as rf, open(path_out, 'w') as wf:
#        while True:
#            if i % 1_000_000 == 0:
#                print(i, end=", ")
#            i += 1
#
#            line = rf.readline()
#            if line == "": # EOF is an empty string
#                break
#
#            line = line.replace("\n","").split(",")
#
#            if i == 1: # get index values for the wanted columns
#                idx = dict()
#                for c in cols:
#                    if c == "text_all":
#                        continue
#                    idx[c] = line.index(c)
#
#                wline = ",".join(cols)+"\n" # write header
#                wf.write(wline)
#                continue
#
#            out = []
#            text_all = 0
#            for c in cols:
#                if c == "id":
#                    out.append(line[idx[c]].split(" ")[0])
#                elif c == "text_all":
#                    out.append(str(text_all))
#                else:
#                    length = len(line[idx[c]])
#                    text_all += length
#                    out.append(str(length))
#
#            wline = ",".join(out)+"\n"
#            wf.write(wline)
#    return path_out
#
#for path in paths_csv:
#    path_out = get_cols_len_csv(path, cols)
#    print(f"{datetime.now()} - String lengths data saved to: {path_out}")


print(f"{datetime.now()} - Merging preprocessed csv files into one csv file.")

path_csv_full = path_data_dir+"/uniprot_full.csv"
# process substitution and output redirection are bash features, so the command
# has to run through a shell; a plain argument list would hand "<(...)" and ">"
# to cat as literal file names
subprocess.run(
    f"cat {paths_csv[0]} <(tail -n +2 {paths_csv[1]}) > {path_csv_full}",
    shell=True, executable="/bin/bash", check=True)

print(f"{datetime.now()} - Merged files saved to: {path_csv_full}")
print(f"{datetime.now()} - Data preprocessing done.")
clasp-main
preproc/preprocess_data_reduced.py
from setuptools import setup, find_packages setup( name = 'memorizing-transformers-pytorch', packages = find_packages(exclude=[]), version = '0.4.1', license='MIT', description = 'Memorizing Transformer - Pytorch', long_description_content_type = 'text/markdown', author = 'Phil Wang', author_email = '[email protected]', url = 'https://github.com/lucidrains/memorizing-transformers-pytorch', keywords = [ 'artificial intelligence', 'deep learning', 'transformers', 'memory', 'retrieval' ], install_requires=[ 'einops>=0.6', 'filelock', 'joblib', 'faiss-gpu', 'numpy', 'torch>=1.6', ], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', ], )
memorizing-transformers-pytorch-main
setup.py
from memorizing_transformers_pytorch import MemorizingTransformer

import random
import tqdm
import gzip
import numpy as np

import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset

# constants

NUM_BATCHES = int(1e5)
BATCH_SIZE = 16
SEQ_LEN = 512
SEGMENTS = 5

LEARNING_RATE = 2e-4
MAX_GRAD_CLIP_NORM = 0.5

VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512

# helpers

def cycle(loader):
    while True:
        for data in loader:
            yield data

def decode_token(token):
    return str(chr(max(32, token)))

def decode_tokens(tokens):
    return ''.join(list(map(decode_token, tokens)))

# instantiate GPT-like decoder model

model = MemorizingTransformer(
    num_tokens = 256,
    dim = 512,
    depth = 8,
    memorizing_layers = 4,
    max_knn_memories = 512 * 15,
    num_retrieved_memories = 32,
    xl_memory_layers = (7, 8),
    xl_max_memories = 512,
).cuda()

# prepare enwik8 data

with gzip.open('./data/enwik8.gz') as file:
    # np.fromstring is deprecated; frombuffer + copy gives an equivalent writable array
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
    trX, vaX = np.split(X, [int(90e6)])
    data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)

class TextSamplerDataset(Dataset):
    def __init__(self, data, seq_len):
        super().__init__()
        self.data = data
        self.seq_len = seq_len

    def __getitem__(self, index):
        rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
        full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
        return full_seq.cuda()

    def __len__(self):
        return self.data.size(0) // self.seq_len

# dataset and dataloader

train_dataset = TextSamplerDataset(data_train, SEQ_LEN * SEGMENTS)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE, drop_last = True))

valid_dataset = TextSamplerDataset(data_val, SEQ_LEN * SEGMENTS)
valid_loader = cycle(DataLoader(valid_dataset, batch_size = BATCH_SIZE, drop_last = True))

# optimizer

optim = torch.optim.Adam(model.parameters(), lr = LEARNING_RATE)

# training

for i in tqdm.tqdm(range(NUM_BATCHES), mininterval = 10., desc = 'training'):
    model.train()

    data = next(train_loader)
    train_loss = 0.

    with model.knn_memories_context(batch_size = BATCH_SIZE) as knn_memories:
        xl_memories = None

        seq, labels = data[:, :-1], data[:, 1:]

        for seq_segment, labels_segment in zip(seq.chunk(SEGMENTS, dim = -1), labels.chunk(SEGMENTS, dim = -1)):
            loss, xl_memories = model(
                seq_segment,
                labels = labels_segment,
                knn_memories = knn_memories,
                xl_memories = xl_memories
            )

            train_loss += loss.item() / SEGMENTS
            (loss / SEGMENTS).backward()

    print(f'training loss: {train_loss}')
    torch.nn.utils.clip_grad_norm_(model.parameters(), MAX_GRAD_CLIP_NORM)

    optim.step()
    optim.zero_grad()

    if not (i % VALIDATE_EVERY):
        model.eval()

        valid_data = next(valid_loader)
        valid_loss = 0.

        with torch.no_grad(), model.knn_memories_context(batch_size = BATCH_SIZE) as knn_memories:
            xl_memories = None

            seq, labels = valid_data[:, :-1], valid_data[:, 1:]

            for seq_segment, labels_segment in zip(seq.chunk(SEGMENTS, dim = -1), labels.chunk(SEGMENTS, dim = -1)):
                loss, xl_memories = model(
                    seq_segment,
                    labels = labels_segment,
                    knn_memories = knn_memories,
                    xl_memories = xl_memories
                )

                valid_loss += loss.item() / SEGMENTS

        print(f'valid loss: {valid_loss}')
memorizing-transformers-pytorch-main
train.py
from memorizing_transformers_pytorch.memorizing_transformers_pytorch import MemorizingTransformer, KNNAttention from memorizing_transformers_pytorch.knn_memory import KNNMemory
memorizing-transformers-pytorch-main
memorizing_transformers_pytorch/__init__.py
import math from functools import partial from contextlib import contextmanager from pathlib import Path from filelock import FileLock import torch import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat from einops_exts import repeat_many from einops.layers.torch import Rearrange from memorizing_transformers_pytorch.knn_memory import KNNMemoryList, DEFAULT_KNN_MEMORY_MEMMAP_DIRECTORY # helper functions def identity(t): return t def exists(val): return val is not None def unique(arr): return list({el: True for el in arr}.keys()) def default(val, d): return val if exists(val) else d def cast_tuple(val, length = 1): return val if isinstance(val, tuple) else ((val,) * length) def l2norm(t): return F.normalize(t, dim = -1) # helper classes class PreNormResidual(nn.Module): def __init__(self, dim, fn): super().__init__() self.fn = fn self.norm = nn.LayerNorm(dim) def forward(self, x, **kwargs): out = self.fn(self.norm(x), **kwargs) if not isinstance(out, tuple): return out + x head, *tail = out return (head + x, *tail) # t5 relative positional bias class T5RelativePositionBias(nn.Module): def __init__( self, scale, num_buckets = 32, max_distance = 128, heads = 8 ): super().__init__() self.scale = scale self.num_buckets = num_buckets self.max_distance = max_distance self.relative_attention_bias = nn.Embedding(num_buckets, heads) @staticmethod def _relative_position_bucket( relative_position, num_buckets = 32, max_distance = 128 ): n = -relative_position n = torch.max(n, torch.zeros_like(n)) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + (torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).long() val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) return torch.where(is_small, n, val_if_large) def forward(self, i, j, *, device): q_pos = torch.arange(i, dtype = torch.long, device = device) k_pos = torch.arange(j, dtype = torch.long, device = device) rel_pos = rearrange(k_pos, 'j -> 1 j') - rearrange(q_pos, 'i -> i 1') rp_bucket = self._relative_position_bucket(rel_pos, num_buckets = self.num_buckets, max_distance = self.max_distance) values = self.relative_attention_bias(rp_bucket) bias = rearrange(values, 'i j h -> () h i j') return bias * self.scale # feedforward class FeedForward(nn.Module): def __init__(self, dim, mult = 4, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.Linear(dim, dim * mult), nn.GELU(), nn.Dropout(dropout), nn.Linear(dim * mult, dim) ) def forward(self, x): return self.net(x) # attention class Attention(nn.Module): def __init__( self, *, dim, heads = 8, dim_head = 64, dropout = 0., xl_max_memories = 0., ): super().__init__() self.heads = heads self.scale = dim_head ** -0.5 inner_dim = heads * dim_head self.xl_max_memories = xl_max_memories self.dropout = nn.Dropout(dropout) self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim, dim_head * 2, bias = False) self.to_out = nn.Linear(inner_dim, dim) def forward(self, x, *, xl_memory = None, rel_pos_bias = None): h, device = self.heads, x.device q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1)) q = rearrange(q, 'b n (h d) -> b h n d', h = h) q = q * self.scale if exists(xl_memory): k_xl_mem, v_xl_mem = xl_memory.unbind(dim = -2) k = torch.cat((k_xl_mem, k), dim = -2) v = torch.cat((v_xl_mem, v), dim = -2) sim = einsum('b h i d, b j d -> b h i j', q, k) i, j = sim.shape[-2:] if exists(rel_pos_bias): sim = rel_pos_bias[..., -i:, 
-j:] + sim causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1) sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max) attn = sim.softmax(dim = -1) attn = self.dropout(attn) out = einsum('b h i j, b j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') # new xl memories new_kv_memories = torch.stack((k, v), dim = -2).detach() if self.xl_max_memories > 0: new_xl_kv_memories = new_kv_memories[:, -self.xl_max_memories:] else: new_xl_kv_memories = None return self.to_out(out), new_xl_kv_memories # approximate nearest neighbor attention class KNNAttention(nn.Module): def __init__( self, *, dim, heads = 8, dim_head = 64, dropout = 0., num_retrieved_memories = 32, xl_max_memories = 0., attn_scale_init = 20, gate_output = False ): super().__init__() self.heads = heads self.scale = nn.Parameter(torch.ones(heads, 1, 1) * math.log(attn_scale_init)) inner_dim = heads * dim_head self.xl_max_memories = xl_max_memories self.num_retrieved_memories = num_retrieved_memories self.dropout = nn.Dropout(dropout) self.knn_mem_dropout = nn.Dropout(dropout) self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim, dim_head * 2, bias = False) self.to_out = nn.Linear(inner_dim, dim, bias = False) self.output_gate = nn.Parameter(torch.zeros(1)) if gate_output else None def forward( self, x, *, knn_memory, xl_memory = None, add_knn_memory = True, rel_pos_bias = None ): b, n, h, device = *x.shape[:2], self.heads, x.device q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1)) q = rearrange(q, 'b n (h d) -> b h n d', h = h) # in paper, they showed normalizing of keys led to more stable training # we'll just go with full cosine sim attention https://arxiv.org/abs/2010.04245 q, k = map(l2norm, (q, k)) # handle xl memory if exists(xl_memory): k_xl_mem, v_xl_mem = xl_memory.unbind(dim = -2) k = torch.cat((k_xl_mem, k), dim = -2) v = torch.cat((v_xl_mem, v), dim = -2) # calculate local attention scale = self.scale.exp() sim = einsum('b h i d, b j d -> b h i j', q, k) * scale i, j = sim.shape[-2:] if exists(rel_pos_bias): sim = rel_pos_bias[..., -i:, -j:] + sim mask_value = -torch.finfo(sim.dtype).max causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1) sim = sim.masked_fill(causal_mask, mask_value) # calculate knn attention over memory, if index is passed in mem_kv, mem_mask = knn_memory.search(q, self.num_retrieved_memories) mem_k, mem_v = mem_kv.unbind(dim = -2) sim_mem = einsum('b h i d, b h i j d -> b h i j', q, mem_k) * scale sim_mem = sim_mem.masked_fill(~mem_mask, mask_value) # calculate new XL memories, as well as memories to be discarded new_kv_memories = torch.stack((k, v), dim = -2).detach() if self.xl_max_memories > 0: new_kv_memories_discarded, new_xl_kv_memories = new_kv_memories[:, :-self.xl_max_memories], new_kv_memories[:, -self.xl_max_memories:] else: new_kv_memories_discarded, new_xl_kv_memories = new_kv_memories, None # add memories to be discarded into KNN memory if add_knn_memory and new_kv_memories_discarded.numel() > 0: knn_memory.add(new_kv_memories_discarded) # attention (combining local and distant) sim = torch.cat((sim_mem, sim), dim = -1) attn = sim.softmax(dim = -1) attn = self.dropout(attn) local_attn, mem_attn = attn[..., self.num_retrieved_memories:], attn[..., :self.num_retrieved_memories] local_out = einsum('b h i j, b j d -> b h i d', local_attn, v) mem_out = einsum('b h i j, b h i j d -> b h i d', mem_attn, mem_v) out = local_out + mem_out # combine heads and project out 
out = rearrange(out, 'b h n d -> b n (h d)') out = self.to_out(out) # use flamingo styled gating of output, so that memorizing transformers can be gated into an existing LLM # preparation to add this to block-recurrent-transformer-pytorch, for the pinnacle of long context attention network if exists(self.output_gate): out = out * self.output_gate.tanh() return out, new_xl_kv_memories # main class class MemorizingTransformer(nn.Module): def __init__( self, *, num_tokens, dim, depth, dim_head = 64, heads = 8, knn_attn_heads = None, attn_dropout = 0., ff_mult = 4, ff_dropout = 0., memorizing_layers = None, max_knn_memories = 250000, num_retrieved_memories = 32, clear_memories_on_sos_token_id = None, clear_memories_on_eos_token_id = None, knn_memories_directory = DEFAULT_KNN_MEMORY_MEMMAP_DIRECTORY, shift_knn_memories_down = 0., pad_id = 0, xl_max_memories = 0, xl_memory_layers = None, shift_xl_memories_down = 0., knn_memory_multiprocessing = False ): super().__init__() self.token_emb = nn.Embedding(num_tokens, dim) self.pad_id = pad_id block_wrapper = partial(PreNormResidual, dim) valid_layers = set(range(1, depth + 1)) memorizing_layers = default(memorizing_layers, (depth // 2,)) # default KNN attention layer to midpoint of transformer memorizing_layers = cast_tuple(memorizing_layers) memorizing_layers = tuple(filter(lambda i: i in valid_layers, memorizing_layers)) self.dim_head = dim_head knn_attn_heads = default(knn_attn_heads, heads) # xl memory hyperparameter if xl_max_memories > 0: xl_memory_layers = default(xl_memory_layers, tuple(range(1, depth + 1))) xl_memory_layers = unique(xl_memory_layers) self.xl_memory_layers = tuple(filter(lambda i: i in valid_layers, xl_memory_layers)) self.num_xl_memory_layers = len(self.xl_memory_layers) else: self.xl_memory_layers = tuple() self.num_xl_memory_layers = 0 # knn memory hyperparameters self.max_knn_memories = max_knn_memories self.knn_memories_directory = knn_memories_directory self.memorizing_layers = unique(memorizing_layers) self.num_memory_layers = len(memorizing_layers) self.clear_memories_on_sos_token_id = clear_memories_on_sos_token_id self.clear_memories_on_eos_token_id = clear_memories_on_eos_token_id # relative positional bias self.rel_pos_bias = T5RelativePositionBias(scale = dim_head ** 0.5, heads = heads) self.knn_rel_pos_bias = T5RelativePositionBias(scale = dim_head ** 0.5, heads = heads) # layers self.layers = nn.ModuleList([]) for idx in range(depth): layer_num = idx + 1 use_xl_memories = layer_num in self.xl_memory_layers use_knn_attention = layer_num in memorizing_layers xl_max_memories_layer = 0 if not use_xl_memories else xl_max_memories if use_knn_attention: attn = KNNAttention(dim = dim, dim_head = dim_head, heads = knn_attn_heads, dropout = attn_dropout, num_retrieved_memories = num_retrieved_memories, xl_max_memories = xl_max_memories_layer) else: attn = Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, xl_max_memories = xl_max_memories_layer) self.layers.append(nn.ModuleList([ block_wrapper(attn), block_wrapper(FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)), ])) # memory layer shifting # from a little known paper https://arxiv.org/abs/2012.15688 self.shift_knn_memories_down = shift_knn_memories_down self.shift_xl_memories_down = shift_xl_memories_down # to logits self.to_logits = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_tokens) ) # knn memories init self.knn_mem_kwargs = dict( dim = self.dim_head, max_memories = self.max_knn_memories, multiprocessing = 
knn_memory_multiprocessing
        )

    def create_knn_memories(
        self,
        *,
        batch_size
    ):
        return KNNMemoryList.create_memories(
            batch_size = batch_size,
            num_memory_layers = self.num_memory_layers,
            memories_directory = self.knn_memories_directory,
        )(**self.knn_mem_kwargs)

    @contextmanager
    def knn_memories_context(
        self,
        **kwargs
    ):
        knn_dir = Path(self.knn_memories_directory)
        knn_dir.mkdir(exist_ok = True, parents = True)
        lock = FileLock(str(knn_dir / 'mutex'))

        with lock:
            knn_memories = self.create_knn_memories(**kwargs)
            yield knn_memories
            knn_memories.cleanup()

    def clear_memory(self, token_ids, knn_memories, token_id):
        """ clears the KNN memories based on if the batch row contains the specified token id """
        """ for auto-clearing KNN memories based on start and end of strings """

        clear_memory = (token_ids == token_id).any(dim = -1)
        batch_indices, = clear_memory.nonzero(as_tuple = True)
        batch_indices_to_clear = batch_indices.tolist()

        if len(batch_indices_to_clear) == 0:
            return

        knn_memories.clear_memory(batch_indices_to_clear)

    def forward(
        self,
        x,
        knn_memories,
        xl_memories = None,
        labels = None,
        add_knn_memory = True
    ):
        batch_size, seq_len, *_, device = *x.shape, x.device

        # keep the raw token ids around for the start / end of string memory clearing below

        token_ids = x

        # if KNN memories are passed in, and researcher wants memories auto-cleared on <sos> token detection
        # do the appropriate logic

        if exists(self.clear_memories_on_sos_token_id):
            self.clear_memory(token_ids, knn_memories, self.clear_memories_on_sos_token_id)

        x = self.token_emb(x)

        # validate KNN memories to have enough indices for batch size

        assert all([memory.num_indices == batch_size for memory in knn_memories]), f'you passed in an input with batch size {batch_size} but your memories were not instantiated with that number of KNN indices'

        # handle XL memories

        xl_memories = default(xl_memories, (None,) * self.num_xl_memory_layers)
        assert len(xl_memories) == self.num_xl_memory_layers
        has_xl_memories = len(xl_memories) > 0

        # shifting memories a number of layers down, little known technique shown to enhance memories from Ernie-Doc paper

        if len(knn_memories) > 0 and self.shift_knn_memories_down > 0:
            knn_memories = [*knn_memories[self.shift_knn_memories_down:], *knn_memories[:self.shift_knn_memories_down]]

        if len(xl_memories) > 0 and self.shift_xl_memories_down > 0:
            xl_memories = [*xl_memories[self.shift_xl_memories_down:], *xl_memories[:self.shift_xl_memories_down]]

        # iterate through the memories in order of the ascending layers that contain KNNAttention

        xl_memories_iter = iter(xl_memories)
        knn_memories_iter = iter(knn_memories)

        # positional bias

        max_context_len = max([seq_len, *map(lambda t: (t.shape[-3] if exists(t) else 0) + seq_len, xl_memories)])

        rel_pos_bias = self.rel_pos_bias(seq_len, max_context_len, device = device)
        knn_rel_pos_bias = self.knn_rel_pos_bias(seq_len, max_context_len, device = device)

        # keep track of new xl memories

        new_xl_memories = [] if has_xl_memories else None

        # go through all layers

        for ind, (attn, ff) in enumerate(self.layers):
            layer_num = ind + 1

            is_memorizing_layer = layer_num in self.memorizing_layers
            is_xl_memory_layer = layer_num in self.xl_memory_layers

            attn_kwargs = dict(rel_pos_bias = rel_pos_bias if not is_memorizing_layer else knn_rel_pos_bias)

            if is_memorizing_layer:
                attn_kwargs = {**attn_kwargs, 'knn_memory': next(knn_memories_iter), 'add_knn_memory': add_knn_memory}

            if is_xl_memory_layer:
                attn_kwargs = {**attn_kwargs, 'xl_memory': next(xl_memories_iter)}

            # attention

            x, xl_mem = attn(x, **attn_kwargs)

            # add new XL memories if needed

            if exists(xl_mem):
                new_xl_memories.append(xl_mem)

            # feedforward

            x = ff(x)

        # to logits

        logits = self.to_logits(x)

        # auto-clear KNN memories on end of string token

        if exists(self.clear_memories_on_eos_token_id):
            self.clear_memory(token_ids, knn_memories, self.clear_memories_on_eos_token_id)

        # for training

        if not exists(labels):
            if exists(new_xl_memories):
                return logits, new_xl_memories

            return logits

        loss = F.cross_entropy(rearrange(logits, 'b n c -> b c n'), labels, ignore_index = self.pad_id)

        if exists(new_xl_memories):
            return loss, new_xl_memories

        return loss
memorizing-transformers-pytorch-main
memorizing_transformers_pytorch/memorizing_transformers_pytorch.py
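# Minimal inference-style sketch for MemorizingTransformer; hyperparameters are
# toy values, and the KNN memories are scoped by the context manager exactly as
# in train.py above.
import torch

model = MemorizingTransformer(
    num_tokens = 256,
    dim = 128,
    depth = 4,
    memorizing_layers = (3,),
    max_knn_memories = 2048,
    num_retrieved_memories = 8,
)

ids = torch.randint(0, 256, (2, 64))

with model.knn_memories_context(batch_size = 2) as knn_memories:
    logits = model(ids, knn_memories)   # (2, 64, 256)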
import os
import math
import torch
import faiss
import numpy as np
from pathlib import Path
from functools import wraps
from contextlib import ExitStack, contextmanager

from einops import rearrange, pack, unpack

# multiprocessing

from joblib import Parallel, delayed, cpu_count

# constants

FAISS_INDEX_GPU_ID = int(os.getenv('FAISS_INDEX_GPU_ID', 0))

DEFAULT_KNN_MEMORY_MEMMAP_DIRECTORY = './.tmp/knn.memories'

# helper functions

def exists(val):
    return val is not None

def default(val, d):
    return val if exists(val) else d

def cast_list(val):
    return val if isinstance(val, list) else [val]

def all_el_unique(arr):
    return len(set(arr)) == len(arr)

@contextmanager
def multi_context(*cms):
    with ExitStack() as stack:
        yield [stack.enter_context(cls) for cls in cms]

def count_intersect(x, y):
    # returns an array that shows how many times an element in x is contained in tensor y
    return np.sum(rearrange(x, 'i -> i 1') == rearrange(y, 'j -> 1 j'), axis = -1)

def check_shape(tensor, pattern, **kwargs):
    return rearrange(tensor, f"{pattern} -> {pattern}", **kwargs)

# a wrapper around a faiss IndexHNSWFlat
# taking care of expiring old keys automagically

class KNN():
    def __init__(
        self,
        dim,
        max_num_entries,
        cap_num_entries = False,
        M = 15,
        keep_stats = False
    ):
        index = faiss.IndexHNSWFlat(dim, M, faiss.METRIC_INNER_PRODUCT)
        self.index = index
        self.max_num_entries = max_num_entries
        self.cap_num_entries = cap_num_entries
        self.is_trained = False
        self.keep_stats = keep_stats

        self.reset()

    def __del__(self):
        if hasattr(self, 'index'):
            del self.index

    def reset(self):
        self.ids = np.empty((0,), dtype = np.int32)

        if self.keep_stats:
            self.hits = np.empty((0,), dtype = np.int32)
            self.age_num_iterations = np.empty((0,), dtype = np.int32)
            self.ages_since_last_hit = np.empty((0,), dtype = np.int32)

        self.index.reset()
        self.is_trained = False

    def train(self, x):
        self.index.train(x)
        self.is_trained = True

    def add(self, x, ids):
        if not self.is_trained:
            self.train(x)

        self.ids = np.concatenate((ids, self.ids))

        if self.keep_stats:
            self.hits = np.concatenate((np.zeros_like(ids), self.hits))
            self.age_num_iterations = np.concatenate((np.zeros_like(ids), self.age_num_iterations))
            self.ages_since_last_hit = np.concatenate((np.zeros_like(ids), self.ages_since_last_hit))

        if self.cap_num_entries and len(self.ids) > self.max_num_entries:
            self.reset()

        return self.index.add(x)

    def search(
        self,
        x,
        topk,
        nprobe = 8,   # unused by the HNSW index, kept for API compatibility
        return_distances = False,
        increment_hits = False,
        increment_age = True
    ):
        if not self.is_trained:
            return np.full((x.shape[0], topk), -1)

        distances, indices = self.index.search(x, k = topk)

        if increment_hits and self.keep_stats:
            hits = count_intersect(self.ids, rearrange(indices, '... -> (...)'))
            self.hits += hits

            self.ages_since_last_hit += 1
            self.ages_since_last_hit *= (hits == 0)

        if increment_age and self.keep_stats:
            self.age_num_iterations += 1

        if return_distances:
            return indices, distances

        return indices

# KNN memory layer, where one can store key / value memories
# can automatically take care of a collection of faiss indices (across batch dimension)

class KNNMemory():
    def __init__(
        self,
        dim,
        max_memories = 16000,
        num_indices = 1,
        memmap_filename = './knn.memory.memmap',
        multiprocessing = True
    ):
        self.dim = dim
        self.num_indices = num_indices
        self.scoped_indices = list(range(num_indices))

        self.max_memories = max_memories
        self.shape = (num_indices, max_memories, 2, dim)
        self.db_offsets = np.zeros(num_indices, dtype = np.int32)

        self.db = np.memmap(memmap_filename, mode = 'w+', dtype = np.float32, shape = self.shape)
        self.knns = [KNN(dim = dim, max_num_entries = max_memories, cap_num_entries = True) for _ in range(num_indices)]

        self.n_jobs = cpu_count() if multiprocessing else 1

    def set_scoped_indices(self, indices):
        indices = list(indices)
        assert all_el_unique(indices), f'all scoped batch indices must be unique, received: {indices}'
        assert all([0 <= i < self.num_indices for i in indices]), f'each batch index must be between 0 and less than {self.num_indices}: received {indices}'
        self.scoped_indices = indices

    @contextmanager
    def at_batch_indices(self, indices):
        prev_indices = self.scoped_indices
        self.set_scoped_indices(indices)
        yield self
        self.set_scoped_indices(prev_indices)

    def clear(self, batch_indices = None):
        if not exists(batch_indices):
            batch_indices = list(range(self.num_indices))

        batch_indices = cast_list(batch_indices)

        for index in batch_indices:
            knn = self.knns[index]
            knn.reset()

        self.db_offsets[batch_indices] = 0

    def add(self, memories):
        check_shape(memories, 'b n kv d', d = self.dim, kv = 2, b = len(self.scoped_indices))

        memories = memories.detach().cpu().numpy()
        memories = memories[:, -self.max_memories:]
        num_memories = memories.shape[1]

        knn_insert_ids = np.arange(num_memories)

        keys = np.ascontiguousarray(memories[..., 0, :])
        knns = [self.knns[i] for i in self.scoped_indices]
        db_offsets = [self.db_offsets[i] for i in self.scoped_indices]

        # use joblib to insert new key / value memories into faiss index

        @delayed
        def knn_add(knn, key, db_offset):
            knn.add(key, ids = knn_insert_ids + db_offset)
            return knn

        updated_knns = Parallel(n_jobs = self.n_jobs)(knn_add(*args) for args in zip(knns, keys, db_offsets))
        for knn_idx, scoped_idx in enumerate(self.scoped_indices):
            self.knns[scoped_idx] = updated_knns[knn_idx]

        # add the new memories to the memmap "database"

        add_indices = (rearrange(np.arange(num_memories), 'j -> 1 j') + rearrange(self.db_offsets[list(self.scoped_indices)], 'i -> i 1')) % self.max_memories
        self.db[rearrange(np.array(self.scoped_indices), 'i -> i 1'), add_indices] = memories
        self.db.flush()

        # only advance the offsets of the batch rows that were actually written

        self.db_offsets[list(self.scoped_indices)] += num_memories

    def search(
        self,
        queries,
        topk,
        nprobe = 8,
        increment_hits = True,
        increment_age = True
    ):
        check_shape(queries, 'b ... d', d = self.dim, b = len(self.scoped_indices))
        queries, ps = pack([queries], 'b * d')

        device = queries.device
        queries = queries.detach().cpu().numpy()

        all_masks = []
        all_key_values = []

        knns = [self.knns[i] for i in self.scoped_indices]

        # parallelize faiss search

        @delayed
        def knn_search(knn, query):
            return knn.search(query, topk, nprobe, increment_hits = increment_hits, increment_age = increment_age)

        fetched_indices = Parallel(n_jobs = self.n_jobs)(knn_search(*args) for args in zip(knns, queries))

        # get all the memory key / values from memmap 'database'
        # todo - remove for loop below

        for batch_index, indices in zip(self.scoped_indices, fetched_indices):
            mask = indices != -1
            db_indices = np.where(mask, indices, 0)

            all_masks.append(torch.from_numpy(mask))

            key_values = self.db[batch_index, db_indices % self.max_memories]
            all_key_values.append(torch.from_numpy(key_values))

        all_masks = torch.stack(all_masks)
        all_key_values = torch.stack(all_key_values)
        all_key_values = all_key_values.masked_fill(~rearrange(all_masks, '... -> ... 1 1'), 0.)

        all_key_values, = unpack(all_key_values, ps, 'b * n kv d')
        all_masks, = unpack(all_masks, ps, 'b * n')

        return all_key_values.to(device), all_masks.to(device)

    def __del__(self):
        if hasattr(self, 'knns'):
            for knn in self.knns:
                del knn
        del self.db

# extends list with some extra methods for collections of KNN memories

class KNNMemoryList(list):
    def cleanup(self):
        for memory in self:
            del memory

    @classmethod
    def create_memories(
        cls,
        *,
        batch_size,
        num_memory_layers,
        memories_directory = DEFAULT_KNN_MEMORY_MEMMAP_DIRECTORY
    ):
        memories_path = Path(memories_directory)
        memories_path.mkdir(exist_ok = True, parents = True)

        def inner(*args, **kwargs):
            return cls([KNNMemory(*args, num_indices = batch_size, memmap_filename = str(memories_path / f'knn.memory.layer.{ind + 1}.memmap'), **kwargs) for ind in range(num_memory_layers)])

        return inner

    @contextmanager
    def at_batch_indices(
        self,
        indices
    ):
        knn_batch_indices_contexts = [memory.at_batch_indices(indices) for memory in self]

        with multi_context(*knn_batch_indices_contexts):
            yield

    def clear_memory(
        self,
        batch_indices = None,
        memory_indices = None
    ):
        memory_indices = default(memory_indices, tuple(range(len(self))))

        for memory_index in memory_indices:
            memory = self[memory_index]
            memory.clear(batch_indices)
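# a minimal sketch exercising the memory API defined above. shapes follow the
# check_shape calls in KNNMemory.add / KNNMemory.search: memories are (batch, n, 2, dim)
# stacked key / value pairs, queries are (batch, ..., dim). the demo memmap filename is
# arbitrary and only used for illustration.

if __name__ == '__main__':
    memory = KNNMemory(dim = 64, max_memories = 1024, num_indices = 2, memmap_filename = './knn.demo.memmap')

    # store 128 key / value pairs per batch row

    memory.add(torch.randn(2, 128, 2, 64))

    # retrieve the top 8 nearest memories for each of 16 queries per batch row

    key_values, mask = memory.search(torch.randn(2, 16, 64), topk = 8)
    print(key_values.shape, mask.shape)  # (2, 16, 8, 2, 64), (2, 16, 8)

    # or create one memory per memorizing layer at once, as the transformer does

    memories = KNNMemoryList.create_memories(batch_size = 2, num_memory_layers = 2)(dim = 64, max_memories = 1024)
    memories.cleanup()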
memorizing-transformers-pytorch-main
memorizing_transformers_pytorch/knn_memory.py
from setuptools import setup, find_packages

setup(
  name = 'segformer-pytorch',
  packages = find_packages(),
  version = '0.0.6',
  license='MIT',
  description = 'Segformer - Pytorch',
  author = 'Phil Wang',
  author_email = '[email protected]',
  url = 'https://github.com/lucidrains/segformer-pytorch',
  keywords = [
    'artificial intelligence',
    'deep learning',
    'transformers',
    'image segmentation'
  ],
  install_requires=[
    'einops>=0.3',
    'torch>=1.6'
  ],
  classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
  ],
)
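# a brief note on this config: the package name above means the distribution can be
# installed either from PyPI or from a checkout of this repository, e.g.
#
#   pip install segformer-pytorch   # from PyPI
#   pip install -e .                # editable install from the repo root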
segformer-pytorch-main
setup.py
from segformer_pytorch.segformer_pytorch import Segformer
segformer-pytorch-main
segformer_pytorch/__init__.py
from math import sqrt
from functools import partial
import torch
from torch import nn, einsum
import torch.nn.functional as F

from einops import rearrange, reduce
from einops.layers.torch import Rearrange

# helpers

def exists(val):
    return val is not None

def cast_tuple(val, depth):
    return val if isinstance(val, tuple) else (val,) * depth

# classes

class DsConv2d(nn.Module):
    def __init__(self, dim_in, dim_out, kernel_size, padding, stride = 1, bias = True):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(dim_in, dim_in, kernel_size = kernel_size, padding = padding, groups = dim_in, stride = stride, bias = bias),
            nn.Conv2d(dim_in, dim_out, kernel_size = 1, bias = bias)
        )

    def forward(self, x):
        return self.net(x)

class LayerNorm(nn.Module):
    def __init__(self, dim, eps = 1e-5):
        super().__init__()
        self.eps = eps
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
        self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))

    def forward(self, x):
        std = torch.var(x, dim = 1, unbiased = False, keepdim = True).sqrt()
        mean = torch.mean(x, dim = 1, keepdim = True)
        return (x - mean) / (std + self.eps) * self.g + self.b

class PreNorm(nn.Module):
    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = LayerNorm(dim)

    def forward(self, x):
        return self.fn(self.norm(x))

class EfficientSelfAttention(nn.Module):
    def __init__(
        self,
        *,
        dim,
        heads,
        reduction_ratio
    ):
        super().__init__()
        self.scale = (dim // heads) ** -0.5
        self.heads = heads

        self.to_q = nn.Conv2d(dim, dim, 1, bias = False)
        self.to_kv = nn.Conv2d(dim, dim * 2, reduction_ratio, stride = reduction_ratio, bias = False)
        self.to_out = nn.Conv2d(dim, dim, 1, bias = False)

    def forward(self, x):
        h, w = x.shape[-2:]
        heads = self.heads

        q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = 1))
        q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = heads), (q, k, v))

        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
        attn = sim.softmax(dim = -1)

        out = einsum('b i j, b j d -> b i d', attn, v)
        out = rearrange(out, '(b h) (x y) c -> b (h c) x y', h = heads, x = h, y = w)
        return self.to_out(out)

class MixFeedForward(nn.Module):
    def __init__(
        self,
        *,
        dim,
        expansion_factor
    ):
        super().__init__()
        hidden_dim = dim * expansion_factor
        self.net = nn.Sequential(
            nn.Conv2d(dim, hidden_dim, 1),
            DsConv2d(hidden_dim, hidden_dim, 3, padding = 1),
            nn.GELU(),
            nn.Conv2d(hidden_dim, dim, 1)
        )

    def forward(self, x):
        return self.net(x)

class MiT(nn.Module):
    def __init__(
        self,
        *,
        channels,
        dims,
        heads,
        ff_expansion,
        reduction_ratio,
        num_layers
    ):
        super().__init__()
        stage_kernel_stride_pad = ((7, 4, 3), (3, 2, 1), (3, 2, 1), (3, 2, 1))

        dims = (channels, *dims)
        dim_pairs = list(zip(dims[:-1], dims[1:]))

        self.stages = nn.ModuleList([])

        for (dim_in, dim_out), (kernel, stride, padding), num_layers, ff_expansion, heads, reduction_ratio in zip(dim_pairs, stage_kernel_stride_pad, num_layers, ff_expansion, heads, reduction_ratio):
            get_overlap_patches = nn.Unfold(kernel, stride = stride, padding = padding)
            overlap_patch_embed = nn.Conv2d(dim_in * kernel ** 2, dim_out, 1)

            layers = nn.ModuleList([])

            for _ in range(num_layers):
                layers.append(nn.ModuleList([
                    PreNorm(dim_out, EfficientSelfAttention(dim = dim_out, heads = heads, reduction_ratio = reduction_ratio)),
                    PreNorm(dim_out, MixFeedForward(dim = dim_out, expansion_factor = ff_expansion)),
                ]))

            self.stages.append(nn.ModuleList([
                get_overlap_patches,
                overlap_patch_embed,
                layers
            ]))

    def forward(
        self,
        x,
        return_layer_outputs = False
    ):
        h, w = x.shape[-2:]

        layer_outputs = []
        for (get_overlap_patches, overlap_embed, layers) in self.stages:
            x = get_overlap_patches(x)

            num_patches = x.shape[-1]
            ratio = int(sqrt((h * w) / num_patches))
            x = rearrange(x, 'b c (h w) -> b c h w', h = h // ratio)

            x = overlap_embed(x)
            for (attn, ff) in layers:
                x = attn(x) + x
                x = ff(x) + x

            layer_outputs.append(x)

        ret = x if not return_layer_outputs else layer_outputs
        return ret

class Segformer(nn.Module):
    def __init__(
        self,
        *,
        dims = (32, 64, 160, 256),
        heads = (1, 2, 5, 8),
        ff_expansion = (8, 8, 4, 4),
        reduction_ratio = (8, 4, 2, 1),
        num_layers = 2,
        channels = 3,
        decoder_dim = 256,
        num_classes = 4
    ):
        super().__init__()
        dims, heads, ff_expansion, reduction_ratio, num_layers = map(partial(cast_tuple, depth = 4), (dims, heads, ff_expansion, reduction_ratio, num_layers))
        assert all([*map(lambda t: len(t) == 4, (dims, heads, ff_expansion, reduction_ratio, num_layers))]), 'only four stages are allowed, all keyword arguments must be either a single value or a tuple of 4 values'

        self.mit = MiT(
            channels = channels,
            dims = dims,
            heads = heads,
            ff_expansion = ff_expansion,
            reduction_ratio = reduction_ratio,
            num_layers = num_layers
        )

        self.to_fused = nn.ModuleList([nn.Sequential(
            nn.Conv2d(dim, decoder_dim, 1),
            nn.Upsample(scale_factor = 2 ** i)
        ) for i, dim in enumerate(dims)])

        self.to_segmentation = nn.Sequential(
            nn.Conv2d(4 * decoder_dim, decoder_dim, 1),
            nn.Conv2d(decoder_dim, num_classes, 1),
        )

    def forward(self, x):
        layer_outputs = self.mit(x, return_layer_outputs = True)

        fused = [to_fused(output) for output, to_fused in zip(layer_outputs, self.to_fused)]
        fused = torch.cat(fused, dim = 1)
        return self.to_segmentation(fused)
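# a minimal usage sketch of the Segformer defined above. with the default four stages
# (strides 4, 2, 2, 2) every fused feature map is upsampled back to 1/4 of the input
# resolution, so a 256x256 input yields a (num_classes, 64, 64) segmentation map.

if __name__ == '__main__':
    model = Segformer(
        dims = (32, 64, 160, 256),
        heads = (1, 2, 5, 8),
        ff_expansion = (8, 8, 4, 4),
        reduction_ratio = (8, 4, 2, 1),
        num_layers = 2,
        num_classes = 4
    )

    x = torch.randn(1, 3, 256, 256)
    pred = model(x)
    print(pred.shape)  # (1, 4, 64, 64)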
segformer-pytorch-main
segformer_pytorch/segformer_pytorch.py
from setuptools import setup, find_packages

exec(open('equiformer_pytorch/version.py').read())

setup(
  name = 'equiformer-pytorch',
  packages = find_packages(exclude=[]),
  version = __version__,
  license='MIT',
  description = 'Equiformer - SE3/E3 Graph Attention Transformer for Molecules and Proteins',
  author = 'Phil Wang',
  author_email = '[email protected]',
  long_description_content_type = 'text/markdown',
  url = 'https://github.com/lucidrains/equiformer-pytorch',
  keywords = [
    'artificial intelligence',
    'deep learning',
    'transformers',
    'attention mechanism',
    'equivariance',
    'molecules',
    'proteins'
  ],
  install_requires=[
    'beartype',
    'einops>=0.6',
    'filelock',
    'opt-einsum',
    'torch>=1.6',
  ],
  setup_requires=[
    'pytest-runner',
  ],
  tests_require=[
    'pytest'
  ],
  include_package_data = True,
  classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
  ],
)
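# a brief note on this config: the single source of truth for the version is
# equiformer_pytorch/version.py, read via exec above so setup.py never imports the
# package (and its heavy torch dependency) at build time. the test hooks mean the
# suite can be run directly from the repo root, e.g.
#
#   pip install -e .
#   python setup.py test    # or simply: pytest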
equiformer-pytorch-main
setup.py