diff --git a/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7f30faec771d656bd247e0cdc31dd59feeb3010c
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:255b1e73d291ef09187bd33a97bef7e0a99d45a78d2c50ef8c5373492ef112a0
+size 33555612
diff --git a/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ba27e0df948f85ccb76e273d13d6084c85690489
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d91b7c85c573039abde02e42940f67d909e777dcd2921634e46998eb5e35ed25
+size 33555533
diff --git a/ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d70ce0d5310fe297ec8a4b76d774ee1acc18f772
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abe02cc8d63470ba51c596e8fabac7b894dedb01b303de7bace6acb43d8b93b7
+size 9372
diff --git a/ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..58ae7d43c9b01490b9827fe5f6142b53e8fa5667
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a15e7453f2541b71d2984e7da86894f6e2cae3c37cd00dd4bb072e0e142bc7cc
+size 9387
diff --git a/ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e8cf64753c0d514137582afee72ed8ca0c12f0a4
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/6.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43c4bbc0928e7a01beda5b136cfc1ce0227c43088b057671a39987f7ee304358
+size 9293
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..567366fec11415a08c57638090a0f5f5f366c006
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_content_store.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_content_store.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7cab9a87c02513fdfc6db8e673e6c9c154ff70d5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_content_store.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_python_dispatch.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_python_dispatch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d1890134ab582202246be8ef03526d9118725ad
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_python_dispatch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_pytree.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_pytree.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2dd636dc9163a31cc161ab39315a96bd5e8ad674
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_pytree.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_traceback.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_traceback.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..933db8d308fff48b41f2476439e8527b27886db3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_traceback.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_typing_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_typing_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..658db2bdc8f568878f427890598bddc2423a4af4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_typing_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_zip.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_zip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f479b60301f71c48e609f9ccc4d7b3d1c23052e2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_zip.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/bundled_inputs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/bundled_inputs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a46daba10c10bed2444f1b343354008e80a971b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/bundled_inputs.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/checkpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/checkpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5627be9dcc5d7c456c96b6d935c848e0f8124729
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/checkpoint.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/collect_env.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/collect_env.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94fc30258846a6dab7c12e278b94a873037ee397
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/collect_env.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/dlpack.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/dlpack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..691a7b221d1741bbe87df74721b7744198b37a23
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/dlpack.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/flop_counter.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/flop_counter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..168f40c0fb347d437aedc2c8036059a7cc20ab21
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/flop_counter.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/model_zoo.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/model_zoo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e74dad6725c2ac97d34bb4db538b25e9ffc5072
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/model_zoo.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/show_pickle.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/show_pickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4bcd54860ca084edc86698db39188d4d7a14a594
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/show_pickle.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b81aaebe3c0bae71b8afbbff30fb9954042e4ab2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/functions.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/functions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be397df62b915d646af501b36eaadd2db247a7fc
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/functions.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/interp.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/interp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..162d9c5ae19b1b7c80e1b6cdcb0461688a6dc6d5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/interp.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..616b88a64a15f1e02d4c86aadb82fd65cae033cf
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc
b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bff4919814b49e32cf8c17e303e6f4022ca8a8c6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/solve.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/solve.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04db3fa033d37c6fec3cf097ebf59b5333afa28c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/solve.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/value_ranges.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/value_ranges.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e3469191bf4af9328df0b0c7cc9ad4027e1e245 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/value_ranges.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/functions.py b/venv/lib/python3.10/site-packages/torch/utils/_sympy/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..ab58107524a95b5eb11785debe2ee3251f01cc85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_sympy/functions.py @@ -0,0 +1,353 @@ +import sympy +from sympy import S +from sympy.core.logic import fuzzy_and, fuzzy_not, fuzzy_or + +__all__ = [ + "FloorDiv", "ModularIndexing", "CleanDiv", "CeilDiv", "Pow", "TrueDiv", + "LShift", "RShift", "IsNonOverlappingAndDenseIndicator", "Round", "RoundDecimal", +] + + +def fuzzy_eq(x, y): + if None in (x, y): + return None + return x == y + + +class FloorDiv(sympy.Function): + """ + We maintain this so that: + 1. We can use divisibility guards to simplify FloorDiv(a, b) to a / b. + 2. Printing out the expression is nicer (compared to say, representing a//b as (a - a % b) / b) + """ + nargs = (2,) + precedence = 50 # precedence of mul # noqa: F811 + + # Default return type for SymPy assumptions. + # https://docs.sympy.org/latest/guides/assumptions.html#implementing-assumptions-handlers + is_real = True + + @property + def base(self): + return self.args[0] + + @property + def divisor(self): + return self.args[1] + + def _sympystr(self, printer): + base = printer.parenthesize(self.base, self.precedence) + divisor = printer.parenthesize(self.divisor, self.precedence) + return f"({base}//{divisor})" + + # SymPy assumptions based on argument types. + def _eval_is_real(self): + return fuzzy_or([self.base.is_real, self.divisor.is_real]) + + def _eval_is_integer(self): + return fuzzy_and([self.base.is_integer, self.divisor.is_integer]) + + # Automatic evaluation. + # https://docs.sympy.org/latest/guides/custom-functions.html#best-practices-for-eval + @classmethod + def eval(cls, base, divisor): + def check_supported_type(x): + if (x.is_integer is False and x.is_real is False and x.is_complex) or x.is_Boolean: + raise TypeError( + f"unsupported operand type(s) for //: " + f"'{type(base).__name__}' and '{type(divisor).__name__}'" + f", expected integer or real") + + check_supported_type(base) + check_supported_type(divisor) + + # We don't provide the same error message as in Python because SymPy + # makes it difficult to check the types. 
+ if divisor.is_zero: + raise ZeroDivisionError("division by zero") + + if base.is_zero: + return sympy.S.Zero + if base.is_integer and divisor == 1: + return base + if base.is_real and divisor == 1: + return sympy.floor(base) + if base.is_integer and divisor == -1: + return sympy.Mul(base, -1) + if isinstance(base, sympy.Integer) and isinstance(divisor, sympy.Integer): + return base // divisor + if isinstance(base, (sympy.Integer, sympy.Float)) and isinstance(divisor, (sympy.Integer, sympy.Float)): + return sympy.floor(base / divisor) + if isinstance(base, FloorDiv): + return FloorDiv(base.args[0], base.args[1] * divisor) + if isinstance(divisor, sympy.Rational) and divisor.p == 1: + return sympy.floor(base * divisor.q) + + if isinstance(base, sympy.Add): + for a in base.args: + gcd = sympy.gcd(a, divisor) + if gcd == divisor: + return FloorDiv(base - a, divisor) + a / gcd + + try: + gcd = sympy.gcd(base, divisor) + if gcd != 1: + return FloorDiv( + sympy.simplify(base / gcd), sympy.simplify(divisor / gcd) + ) + except sympy.PolynomialError: + pass # https://github.com/pytorch/pytorch/issues/108276 + + +class ModularIndexing(sympy.Function): + """ + ModularIndexing(a, b, c) => (a // b) % c where % is the C modulus + """ + + nargs = (3,) + is_integer = True + + @classmethod + def eval(cls, base, divisor, modulus): + if base == 0 or modulus == 1: + return sympy.Integer(0) + + if ( + isinstance(base, sympy.Integer) + and isinstance(divisor, sympy.Integer) + and isinstance(modulus, sympy.Integer) + ): + return (base // divisor) % modulus + + try: + if divisor != 1: + gcd = sympy.gcd(base, divisor) + if gcd != 1: + return ModularIndexing( + sympy.simplify(base / gcd), sympy.simplify(divisor / gcd), modulus + ) + except sympy.PolynomialError: + pass # https://github.com/pytorch/pytorch/issues/108276 + + if isinstance(base, sympy.Add): + new_terms = [] + all_positive = True + for term in base.args: + if sympy.gcd(term, modulus * divisor) != modulus * divisor: + if (isinstance(term, sympy.Integer) and term < 0) or ( + isinstance(term, sympy.Mul) + and isinstance(term.args[0], sympy.Integer) + and term.args[0] < 0 + ): + # workaround for https://github.com/openai/triton/issues/619, + # if there are negative terms, // produces wrong result + # TODO if https://github.com/openai/triton/issues/619 is fixed + # this optimization would become valid + all_positive = False + break + else: + new_terms.append(term) + + if len(new_terms) != len(base.args) and all_positive: + return ModularIndexing(sum(new_terms), divisor, modulus) + + if isinstance(base, FloorDiv): + return ModularIndexing(base.args[0], base.args[1] * divisor, modulus) + + def _eval_is_nonnegative(self): + p, q = self.args[:2] + return fuzzy_eq(p.is_nonnegative, q.is_nonnegative) # type: ignore[attr-defined] + + def _eval_is_positive(self): + p, q = self.args[:2] + return fuzzy_eq(p.is_positive, q.is_positive) # type: ignore[attr-defined] + + +class Where(sympy.Function): + """ + Good ol' ternary operator + """ + + nargs = (3,) + + @classmethod + def eval(cls, c, p, q): + if c == sympy.true: + return p + elif c == sympy.false: + return q + +class Mod(sympy.Function): + """ + We maintain this so that we avoid SymPy correctness issues, such as: + https://github.com/sympy/sympy/issues/25146 + """ + + nargs = (2,) + + @classmethod + def eval(cls, p, q): + # This was adapted from: sympy/core/mod.py + + if q.is_zero: + raise ZeroDivisionError("Modulo by zero") + # If either of them is NaN or infinite. 
+ if p is S.NaN or q is S.NaN or p.is_finite is False or q.is_finite is False: + return S.NaN + # Three cases: + # 1. p == 0 + # 2. p is either q or -q + # 3. p is integer and q == 1 + if p is S.Zero or p in (q, -q) or (p.is_integer and q == 1): + return S.Zero + + # Evaluate if they are both literals. + if q.is_Number and p.is_Number: + return p % q + + # If q == 2, it's a matter of whether p is odd or even. + if q.is_Number and q == 2: + if p.is_even: + return S.Zero + if p.is_odd: + return S.One + + # If p is a multiple of q. + r = p / q + if r.is_integer: + return S.Zero + + # If p < q and its ratio is positive, then: + # - floor(p / q) = 0 + # - p % q = p - floor(p / q) * q = p + less = p < q + if less.is_Boolean and bool(less) and r.is_positive: + return p + + def _eval_is_integer(self): + p, q = self.args + return fuzzy_and([p.is_integer, q.is_integer, fuzzy_not(q.is_zero)]) # type: ignore[attr-defined] + + def _eval_is_nonnegative(self): + return True if self.args[1].is_positive else None # type: ignore[attr-defined] + + def _eval_is_nonpositive(self): + return True if self.args[1].is_negative else None # type: ignore[attr-defined] + + +class CleanDiv(FloorDiv): + """ + Div where we can assume no rounding. + This is to enable future optimizations. + """ + + pass + + +class CeilDiv(sympy.Function): + """ + Div used in indexing that rounds up. + """ + + is_integer = True + + def __new__(cls, base, divisor): + if sympy.gcd(base, divisor) == divisor: + return CleanDiv(base, divisor) + else: + return FloorDiv(base + (divisor - 1), divisor) + + +class LShift(sympy.Function): + @classmethod + def eval(cls, base, shift): + if shift < 0: + raise ValueError('negative shift count') + return base * 2 ** shift + + +class RShift(sympy.Function): + @classmethod + def eval(cls, base, shift): + if shift < 0: + raise ValueError('negative shift count') + return base // 2 ** shift + +# Overloaded to be compatible with regular Python. +# https://github.com/pytorch/pytorch/issues/90900 +class Pow(sympy.Function): + @classmethod + def eval(cls, base, exp): + if exp.is_zero: + return sympy.Integer(1) + elif base.is_zero and exp < 0: + raise ZeroDivisionError(f"{base} cannot be raised to a negative power") + else: + return base ** exp + +# Overloaded to be compatible with regular Python. +# https://github.com/pytorch/pytorch/issues/90900 +class TrueDiv(sympy.Function): + @classmethod + def eval(cls, base, divisor): + if divisor.is_zero: + raise ZeroDivisionError("division by zero") + else: + return base / divisor + + +# TODO: As an indicator, this != 0 implies == 1 (and vice versa). +# Because we do not have the ability to guard on the stride permutation +# at the moment, it is hard to make further inferences when this is true, +# as although we know the tensor is contiguous in *some* layout, we don't +# know which one (however, you could, for example, make the inference that +# reshaping this to a 1D tensor can be guard-free.) +class IsNonOverlappingAndDenseIndicator(sympy.Function): + is_integer = True + + @classmethod + def eval(cls, *args): + assert len(args) % 2 == 0 + dim = len(args) // 2 + # TODO: it is possible to make progress evaluating this guard + # even if not all of the inputs are known. For example, a 2D + # tensor with non-0/1 sizes but strides (0, 1) is definitely + # false, because we know its numel > 1 but it's broadcasted + # in dim 0. + if all(isinstance(a, sympy.Integer) for a in args): + # sym_node imported in torch.__init__. 
Local import to avoid an import cycle + from torch.fx.experimental.symbolic_shapes import eval_is_non_overlapping_and_dense + + size_args = args[0:dim] + stride_args = args[dim:] + return eval_is_non_overlapping_and_dense( + [int(a) for a in size_args], + [int(a) for a in stride_args] + ) + return None + + +class Round(sympy.Function): + is_integer = True + + @classmethod + def eval(cls, number): + if number.is_integer: + return number + elif isinstance(number, sympy.Number): + return sympy.Integer(round(float(number))) + + def __int__(self): + # This will only ever be called when computing size hints. At that point, self.args[0] should be a number and + # no longer an expression. If it were, the float call would fail and the caller would handle this further. + return round(float(self.args[0])) # type: ignore[arg-type] + + +class RoundDecimal(sympy.Function): + @classmethod + def eval(cls, number, ndigits): + if number.is_integer and ndigits >= 0: + return number + elif isinstance(number, sympy.Number) and isinstance(ndigits, sympy.Integer): + value_type, output_type = (int, sympy.Integer) if isinstance(number, sympy.Integer) else (float, sympy.Float) + return output_type(round(value_type(number), int(ndigits))) diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/interp.py b/venv/lib/python3.10/site-packages/torch/utils/_sympy/interp.py new file mode 100644 index 0000000000000000000000000000000000000000..86515b6b1aa77ed3b075e2a4ab48c1bd2570957a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_sympy/interp.py @@ -0,0 +1,118 @@ +""" +This is a simple interpreter for Sympy expressions that dispatches to +classes following the torch._inductor.virtualized calling convention. +For directness, the interpreter takes the handler directly rather than +consulting the TLS. It does not use most of the methods on the full +handler; only those with corresponding Sympy expressions. To see an example +of a full handler, see torch.utils._sympy.value_ranges.ValueRangeAnalysis. 
+""" + +import functools +from typing import Any, Dict, Union + +import sympy +from sympy.logic.boolalg import Boolean as SympyBoolean, BooleanAtom + +import torch +from .functions import ( + CleanDiv, + FloorDiv, + IsNonOverlappingAndDenseIndicator, + Mod, + ModularIndexing, + Pow, + Round, + RoundDecimal, + TrueDiv, + Where, +) + + +# TODO: Dedupe this with SYMPY_INTERP + + +@functools.lru_cache(None) +def handlers(): + # TODO add CeilDiv (it doesn't appear in the index_expr) + + # TODO default to some decompositions if the interpreter doesn't have them + # like decomposing ModularIndexing or implementing Le(a,b) as Ge(b, a) + + HANDLERS = { + sympy.Or: "or_", + sympy.And: "and_", + sympy.Eq: "eq", + sympy.Ne: "ne", + sympy.Lt: "lt", + sympy.Gt: "gt", + sympy.Le: "le", + sympy.Ge: "ge", + sympy.Not: "not_", + TrueDiv: "truediv", + FloorDiv: "floordiv", + CleanDiv: "div", + Where: "where", + sympy.Add: "add", + sympy.Mul: "mul", + Pow: "pow", + sympy.Pow: "pow", + Mod: "mod", + sympy.Mod: "mod", + sympy.Abs: "abs", + sympy.log: "log", + sympy.exp: "exp", + sympy.floor: "floor", + sympy.ceiling: "ceil", + sympy.Min: "minimum", + sympy.Max: "maximum", + ModularIndexing: "modular_indexing", + sympy.functions.elementary.piecewise.ExprCondPair: "expr_cond_pair", + sympy.Piecewise: "piecewise", + IsNonOverlappingAndDenseIndicator: "is_non_overlapping_and_dense_indicator", + Round: "round", + RoundDecimal: "round", + } + for name in ["cos", "sin", "tan", "sinh", "cosh", "tanh", "asin", "acos", "atan"]: + HANDLERS[getattr(sympy, name)] = name + + return HANDLERS + + +ASSOCIATIVE_OPS = {"minimum", "maximum", "mul", "add", "and_", "or_"} + + +def sympy_interp( + analysis, env: Dict[sympy.Symbol, Any], expr: Union[sympy.Expr, SympyBoolean] +): + # Handle base cases + dtype = None + if isinstance(expr, BooleanAtom): + dtype = torch.bool + elif isinstance(expr, sympy.Integer): + dtype = torch.int64 + elif isinstance(expr, sympy.Number): + dtype = torch.double + + if dtype is not None: + return analysis.constant(expr, dtype) + elif isinstance(expr, sympy.Symbol): + return env[expr] + + # Special cases + if isinstance(expr, sympy.Pow) and isinstance( + expr.args[1], sympy.core.numbers.Half + ): + return analysis.sqrt(sympy_interp(analysis, env, expr.args[0])) + + # Recursive case + args = [sympy_interp(analysis, env, arg) for arg in expr.args] # type: ignore[arg-type] + handler_name = handlers()[expr.func] + handler = getattr(analysis, handler_name) + if handler_name in ASSOCIATIVE_OPS: + assert len(args) > 1 + acc = handler(args[0], args[1]) + for i in range(2, len(args)): + acc = handler(acc, args[i]) + return acc + else: + return handler(*args) diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/reference.py b/venv/lib/python3.10/site-packages/torch/utils/_sympy/reference.py new file mode 100644 index 0000000000000000000000000000000000000000..adb25c7ffb0fe60b3a752e52654052d7fa74dc3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_sympy/reference.py @@ -0,0 +1,214 @@ +import math + +import sympy + +import torch + + +# The sympy interpretation of operators. It will also sometimes work with +# plain int/float, but if you do certain operations you will get out a +# sympy.Basic in the end. If you want the Python/FX traceable interpretation, +# check PythonReferenceAnalysis. 
+# NB: For magic methods this needs to use normal magic methods +# so that test_magic_methods works +class ReferenceAnalysis: + @staticmethod + def constant(c, dtype): + return sympy.sympify(c) + + @staticmethod + def or_(a, b): + return a | b + + @staticmethod + def and_(a, b): + return a & b + + @staticmethod + def eq(a, b): + if isinstance(a, sympy.Expr) or isinstance(b, sympy.Expr): + return sympy.Eq(a, b) + return a == b + + @classmethod + def ne(cls, a, b): + return cls.not_(cls.eq(a, b)) + + @staticmethod + def lt(a, b): + return a < b + + @staticmethod + def gt(a, b): + return a > b + + @staticmethod + def le(a, b): + return a <= b + + @staticmethod + def ge(a, b): + return a >= b + + @staticmethod + def not_(a): + assert not isinstance(a, bool) + return ~a + + @staticmethod + def reciprocal(x): + return 1 / x + + @staticmethod + def square(x): + return x * x + + @staticmethod + def mod(x, y): + return x % y + + @staticmethod + def abs(x): + return abs(x) + + @staticmethod + def neg(x): + return -x + + @staticmethod + def truediv(a, b): + return a / b + + @staticmethod + def div(a, b): + return ReferenceAnalysis.truediv(a, b) + + @staticmethod + def floordiv(a, b): + if b == 0: + return sympy.nan if a == 0 else sympy.zoo + return a // b + + @staticmethod + def truncdiv(a, b): + result = a / b + if result.is_finite: + result = sympy.Integer(result) + + return result + + @staticmethod + def add(a, b): + return a + b + + @staticmethod + def mul(a, b): + return a * b + + @staticmethod + def sub(a, b): + return a - b + + @staticmethod + def exp(x): + return sympy.exp(x) + + @staticmethod + def log(x): + return sympy.log(x) + + @staticmethod + def sqrt(x): + return sympy.sqrt(x) + + @staticmethod + def pow(a, b): + return a**b + + @staticmethod + def minimum(a, b): + # Poorman's version of upcasting in Sympy + # This won't do for sympy.Expr as the casting does nothing for those + if a.is_Float or not a.is_finite or b.is_Float or not b.is_finite: + result_type = sympy.Float + else: + assert a.is_Integer + assert b.is_Integer + result_type = sympy.Integer + return sympy.Min(result_type(a), result_type(b)) + + @staticmethod + def maximum(a, b): + # Poorman's version of upcasting in Sympy + # This won't do for sympy.Expr as the casting does nothing for those + if a.is_Float or not a.is_finite or b.is_Float or not b.is_finite: + result_type = sympy.Float + else: + assert a.is_Integer + assert b.is_Integer + result_type = sympy.Integer + return sympy.Max(result_type(a), result_type(b)) + + @staticmethod + def floor(x): + return sympy.floor(x) + + @staticmethod + def ceil(x): + return sympy.ceiling(x) + + +# Unlike ReferenceAnalysis, does NOT sympyify, instead, works with plain +# Python types and is FX traceable. Inheritance here is purely for code +# sharing (TODO: considering splitting out a BaseReferenceAnalysis). 
+class PythonReferenceAnalysis(ReferenceAnalysis): + @staticmethod + def constant(c, dtype): + if dtype is torch.int64: + return int(c) + elif dtype is torch.double: + return float(c) + elif dtype is torch.bool: + return bool(c) + else: + raise AssertionError(f"unrecognized dtype {dtype}") + + @staticmethod + def not_(a): + return torch.sym_not(a) + + @staticmethod + def floordiv(a, b): + return a // b + + @staticmethod + def truncdiv(a, b): + return a / b + + @staticmethod + def exp(x): + raise AssertionError("exp is not valid shape sympy expr") + + @staticmethod + def log(x): + raise AssertionError("log is not valid shape sympy expr") + + @staticmethod + def sqrt(x): + return torch._sym_sqrt(x) # type: ignore[attr-defined] + + @staticmethod + def minimum(a, b): + return torch.sym_min(a, b) + + @staticmethod + def maximum(a, b): + return torch.sym_max(a, b) + + @staticmethod + def floor(x): + return math.floor(x) + + @staticmethod + def ceil(x): + return math.ceil(x) diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py b/venv/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py new file mode 100644 index 0000000000000000000000000000000000000000..870bda554e74808d9423e1d79ebf2dfbdee93f91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py @@ -0,0 +1,94 @@ +import sympy +from sympy.multipledispatch import dispatch + +__all__ = ["SingletonInt"] + + +class SingletonInt(sympy.AtomicExpr): + # This is probably not super important unless we are in multiple dispatch + # situations with other more exotic Expr types. + _op_priority = 99999 + + def __new__(cls, *args, coeff=None, **kwargs): + instance = super().__new__(cls, *args, **kwargs) + return instance + + # The semantics of this class should match that of NestedIntSymNodeImpl in + # c10/core/NestedIntSymNodeImpl.h + def __init__(self, val, *, coeff=1): + self._val = val + self._coeff = coeff + super().__init__() + + # See NOTE [ Inequalities with nested int ] + def _eval_Eq(self, other): + if ( + isinstance(other, SingletonInt) + and other._val == self._val + and self._coeff == other._coeff + ): + return sympy.true + else: + return sympy.false + + # This is necessary so that calling expr.free_symbols on exprs that contain + # this Singleton does not error + @property + def free_symbols(self): + return set() + + def __mul__(self, other): + if isinstance(other, SingletonInt): + raise ValueError( + "SingletonInt cannot be multiplied by another SingletonInt" + ) + return SingletonInt(self._val, coeff=self._coeff * other) + + def __rmul__(self, other): + if isinstance(other, SingletonInt): + raise ValueError( + "SingletonInt cannot be multiplied by another SingletonInt" + ) + return SingletonInt(self._val, coeff=self._coeff * other) + + # Make sure we promptly raise an error instead of falling back to building + # an expression tree. There are probably more ops, how can we be exhaustive? 
+ def __add__(self, other): + raise NotImplementedError("NYI") + + def __sub__(self, other): + raise NotImplementedError("NYI") + + def __truediv__(self, other): + raise NotImplementedError("NYI") + + def __floordiv__(self, other): + raise NotImplementedError("NYI") + + def __mod__(self, other): + raise NotImplementedError("NYI") + + +# See NOTE [ Inequalities with nested int ] +@dispatch(sympy.Integer, SingletonInt) +def _eval_is_ge(a, b): + if a < 2: + return sympy.false + raise ValueError("Symbolic SingletonInt: Relation is indeterminate") + + +@dispatch(SingletonInt, sympy.Integer) # type: ignore[no-redef] +def _eval_is_ge(a, b): # noqa: F811 + if b <= 2: + return sympy.true + raise ValueError("Symbolic SingletonInt: Relation is indeterminate") + + +@dispatch(SingletonInt, SingletonInt) # type: ignore[no-redef] +def _eval_is_ge(a, b): # noqa: F811 + if a._val == b._val: + if a._coeff >= b._coeff: + return sympy.true + else: + return sympy.false + raise ValueError("Symbolic SingletonInt: Relation is indeterminate") diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/solve.py b/venv/lib/python3.10/site-packages/torch/utils/_sympy/solve.py new file mode 100644 index 0000000000000000000000000000000000000000..4d1113bea891df2be89e7bb324a558a73afc0425 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_sympy/solve.py @@ -0,0 +1,175 @@ +import logging + +from typing import Dict, Optional, Tuple, Type + +import sympy + +from torch.utils._sympy.functions import FloorDiv + +log = logging.getLogger(__name__) + +_MIRROR_REL_OP: Dict[Type[sympy.Basic], Type[sympy.Rel]] = { + sympy.Eq: sympy.Eq, + sympy.Ne: sympy.Ne, + sympy.Ge: sympy.Le, + sympy.Gt: sympy.Lt, + sympy.Le: sympy.Ge, + sympy.Lt: sympy.Gt, +} + +INEQUALITY_TYPES = (sympy.Gt, sympy.Ge, sympy.Lt, sympy.Le) + + +def mirror_rel_op(type: Type) -> Optional[Type[sympy.Rel]]: + return _MIRROR_REL_OP.get(type, None) + + +# Tries to simplify 'expr', so as to leave only 'thing' in the left-hand side. +# +# Returns a tuple of: +# 1. The simplified expression +# 2. The expression on the right-hand side +# +# Returns 'None' if it can't reach a state where the only thing in the left +# hand side is 'thing'. +# +# 'trials': number of times 'try_solve' will try to isolate 'thing' to the +# left-hand side. +# +# 'floordiv_inequality': flag to enable conversion of 'FloorDiv' into +# inequalities. +def try_solve( + expr: sympy.Basic, + thing: sympy.Basic, + trials: int = 5, + floordiv_inequality: bool = True, +) -> Optional[Tuple[sympy.Rel, sympy.Basic]]: + mirror = mirror_rel_op(type(expr)) + + # Ignore unsupported expressions: + # - Those that are not relational operations + # - Those that don't have a mirror (just avoiding unexpected classes) + if not isinstance(expr, sympy.Rel) or mirror is None: + log.debug("expression with unsupported type: %s", type(expr)) + return None + + lhs_has_thing = expr.lhs.has(thing) + rhs_has_thing = expr.rhs.has(thing) + + # Give up when 'thing' appears on both sides of the relational expression. + # That is because, as is, we assume the thing we are trying to isolate is + # only on the right-hand side. + if lhs_has_thing and rhs_has_thing: + log.debug("thing (%s) found in both sides of expression: %s", thing, expr) + return None + + # Try considering both LHS and RHS by mirroring the original expression: + # a < b ==> b > a + expressions = [] + + # Add each version of 'expr' if 'thing' is in its left-hand side. 
+ if lhs_has_thing: + expressions.append(expr) + if rhs_has_thing: + expressions.append(mirror(expr.rhs, expr.lhs)) + + for e in expressions: + if e is None: + continue + + assert isinstance(e, sympy.Rel) + + for _ in range(trials): + trial = _try_isolate_lhs(e, thing, floordiv_inequality=floordiv_inequality) + # Stop if there was no change in this trial. + if trial == e: + break + e = trial # type: ignore[assignment] + + # Return if we were able to isolate 'thing' on the left-hand side. + if isinstance(e, sympy.Rel) and e.lhs == thing: + return e, e.rhs + + return None + + +def _try_isolate_lhs( + expr: sympy.Basic, thing: sympy.Basic, floordiv_inequality: bool +) -> sympy.Basic: + e = expr + op = type(expr) + + if isinstance(e, sympy.Rel): + # Move any constants in the left-hand side to the right-hand side. + lhs_not_thing = ( + sum([a for a in e.lhs.args if not a.has(thing)]) + if isinstance(e.lhs, sympy.Add) + else 0 + ) + e = op(expr.lhs - lhs_not_thing, expr.rhs - lhs_not_thing) # type: ignore[attr-defined] + + # Divide both sides by the factors that don't contain thing. + if isinstance(e, sympy.Rel) and isinstance(e.lhs, sympy.Mul): + lhs, rhs = e.args + other = sympy.Mul(*[a for a in lhs.args if not a.has(thing)]) + + # If we can't tell whether 'other' is negative or positive, we do nothing. + # That is because we don't know whether we have mirror the operation or not. + if not (isinstance(e, INEQUALITY_TYPES) and other.is_negative is None): + # Divide both sides by 'other'. + lhs = lhs / other + rhs = rhs / other + + # If 'e' is an inequality and 'other' is negative, we have to + # mirror the expression. + if isinstance(e, INEQUALITY_TYPES) and other.is_negative: + op = mirror_rel_op(op) # type: ignore[assignment] + + assert op is not None + e = op(lhs, rhs) + + ################################################################################ + # left-hand side is FloorDiv + ################################################################################ + # + # Given the expression: a // b op c + # where 'op' is a relational operation, these rules only work if: + # - b > 0 + # - c is an integer + if ( + floordiv_inequality + and isinstance(e, sympy.Rel) + and isinstance(e.lhs, FloorDiv) + and e.lhs.divisor.is_positive + and e.rhs.is_integer + ): + # a // b == expr + # => a >= (b * expr) and a < (b * (expr + 1)) + if isinstance(expr, sympy.Eq): + numerator, denominator = e.lhs.args + return sympy.And( + sympy.Ge(numerator, (e.rhs * denominator)), # type: ignore[arg-type] + sympy.Lt(numerator, ((e.rhs + 1) * denominator)), # type: ignore[arg-type] + ) + # a // b != expr + # => a < (b * expr) or a >= (b * (expr + 1)) + if isinstance(expr, sympy.Ne): + numerator, denominator = e.lhs.args + return sympy.Or( + sympy.Lt(numerator, (e.rhs * denominator)), # type: ignore[arg-type] + sympy.Ge(numerator, ((e.rhs + 1) * denominator)), # type: ignore[arg-type] + ) + # The transformations below only work if b is positive. + # Note: we only have this information for constants. 
+ # a // b > expr => a >= b * (expr + 1) + # a // b >= expr => a >= b * expr + if isinstance(expr, (sympy.Gt, sympy.Ge)): + quotient = e.rhs if isinstance(expr, sympy.Ge) else (e.rhs + 1) # type: ignore[arg-type] + return sympy.Ge(e.lhs.args[0], (quotient * e.lhs.args[1])) # type: ignore[arg-type] + # a // b < expr => a < b * expr + # a // b <= expr => a < b * (expr + 1) + if isinstance(expr, (sympy.Lt, sympy.Le)): + quotient = e.rhs if isinstance(expr, sympy.Lt) else (e.rhs + 1) # type: ignore[arg-type] + return sympy.Lt(e.lhs.args[0], (quotient * e.lhs.args[1])) # type: ignore[arg-type] + + return e diff --git a/venv/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py b/venv/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f4ca9a6b77d7f5f9178fa3e05f842ee324edf2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py @@ -0,0 +1,782 @@ +from __future__ import annotations + +import dataclasses +import itertools +import sympy +from sympy.logic.boolalg import BooleanAtom, Boolean as SympyBoolean +import operator +import math +import logging +import torch +from typing import Dict, Optional, SupportsFloat, TypeVar, Generic, Union, overload, Callable, TYPE_CHECKING +from typing_extensions import TypeGuard + +from torch._prims_common import dtype_to_type +from .interp import sympy_interp +from .functions import Round, RoundDecimal + +log = logging.getLogger(__name__) + +__all__ = ["ValueRanges", "ValueRangeAnalysis", "bound_sympy"] + +_T = TypeVar('_T', sympy.Expr, SympyBoolean) + +class ValueRangeError(RuntimeError): + pass + + +# Like sympify, but supports less stuff, and also ensures that direct +# sympy expressions don't have free variables +def simple_sympify(e): + if isinstance(e, bool): + return sympy.true if e else sympy.false + elif isinstance(e, int): + return sympy.Integer(e) + elif isinstance(e, float): + # infinity is special; we use it to bracket integers as well + if math.isinf(e): + return sympy.oo if e > 0 else -sympy.oo + return sympy.Float(e) + elif isinstance(e, sympy.Expr): + assert e.is_number, e + # NaNs can occur when doing things like 0 * sympy.oo, but it is better + # if the operator notices this and takes care of it, because sometimes + # the NaN is inappropriate (for example, for ints, the [-oo, oo] range + # should go to zero when multiplied with [0, 0]) + assert e != sympy.nan + return e + elif isinstance(e, BooleanAtom): + return e + else: + raise AssertionError(f"not simple sympy type {type(e)}: {e}") + + +# Sympy atomics only. Unlike <=, it also works on Sympy bools. 
+def sympy_generic_le(lower, upper): + if isinstance(lower, sympy.Expr): + assert isinstance(upper, sympy.Expr) + return lower <= upper + else: + # only negative condition is True > False + assert isinstance(lower, SympyBoolean) and isinstance(upper, SympyBoolean) + return not (lower and not upper) + + +def vr_is_bool(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[SympyBoolean]]: + return vr.is_bool + + +def vr_is_expr(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[sympy.Expr]]: + return not vr.is_bool + + +ExprIn = Union[int, float, sympy.Expr] +BoolIn = Union[bool, SympyBoolean] +AllIn = Union[ExprIn, BoolIn] +ExprFn = Callable[[sympy.Expr], sympy.Expr] +ExprFn2 = Callable[[sympy.Expr, sympy.Expr], sympy.Expr] +BoolFn = Callable[[SympyBoolean], SympyBoolean] +BoolFn2 = Callable[[SympyBoolean, SympyBoolean], SympyBoolean] +AllFn = Union[ExprFn, BoolFn] +AllFn2 = Union[ExprFn2, BoolFn2] + + +@dataclasses.dataclass(frozen=True) +class ValueRanges(Generic[_T]): + if TYPE_CHECKING: + # ruff doesn't understand circular references but mypy does + ExprVR = ValueRanges[sympy.Expr] # noqa: F821 + BoolVR = ValueRanges[SympyBoolean] # noqa: F821 + AllVR = Union[ExprVR, BoolVR] + + # Although the type signature here suggests you can pass any + # sympy expression, in practice the analysis here only works + # with constant sympy expressions + lower: _T + upper: _T + is_bool: bool + + @overload + def __init__(self: ValueRanges[sympy.Expr], lower: ExprIn, upper: ExprIn) -> None: + ... + + @overload + def __init__(self: ValueRanges[SympyBoolean], lower: BoolIn, upper: BoolIn) -> None: + ... + + def __init__(self, lower: AllIn, upper: AllIn) -> None: + lower = simple_sympify(lower) + upper = simple_sympify(upper) + # TODO: when the bounds have free variables, this may be + # nontrivial to actually verify + if not sympy_generic_le(lower, upper): + raise ValueRangeError(f"Invalid ranges [{lower}:{upper}]") + # Because this is a frozen class + object.__setattr__(self, "lower", lower) + object.__setattr__(self, "upper", upper) + object.__setattr__(self, "is_bool", isinstance(lower, SympyBoolean)) + assert isinstance(upper, SympyBoolean) == self.is_bool + + def boolify(self) -> ValueRanges[SympyBoolean]: + if vr_is_bool(self): + return self + elif self == ValueRanges.unknown(): + return ValueRanges.unknown_bool() + else: + raise AssertionError(f"not bool like {self}") + + def __contains__(self, x: AllIn) -> bool: + x = simple_sympify(x) + return sympy_generic_le(self.lower, x) and sympy_generic_le(x, self.upper) + + def issubset(self, other): + return sympy_generic_le(other.lower, self.lower) and sympy_generic_le(self.upper, other.upper) + + def tighten(self, other) -> ValueRanges: + """Given two ValueRanges, returns their intersection""" + return self & other + + # Intersection + @overload + def __and__(self: ValueRanges[sympy.Expr], other: ValueRanges[sympy.Expr]) -> ValueRanges[sympy.Expr]: + ... + + @overload + def __and__(self: ValueRanges[SympyBoolean], other: ValueRanges[SympyBoolean]) -> ValueRanges[SympyBoolean]: + ... 
+ + def __and__(self: AllVR, other: AllVR) -> AllVR: + if other == ValueRanges.unknown(): + return self + if self == ValueRanges.unknown(): + return other + assert self.is_bool == other.is_bool, (self, other) + if self.is_bool: + return ValueRanges(sympy.Or(self.lower, other.lower), sympy.And(self.upper, other.upper)) + else: + return ValueRanges(sympy.Max(self.lower, other.lower), sympy.Min(self.upper, other.upper)) + + # Union + @overload + def __or__(self: ValueRanges[sympy.Expr], other: ValueRanges[sympy.Expr]) -> ValueRanges[sympy.Expr]: + ... + + @overload + def __or__(self: ValueRanges[SympyBoolean], other: ValueRanges[SympyBoolean]) -> ValueRanges[SympyBoolean]: + ... + + def __or__(self: AllVR, other: AllVR) -> AllVR: + if ValueRanges.unknown() in (self, other): + return ValueRanges.unknown() + assert self.is_bool == other.is_bool, (self, other) + if self.is_bool: + return ValueRanges(sympy.And(self.lower, other.lower), sympy.Or(self.upper, other.upper)) + else: + return ValueRanges(sympy.Min(self.lower, other.lower), sympy.Max(self.upper, other.upper)) + + def is_singleton(self) -> bool: + return self.lower == self.upper + + # TODO: this doesn't work with bools but arguably it should + @staticmethod + def unknown() -> ValueRanges[sympy.Expr]: + return ValueRanges(-sympy.oo, sympy.oo) + + @staticmethod + def unknown_bool() -> ValueRanges[SympyBoolean]: + return ValueRanges(sympy.false, sympy.true) + + @overload + @staticmethod + # work around the fact that bool and int overlap + def wrap(arg: Union[ExprIn, ExprVR]) -> ExprVR: # type: ignore[overload-overlap] + ... + + @overload + @staticmethod + def wrap(arg: Union[BoolIn, BoolVR]) -> BoolVR: + ... + + @staticmethod + def wrap(arg: Union[AllIn, AllVR]) -> AllVR: + if isinstance(arg, ValueRanges): + return arg + # arg is either ExprIn or BoolIn, but we don't know it here + return ValueRanges(arg, arg) # type: ignore[arg-type] + + @staticmethod + def increasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + """Increasing: x <= y => f(x) <= f(y).""" + x = ValueRanges.wrap(x) + return ValueRanges(fn(x.lower), fn(x.upper)) + + @overload + @staticmethod + def decreasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + ... + + @overload + @staticmethod + def decreasing_map(x: Union[BoolIn, BoolVR], fn: BoolFn) -> BoolVR: + ... + + @staticmethod + def decreasing_map(x: Union[AllIn, AllVR], fn: AllFn) -> AllVR: + """Decreasing: x <= y => f(x) >= f(y).""" + x = ValueRanges.wrap(x) + # consistently either Expr or Bool, but we don't know it here + return ValueRanges(fn(x.upper), fn(x.lower)) # type: ignore[arg-type] + + @staticmethod + def monotone_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + """It's increasing or decreasing.""" + x = ValueRanges.wrap(x) + l = fn(x.lower) + u = fn(x.upper) + return ValueRanges(min(l, u), max(l, u)) + + @staticmethod + def convex_min_zero_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + """Fn is convex and has a minimum at 0.""" + x = ValueRanges.wrap(x) + if 0 in x: + return ValueRanges(0, max(fn(x.lower), fn(x.upper))) + else: + return ValueRanges.monotone_map(x, fn) + + @overload + @staticmethod + def coordinatewise_increasing_map(x: Union[ExprIn, ExprVR], y: Union[ExprIn, ExprVR], fn: ExprFn2) -> ExprVR: + ... + + @overload + @staticmethod + def coordinatewise_increasing_map(x: Union[BoolIn, BoolVR], y: Union[BoolIn, BoolVR], fn: BoolFn2) -> BoolVR: + ... 
+ + @staticmethod + def coordinatewise_increasing_map(x: Union[AllIn, AllVR], y: Union[AllIn, AllVR], fn: AllFn2) -> AllVR: + """ + It's increasing on each coordinate. + + Mathematically: + For every 1 <= i <= n and x_i <= y_i we have that + f(x1, .., xn) <= f(x1, , yi, ..., xn) + """ + x, y = ValueRanges.wrap(x), ValueRanges.wrap(y) + return ValueRanges( + fn(x.lower, y.lower), # type: ignore[arg-type] + fn(x.upper, y.upper), # type: ignore[arg-type] + ) + + @classmethod + def coordinatewise_monotone_map(cls, x, y, fn): + """It's increasing or decreasing on each coordinate.""" + x, y = cls.wrap(x), cls.wrap(y) + products = [ + fn(a, b) + for a, b in itertools.product([x.lower, x.upper], [y.lower, y.upper]) + ] + return ValueRanges(min(products), max(products)) + +class SymPyValueRangeAnalysis: + """ + It gives bounds on a SymPy operator given bounds on its arguments + See the function `bound_sympy` for a function that applies this logic to a full SymPy expression + """ + + @staticmethod + def constant(value, dtype): + # NB: value is NOT a sympy expression, it's a constant! + is_python = isinstance(value, (int, float, bool)) + assert is_python or isinstance(value, (BooleanAtom, sympy.Integer, sympy.Number)) + + # using nan makes subsequent computation throw, and for the purposes of optimization + # returning -math.inf - math.inf is equivalent to giving up + if isinstance(value, SupportsFloat) and math.isnan(value): + return ValueRanges.unknown() + + if is_python: + type_ = dtype_to_type(dtype) + value = type_(value) + else: + # We do a type check on a best-effort basis + # We don't want to force a cast to sympy.Float if the value is Rational to avoid losing precision + if dtype == torch.bool: + assert isinstance(value, BooleanAtom) + elif dtype.is_floating_point: + assert not value.is_finite or value.is_real + else: + # dtype is intXX + assert value.is_integer + + return ValueRanges.wrap(value) + + @staticmethod + def not_(a): + a = ValueRanges.wrap(a) + a = a.boolify() + assert a.is_bool + return ValueRanges.decreasing_map(a, sympy.Not) + + @staticmethod + def or_(a, b): + return ValueRanges.coordinatewise_increasing_map(a, b, sympy.Or) + + @staticmethod + def and_(a, b): + return ValueRanges.coordinatewise_increasing_map(a, b, sympy.And) + + @staticmethod + def eq(a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + if a.is_singleton() and b.is_singleton() and a.lower == b.lower: + return ValueRanges.wrap(sympy.true) + elif a.lower > b.upper or b.lower > a.upper: # ranges disjoint + return ValueRanges.wrap(sympy.false) + return ValueRanges(sympy.false, sympy.true) + + @classmethod + def ne(cls, a, b): + return cls.not_(cls.eq(a, b)) + + @classmethod + def lt(cls, a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + assert a.is_bool == b.is_bool + if a.is_bool: + return cls.and_(cls.not_(a), b) + else: + if a.upper < b.lower: + return ValueRanges.wrap(sympy.true) + elif a.lower >= b.upper: + return ValueRanges.wrap(sympy.false) + return ValueRanges(sympy.false, sympy.true) + + @classmethod + def gt(cls, a, b): + return cls.lt(b, a) + + @classmethod + def le(cls, a, b): + return cls.not_(cls.gt(a, b)) + + @classmethod + def ge(cls, a, b): + return cls.not_(cls.lt(a, b)) + + @staticmethod + def add(a, b): + return ValueRanges.coordinatewise_increasing_map(a, b, operator.add) + + @classmethod + def mul(cls, a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + + assert a.is_bool == b.is_bool + if a.is_bool: + return cls.and_(a, b) + + def safe_mul(a, b): + # Make 
unknown() * wrap(0) == wrap(0) + if a == 0: + return a + elif b == 0: + return b + else: + return a * b + + return ValueRanges.coordinatewise_monotone_map(a, b, safe_mul) + + @classmethod + def div(cls, a, b): + return cls.truediv(a, b) + + @staticmethod + def truediv(a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + if 0 in b or ((-sympy.oo in a or sympy.oo in a) and (-sympy.oo in b or sympy.oo in b)): + return ValueRanges.unknown() + else: + return ValueRanges.coordinatewise_monotone_map(a, b, operator.truediv) + + @staticmethod + def floordiv(a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + if 0 in b or ((-sympy.oo in a or sympy.oo in a) and (-sympy.oo in b or sympy.oo in b)): + return ValueRanges.unknown() + else: + return ValueRanges.coordinatewise_monotone_map(a, b, operator.floordiv) + + @staticmethod + def mod(x, y): + x = ValueRanges.wrap(x) + y = ValueRanges.wrap(y) + if x.is_singleton() and y.is_singleton() and y.lower != 0: + return ValueRanges.wrap(x.lower % y.lower) + if y.lower <= 0: + return ValueRanges.unknown() + return ValueRanges(0, y.upper) + + @classmethod + def modular_indexing(cls, a, b, c): + return cls.mod(cls.floordiv(a, b), c) + + @classmethod + def is_non_overlapping_and_dense_indicator(cls, *args): + return ValueRanges.unknown() + + @classmethod + def pow(cls, a, b): + def is_integer(val): + return isinstance(val, int) or ( + hasattr(val, "is_integer") and val.is_integer + ) + + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + # Not implemented yet. It's a bit tricky + # If you want to implement it, compute the partial derivatives of a ** b + # and check the ranges where the function is increasing / decreasing + # Another non-tight way of doing this is defaulting to doing noting that for a > 0, a ** b == exp(b * log(a)) + # If this second option is implemented, by carefult about the types and possible infinities here and there. 
+ if not b.is_singleton(): + return ValueRanges.unknown() + + b = b.lower + if a.is_singleton(): + a = a.lower + r = a ** b + if not r.is_finite: + return ValueRanges.unknown() + return ValueRanges.wrap(r) + + if b == 0: + if not a.lower.is_finite: + return ValueRanges.unknown() + type_ = sympy.Float if a.lower.is_real else sympy.Integer + return ValueRanges.wrap(type_(1)) + + if b < 0: + a = cls.reciprocal(a) + b = -b + + if a == ValueRanges.unknown(): + return ValueRanges.unknown() + + # Here b > 0 + if not is_integer(b): + # If the base is positive, then we're good, otherwise nothing's defined + if a.lower >= 0: + return ValueRanges.increasing_map(a, lambda x: x ** b) + else: + return ValueRanges.unknown() + else: + # b > 0 integer + if b % 2 == 0: + # x^n where n is even + return ValueRanges.convex_min_zero_map(a, lambda x: x ** b) + else: + # x^n where n is odd + return ValueRanges.increasing_map(a, lambda x: x ** b) + + @staticmethod + def reciprocal(x): + """ Needed as it's used in pow, but it won't appear on a SymPy expression """ + x = ValueRanges.wrap(x) + if 0 in x: + return ValueRanges.unknown() + else: + return ValueRanges.decreasing_map(x, lambda y: 1 / y) + + @staticmethod + def abs(x): + return ValueRanges.convex_min_zero_map(x, abs) + + @staticmethod + def exp(x): + return ValueRanges.increasing_map(x, sympy.functions.elementary.exponential.exp) + + @staticmethod + def log(x): + x = ValueRanges.wrap(x) + if x.lower <= 0: + return ValueRanges.unknown() + return ValueRanges.increasing_map(x, sympy.log) + + @classmethod + def minimum(cls, a, b): + return cls.min_or_max(a, b, sympy.Min) + + @classmethod + def maximum(cls, a, b): + return cls.min_or_max(a, b, sympy.Max) + + @staticmethod + def min_or_max(a, b, fn): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + + # Performs upcasting first + def fn_(x: sympy.Expr, y: sympy.Expr) -> sympy.Expr: + # Poorman's version of upcasting in Sympy + # Inf is not a float... + if x.is_Integer and y.is_Integer: + result_type = sympy.Integer + elif x.is_rational and y.is_rational: + result_type = sympy.Rational + else: + assert x.is_real or not x.is_finite or y.is_real or not y.is_finite + result_type = sympy.Float + return fn(result_type(x), result_type(y)) + + return ValueRanges.coordinatewise_increasing_map(a, b, fn_) + + @classmethod + def floor(cls, x): + return ValueRanges.increasing_map(x, sympy.functions.elementary.integers.floor) + + @classmethod + def ceil(cls, x): + return ValueRanges.increasing_map(x, sympy.functions.elementary.integers.ceiling) + + @classmethod + def round(cls, number, ndigits=None): + if ndigits is None: + fn = Round + else: + assert ndigits.is_singleton() + ndigits = ndigits.lower + # We can't use functools.partial here since sympy doesn't support keyword arguments, but we have to bind + # the second parameter. 
+ fn = lambda number: RoundDecimal(number, ndigits) # type: ignore[misc, assignment] # noqa: E731 + + return ValueRanges.increasing_map(number, fn) + + # It's used in some models on symints + @staticmethod + def sqrt(x): + x = ValueRanges.wrap(x) + if x.lower < 0: + return ValueRanges.unknown() + return ValueRanges.increasing_map(x, sympy.sqrt) + + @staticmethod + def where(a, b, c): + b = ValueRanges.wrap(b) + c = ValueRanges.wrap(c) + a = a.boolify() + assert b.is_bool == c.is_bool + if b.is_bool: + return ValueRanges(sympy.And(b.lower, c.lower), sympy.Or(b.upper, c.upper)) + else: + return ValueRanges(sympy.Min(b.lower, c.lower), sympy.Max(b.upper, c.upper)) + + # expr_cond_pair is used to represent a single (expr, condition) pair in piecewise. + # We just return the value range of the expression and its corresponding condition as a tuple + # and defer the analysis to piecewise + @staticmethod + def expr_cond_pair(a, b): + b = b.boolify() + return (a, b) + + # piecewise function can be used to convert a SymBool to SymInt: + # int_expr = Piecewise((1, bool_expr), (0, True)), it evalutes to 1 when sym_bool is True and 0 otherwise. + # + # ranges is a sequence of (expr_range, condition_range) pairs. The range pair is constructed in expr_cond_pair. + # The ValueRange of Piecewise is just the union of all expr ranges whose condition expr can be True. + @staticmethod + def piecewise(*ranges): + init_range = None + for expr_range, cond_range in ranges: + if sympy.true in cond_range: + if init_range is None: + init_range = expr_range + else: + init_range = init_range | expr_range + return init_range + + @staticmethod + def cos(x): + # TODO: We should tighten value ranges + # If input range span is pi + 2*pi*k, then output range is (-1, 1) + # otherwise the minimum of the value of the function on the extremes + return ValueRanges(-1.0, 1.0) + + @staticmethod + def cosh(x): + x = ValueRanges.wrap(x) + if x.lower > 0: + return ValueRanges.increasing_map(x, sympy.cosh) + elif x.upper < 0: + return ValueRanges.decreasing_map(x, sympy.cosh) + return ValueRanges(0.0, sympy.oo) + + @staticmethod + def sin(x): + # TODO: We should tighten value ranges + # See details on cos + return ValueRanges(-1.0, 1.0) + + @staticmethod + def sinh(x): + return ValueRanges.increasing_map(x, sympy.sinh) + + @staticmethod + def tan(x): + return ValueRanges(-sympy.oo, sympy.oo) + + @staticmethod + def tanh(x): + return ValueRanges.increasing_map(x, sympy.tanh) + + @staticmethod + def asin(x): + x = ValueRanges.wrap(x) + if -1 <= x.lower and x.upper <= 1: + return ValueRanges.increasing_map(x, sympy.asin) + return ValueRanges.unknown() + + @staticmethod + def acos(x): + x = ValueRanges.wrap(x) + if -1 <= x.lower and x.upper <= 1: + return ValueRanges.decreasing_map(x, sympy.acos) + return ValueRanges.unknown() + + @staticmethod + def atan(x): + return ValueRanges.increasing_map(x, sympy.atan) + + +class ValueRangeAnalysis(SymPyValueRangeAnalysis): + def __init__(self): + self.name = "ValueRangeAnalysis" + boolean_operators = ( + "xor", + "logical_and", + "logical_or", + "logical_not", + ) + for op in boolean_operators: + setattr(self, op, self.bool_handler) + + @staticmethod + def bool_handler(*args, **kwargs): + # just assuming bools can have both values + return ValueRanges(sympy.false, sympy.true) # type: ignore[arg-type] + + @staticmethod + def default_handler(*args, **kwargs): + # many ops are unlikely to show up in optimizable indexing compute, + # so we dont have full coverage + return ValueRanges.unknown() + + def 
load(self, name: str, index: sympy.Expr): + return ValueRanges.unknown() + + def store(self, name, index, value, mode=None): + return + + def reduction(self, name, dtype, src_dtype, reduction_type, index, value): + return ValueRanges.unknown() + + def index_expr(self, index, dtype): + assert isinstance(index, ValueRanges) + return index + + @staticmethod + def to_dtype(x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None): + x = ValueRanges.wrap(x) + + if dtype == torch.bool: + if x.is_singleton(): + return ValueRanges.wrap(x.lower != 0) + elif 0 not in x: + return ValueRanges.wrap(sympy.true) + else: + return ValueRanges(sympy.false, sympy.true) + + def cast(x, dtype): + # dtype is int or float + if dtype.is_floating_point: + return sympy.Float(x) + else: + try: + return sympy.Integer(x) + except TypeError: + # inf cannot be cast to Integer + return x + + if x.is_bool: + if x.is_singleton(): + val = 1 if x.lower else 0 + return ValueRanges.wrap(cast(val, dtype)) + else: + return ValueRanges(cast(0, dtype), cast(1, dtype)) + else: + # int to float or float to int + return ValueRanges(cast(x.lower, dtype), cast(x.upper, dtype)) + + @staticmethod + def square(x): + return ValueRanges.convex_min_zero_map(x, lambda y: y * y) + + @staticmethod + def neg(x): + return ValueRanges.decreasing_map(x, operator.neg) + + @classmethod + def truncdiv(cls, a, b): + x = cls.truediv(a, b) + if x == ValueRanges.unknown(): + return x + + def trunc(x): + return sympy.Integer(x) if x.is_finite else x + + return ValueRanges.increasing_map(x, trunc) + + @classmethod + def sub(cls, a, b): + return cls.add(a, cls.neg(b)) + + def __getattr__(self, name): + log.debug("unhandled ValueRange op %s", name) + return self.default_handler + + +def bound_sympy(expr: sympy.Expr, ranges: Optional[Dict[sympy.Symbol, ValueRanges]] = None) -> ValueRanges: + if isinstance(expr, sympy.Number): + return ValueRanges.wrap(expr) + + ranges = ranges or {} + + # If there's a tracing context, augment available constrained ranges. 
+ context = torch._guards.TracingContext.try_get() + if context and context.fake_mode.shape_env: + ranges = {**context.fake_mode.shape_env.var_to_range, **ranges} + + unbounded_vars = expr.free_symbols - ranges.keys() + if unbounded_vars: + # Give some bounds to the free variables via their SymPy assumptions + # TODO A better way of doing this would be to assign them a range upon creation, as + # size variables can come with a lower bound of 2, as we specialise on 0 and 1 + unbounded_ranges: Dict[sympy.Symbol, ValueRanges] = {} + for s in unbounded_vars: + assert s.is_integer # type: ignore[attr-defined] + if s.is_positive: # type: ignore[attr-defined] + lower = 1 + elif s.is_nonnegative: # type: ignore[attr-defined] + lower = 0 + else: + lower = -math.inf # type: ignore[assignment] + unbounded_ranges[s] = ValueRanges(lower, math.inf) # type: ignore[index] + ranges = {**ranges, **unbounded_ranges} + + return sympy_interp(SymPyValueRangeAnalysis, ranges, expr) diff --git a/venv/lib/python3.10/site-packages/torch/utils/backcompat/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/backcompat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fdd16eec5aca342c4eab9e3910daac074225d6dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/backcompat/__init__.py @@ -0,0 +1,21 @@ +from torch._C import _set_backcompat_broadcast_warn +from torch._C import _get_backcompat_broadcast_warn +from torch._C import _set_backcompat_keepdim_warn +from torch._C import _get_backcompat_keepdim_warn + + +class Warning: + def __init__(self, setter, getter): + self.setter = setter + self.getter = getter + + def set_enabled(self, value): + self.setter(value) + + def get_enabled(self): + return self.getter() + + enabled = property(get_enabled, set_enabled) + +broadcast_warning = Warning(_set_backcompat_broadcast_warn, _get_backcompat_broadcast_warn) +keepdim_warning = Warning(_set_backcompat_keepdim_warn, _get_backcompat_keepdim_warn) diff --git a/venv/lib/python3.10/site-packages/torch/utils/backcompat/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/backcompat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4388b49f88be652e455b2b584faeba775d906938 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/backcompat/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d137c5e3dae042855401e7a4245f729ff32de57 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d197c0f7c46684fa4693641672d4d26645930e0 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/blas_compare_setup.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/blas_compare_setup.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbc8bdfc54e0a9ea6878d367150f68f42cd44c2b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/blas_compare_setup.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/compare.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/compare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fda6924fdab490981097adf4ed24a386f259117a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/compare.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/fuzzer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/fuzzer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03086dcd9ea8c9e8d2aa52bef2d702e4a8bbaaa1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/fuzzer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/op_benchmark.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/op_benchmark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f23022fa6d16a9e0086f35cc7acc11c3c7e5a5f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/op_benchmark.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/simple_timeit.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/simple_timeit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12f412cca599db72ee8e7cbbb4dd6749d1d8c361 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/simple_timeit.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/spectral_ops_fuzz_test.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/spectral_ops_fuzz_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae7a70c78c922b5af5973412f9e98f259d71d18f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/spectral_ops_fuzz_test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/blas_compare_setup.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/blas_compare_setup.py new file mode 100644 index 0000000000000000000000000000000000000000..c08acb50950f891940fc845a705abfe65e7ddae4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/blas_compare_setup.py @@ -0,0 +1,221 @@ +import collections +import os +import shutil +import subprocess + +try: + # no type stub for conda command line interface + 
import conda.cli.python_api # type: ignore[import] + from conda.cli.python_api import Commands as conda_commands +except ImportError: + # blas_compare.py will fail to import these when it's inside a conda env, + # but that's fine as it only wants the constants. + pass + + +WORKING_ROOT = "/tmp/pytorch_blas_compare_environments" +MKL_2020_3 = "mkl_2020_3" +MKL_2020_0 = "mkl_2020_0" +OPEN_BLAS = "open_blas" +EIGEN = "eigen" + + +GENERIC_ENV_VARS = ("USE_CUDA=0", "USE_ROCM=0") +BASE_PKG_DEPS = ( + "cmake", + "hypothesis", + "ninja", + "numpy", + "pyyaml", + "setuptools", + "typing_extensions", +) + + +SubEnvSpec = collections.namedtuple( + "SubEnvSpec", ( + "generic_installs", + "special_installs", + "environment_variables", + + # Validate install. + "expected_blas_symbols", + "expected_mkl_version", + )) + + +SUB_ENVS = { + MKL_2020_3: SubEnvSpec( + generic_installs=(), + special_installs=("intel", ("mkl=2020.3", "mkl-include=2020.3")), + environment_variables=("BLAS=MKL",) + GENERIC_ENV_VARS, + expected_blas_symbols=("mkl_blas_sgemm",), + expected_mkl_version="2020.0.3", + ), + + MKL_2020_0: SubEnvSpec( + generic_installs=(), + special_installs=("intel", ("mkl=2020.0", "mkl-include=2020.0")), + environment_variables=("BLAS=MKL",) + GENERIC_ENV_VARS, + expected_blas_symbols=("mkl_blas_sgemm",), + expected_mkl_version="2020.0.0", + ), + + OPEN_BLAS: SubEnvSpec( + generic_installs=("openblas",), + special_installs=(), + environment_variables=("BLAS=OpenBLAS",) + GENERIC_ENV_VARS, + expected_blas_symbols=("exec_blas",), + expected_mkl_version=None, + ), + + # EIGEN: SubEnvSpec( + # generic_installs=(), + # special_installs=(), + # environment_variables=("BLAS=Eigen",) + GENERIC_ENV_VARS, + # expected_blas_symbols=(), + # ), +} + + +def conda_run(*args): + """Convenience method.""" + stdout, stderr, retcode = conda.cli.python_api.run_command(*args) + if retcode: + raise OSError(f"conda error: {str(args)} retcode: {retcode}\n{stderr}") + + return stdout + + +def main(): + if os.path.exists(WORKING_ROOT): + print("Cleaning: removing old working root.") + shutil.rmtree(WORKING_ROOT) + os.makedirs(WORKING_ROOT) + + git_root = subprocess.check_output( + "git rev-parse --show-toplevel", + shell=True, + cwd=os.path.dirname(os.path.realpath(__file__)) + ).decode("utf-8").strip() + + for env_name, env_spec in SUB_ENVS.items(): + env_path = os.path.join(WORKING_ROOT, env_name) + print(f"Creating env: {env_name}: ({env_path})") + conda_run( + conda_commands.CREATE, + "--no-default-packages", + "--prefix", env_path, + "python=3", + ) + + print("Testing that env can be activated:") + base_source = subprocess.run( + f"source activate {env_path}", + shell=True, + capture_output=True, + check=False, + ) + if base_source.returncode: + raise OSError( + "Failed to source base environment:\n" + f" stdout: {base_source.stdout.decode('utf-8')}\n" + f" stderr: {base_source.stderr.decode('utf-8')}" + ) + + print("Installing packages:") + conda_run( + conda_commands.INSTALL, + "--prefix", env_path, + *(BASE_PKG_DEPS + env_spec.generic_installs) + ) + + if env_spec.special_installs: + channel, channel_deps = env_spec.special_installs + print(f"Installing packages from channel: {channel}") + conda_run( + conda_commands.INSTALL, + "--prefix", env_path, + "-c", channel, *channel_deps + ) + + if env_spec.environment_variables: + print("Setting environment variables.") + + # This does not appear to be possible using the python API. 
+ env_set = subprocess.run( + f"source activate {env_path} && " + f"conda env config vars set {' '.join(env_spec.environment_variables)}", + shell=True, + capture_output=True, + check=False, + ) + if env_set.returncode: + raise OSError( + "Failed to set environment variables:\n" + f" stdout: {env_set.stdout.decode('utf-8')}\n" + f" stderr: {env_set.stderr.decode('utf-8')}" + ) + + # Check that they were actually set correctly. + actual_env_vars = subprocess.run( + f"source activate {env_path} && env", + shell=True, + capture_output=True, + check=True, + ).stdout.decode("utf-8").strip().splitlines() + for e in env_spec.environment_variables: + assert e in actual_env_vars, f"{e} not in envs" + + print(f"Building PyTorch for env: `{env_name}`") + # We have to re-run during each build to pick up the new + # build config settings. + build_run = subprocess.run( + f"source activate {env_path} && " + f"cd {git_root} && " + "python setup.py install --cmake", + shell=True, + capture_output=True, + check=True, + ) + + print("Checking configuration:") + check_run = subprocess.run( + # Shameless abuse of `python -c ...` + f"source activate {env_path} && " + "python -c \"" + "import torch;" + "from torch.utils.benchmark import Timer;" + "print(torch.__config__.show());" + "setup = 'x=torch.ones((128, 128));y=torch.ones((128, 128))';" + "counts = Timer('torch.mm(x, y)', setup).collect_callgrind(collect_baseline=False);" + "stats = counts.as_standardized().stats(inclusive=True);" + "print(stats.filter(lambda l: 'blas' in l.lower()))\"", + shell=True, + capture_output=True, + check=False, + ) + if check_run.returncode: + raise OSError( + "Failed to set environment variables:\n" + f" stdout: {check_run.stdout.decode('utf-8')}\n" + f" stderr: {check_run.stderr.decode('utf-8')}" + ) + check_run_stdout = check_run.stdout.decode('utf-8') + print(check_run_stdout) + + for e in env_spec.environment_variables: + if "BLAS" in e: + assert e in check_run_stdout, f"PyTorch build did not respect `BLAS=...`: {e}" + + for s in env_spec.expected_blas_symbols: + assert s in check_run_stdout + + if env_spec.expected_mkl_version is not None: + assert f"- Intel(R) Math Kernel Library Version {env_spec.expected_mkl_version}" in check_run_stdout + + print(f"Build complete: {env_name}") + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/compare.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/compare.py new file mode 100644 index 0000000000000000000000000000000000000000..6f99d9d06ad58f583b9e72d89e04d3f90964f48c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/compare.py @@ -0,0 +1,98 @@ +"""Example of Timer and Compare APIs: + +$ python -m examples.compare +""" + +import pickle +import sys +import time + +import torch + +import torch.utils.benchmark as benchmark_utils + + +class FauxTorch: + """Emulate different versions of pytorch. + + In normal circumstances this would be done with multiple processes + writing serialized measurements, but this simplifies that model to + make the example clearer. + """ + def __init__(self, real_torch, extra_ns_per_element): + self._real_torch = real_torch + self._extra_ns_per_element = extra_ns_per_element + + def extra_overhead(self, result): + # time.sleep has a ~65 us overhead, so only fake a + # per-element overhead if numel is large enough. 
+ numel = int(result.numel()) + if numel > 5000: + time.sleep(numel * self._extra_ns_per_element * 1e-9) + return result + + def add(self, *args, **kwargs): + return self.extra_overhead(self._real_torch.add(*args, **kwargs)) + + def mul(self, *args, **kwargs): + return self.extra_overhead(self._real_torch.mul(*args, **kwargs)) + + def cat(self, *args, **kwargs): + return self.extra_overhead(self._real_torch.cat(*args, **kwargs)) + + def matmul(self, *args, **kwargs): + return self.extra_overhead(self._real_torch.matmul(*args, **kwargs)) + + +def main(): + tasks = [ + ("add", "add", "torch.add(x, y)"), + ("add", "add (extra +0)", "torch.add(x, y + zero)"), + ] + + serialized_results = [] + repeats = 2 + timers = [ + benchmark_utils.Timer( + stmt=stmt, + globals={ + "torch": torch if branch == "master" else FauxTorch(torch, overhead_ns), + "x": torch.ones((size, 4)), + "y": torch.ones((1, 4)), + "zero": torch.zeros(()), + }, + label=label, + sub_label=sub_label, + description=f"size: {size}", + env=branch, + num_threads=num_threads, + ) + for branch, overhead_ns in [("master", None), ("my_branch", 1), ("severe_regression", 5)] + for label, sub_label, stmt in tasks + for size in [1, 10, 100, 1000, 10000, 50000] + for num_threads in [1, 4] + ] + + for i, timer in enumerate(timers * repeats): + serialized_results.append(pickle.dumps( + timer.blocked_autorange(min_run_time=0.05) + )) + print(f"\r{i + 1} / {len(timers) * repeats}", end="") + sys.stdout.flush() + print() + + comparison = benchmark_utils.Compare([ + pickle.loads(i) for i in serialized_results + ]) + + print("== Unformatted " + "=" * 80 + "\n" + "/" * 95 + "\n") + comparison.print() + + print("== Formatted " + "=" * 80 + "\n" + "/" * 93 + "\n") + comparison.trim_significant_figures() + comparison.colorize() + comparison.print() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/fuzzer.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/fuzzer.py new file mode 100644 index 0000000000000000000000000000000000000000..9728bf3d26c9fec31a53be4e9f275ce892379d1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/fuzzer.py @@ -0,0 +1,85 @@ +"""Example of the Timer and Fuzzer APIs: + +$ python -m examples.fuzzer +""" + +import sys + +import torch.utils.benchmark as benchmark_utils + + +def main(): + add_fuzzer = benchmark_utils.Fuzzer( + parameters=[ + [ + benchmark_utils.FuzzedParameter( + name=f"k{i}", + minval=16, + maxval=16 * 1024, + distribution="loguniform", + ) for i in range(3) + ], + benchmark_utils.FuzzedParameter( + name="d", + distribution={2: 0.6, 3: 0.4}, + ), + ], + tensors=[ + [ + benchmark_utils.FuzzedTensor( + name=name, + size=("k0", "k1", "k2"), + dim_parameter="d", + probability_contiguous=0.75, + min_elements=64 * 1024, + max_elements=128 * 1024, + ) for name in ("x", "y") + ], + ], + seed=0, + ) + + n = 250 + measurements = [] + for i, (tensors, tensor_properties, _) in enumerate(add_fuzzer.take(n=n)): + x, x_order = tensors["x"], str(tensor_properties["x"]["order"]) + y, y_order = tensors["y"], str(tensor_properties["y"]["order"]) + shape = ", ".join(tuple(f'{i:>4}' for i in x.shape)) + + description = "".join([ + f"{x.numel():>7} | {shape:<16} | ", + f"{'contiguous' if x.is_contiguous() else x_order:<12} | ", + f"{'contiguous' if y.is_contiguous() else y_order:<12} | ", + ]) + + timer = benchmark_utils.Timer( + stmt="x + y", + globals=tensors, + description=description, + ) + + 
measurements.append(timer.blocked_autorange(min_run_time=0.1)) + measurements[-1].metadata = {"numel": x.numel()} + print(f"\r{i + 1} / {n}", end="") + sys.stdout.flush() + print() + + # More string munging to make pretty output. + print(f"Average attempts per valid config: {1. / (1. - add_fuzzer.rejection_rate):.1f}") + + def time_fn(m): + return m.median / m.metadata["numel"] + measurements.sort(key=time_fn) + + template = f"{{:>6}}{' ' * 19}Size Shape{' ' * 13}X order Y order\n{'-' * 80}" + print(template.format("Best:")) + for m in measurements[:15]: + print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}") + + print("\n" + template.format("Worst:")) + for m in measurements[-15:]: + print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}") + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/op_benchmark.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/op_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..b7536b9ec26bb868079f21cb4c65ca7260f59a81 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/op_benchmark.py @@ -0,0 +1,103 @@ +"""Example use of Timer and op fuzzers to measure kernel performance. + +$ python -m examples.op_benchmark +""" + +import numpy as np +import torch + +from torch.utils.benchmark import Timer +from torch.utils.benchmark.op_fuzzers.binary import BinaryOpFuzzer +from torch.utils.benchmark.op_fuzzers.unary import UnaryOpFuzzer + + +_MEASURE_TIME = 1.0 + + +def assert_dicts_equal(dict_0, dict_1): + """Builtin dict comparison will not compare numpy arrays. + e.g. + x = {"a": np.ones((2, 1))} + x == x # Raises ValueError + """ + # Compare the key sets of both dicts (comparing dict_0 to itself was a no-op). + assert set(dict_0.keys()) == set(dict_1.keys()) + assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype") + + +def run(n, stmt, fuzzer_cls): + float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n) + int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n) + raw_results = [] + for i, (float_values, int_values) in enumerate(zip(float_iter, int_iter)): + float_tensors, float_tensor_params, float_params = float_values + int_tensors, int_tensor_params, int_params = int_values + + # This benchmark assumes that the two fuzzers generate identically + # sized and strided Tensors, since the same seed is used.
+ assert_dicts_equal(float_params, int_params) + assert_dicts_equal(float_tensor_params["x"], int_tensor_params["x"]) + + float_measurement, int_measurement = ( + Timer( + stmt, + globals=tensors, + ).blocked_autorange(min_run_time=_MEASURE_TIME) + for tensors in (float_tensors, int_tensors) + ) + + descriptions = [] + for name in float_tensors: + shape_str = "(" + ", ".join([ + f"2 ** {int(np.log2(i))}" + if 2 ** int(np.log2(i)) == i and i > 1 + else str(i) + for i in float_tensors[name].shape + ]) + ")" + order = float_tensor_params[name]["order"] + order_str = ("" if all(order == np.arange(len(order))) else str(tuple(order))) + steps = float_tensor_params[name]["steps"] + steps_str = str(steps) if sum(steps) > len(steps) else "" + descriptions.append((name, shape_str, order_str, steps_str)) + raw_results.append((float_measurement, int_measurement, descriptions)) + + print(f"\r{i + 1} / {n}", end="") + print() + + parsed_results, name_len, shape_len, order_len, steps_len = [], 0, 0, 0, 0 + for float_measurement, int_measurement, descriptions in raw_results: + t_float = float_measurement.median * 1e6 + t_int = int_measurement.median * 1e6 + rel_diff = abs(t_float - t_int) / (t_float + t_int) * 2 + parsed_results.append((t_float, t_int, rel_diff, descriptions)) + for name, shape, order, steps in descriptions: + name_len = max(name_len, len(name)) + shape_len = max(shape_len, len(shape)) + order_len = max(order_len, len(order)) + steps_len = max(steps_len, len(steps)) + + parsed_results.sort(key=lambda x: x[2]) + + print(f"stmt: {stmt}") + print(f" diff faster{'':>17}{' ' * name_len} ", end="") + print(f"{'shape'.ljust(shape_len)}{'':>16}{'order'.ljust(order_len)}", end="") + print(f" steps\n{'-' * 100}") + for results, spacer in [(parsed_results[:10], "..."), (parsed_results[-10:], "")]: + for t_float, t_int, rel_diff, descriptions in results: + time_str = [f"{rel_diff * 100:>4.1f}% {'int' if t_int < t_float else 'float':<20}"] + time_str.extend(["".ljust(len(time_str[0])) for _ in descriptions[:-1]]) + for t_str, (name, shape, order, steps) in zip(time_str, descriptions): + name = f"{name}:".ljust(name_len + 1) + shape = shape.ljust(shape_len + 10) + order = order.ljust(order_len) + print(f"{t_str} {name} {shape}| {order} | {steps}") + print(spacer) + + +def main(): + run(n=100, stmt="torch.median(x, dim=0)", fuzzer_cls=UnaryOpFuzzer) + run(n=100, stmt="torch.square(x)", fuzzer_cls=UnaryOpFuzzer) + run(n=100, stmt="x + y", fuzzer_cls=BinaryOpFuzzer) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/simple_timeit.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/simple_timeit.py new file mode 100644 index 0000000000000000000000000000000000000000..81aaa6dee981788e08771cfeb5afb5d201f1c7be --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/simple_timeit.py @@ -0,0 +1,25 @@ +"""Trivial use of Timer API: + +$ python -m examples.simple_timeit +""" + +import torch + +import torch.utils.benchmark as benchmark_utils + + +def main(): + timer = benchmark_utils.Timer( + stmt="x + y", + globals={"x": torch.ones((4, 8)), "y": torch.ones((1, 8))}, + label="Broadcasting add (4x8)", + ) + + for i in range(3): + print(f"Run: {i}\n{'-' * 40}") + print(f"timeit:\n{timer.timeit(10000)}\n") + print(f"autorange:\n{timer.blocked_autorange()}\n\n") + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/spectral_ops_fuzz_test.py 
b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/spectral_ops_fuzz_test.py new file mode 100644 index 0000000000000000000000000000000000000000..3ac54059416c78df761e060393b7442843adac88 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/examples/spectral_ops_fuzz_test.py @@ -0,0 +1,113 @@ +"""Microbenchmarks for the torch.fft module""" +from argparse import ArgumentParser +from collections import namedtuple +from collections.abc import Iterable + +import torch +import torch.fft +from torch.utils import benchmark +from torch.utils.benchmark.op_fuzzers.spectral import SpectralOpFuzzer + + +def _dim_options(ndim): + if ndim == 1: + return [None] + elif ndim == 2: + return [0, 1, None] + elif ndim == 3: + return [0, 1, 2, (0, 1), (0, 2), None] + raise ValueError(f"Expected ndim in range 1-3, got {ndim}") + + +def run_benchmark(name: str, function: object, dtype: torch.dtype, seed: int, device: str, samples: int, + probability_regular: float): + cuda = device == 'cuda' + spectral_fuzzer = SpectralOpFuzzer(seed=seed, dtype=dtype, cuda=cuda, + probability_regular=probability_regular) + results = [] + for tensors, tensor_params, params in spectral_fuzzer.take(samples): + shape = [params['k0'], params['k1'], params['k2']][:params['ndim']] + str_shape = ' x '.join([f"{s:<4}" for s in shape]) + sub_label = f"{str_shape} {'' if tensor_params['x']['is_contiguous'] else '(discontiguous)'}" + for dim in _dim_options(params['ndim']): + for nthreads in (1, 4, 16) if not cuda else (1,): + measurement = benchmark.Timer( + stmt='func(x, dim=dim)', + globals={'func': function, 'x': tensors['x'], 'dim': dim}, + label=f"{name}_{device}", + sub_label=sub_label, + description=f"dim={dim}", + num_threads=nthreads, + ).blocked_autorange(min_run_time=1) + measurement.metadata = { + 'name': name, + 'device': device, + 'dim': dim, + 'shape': shape, + } + measurement.metadata.update(tensor_params['x']) + results.append(measurement) + return results + + +Benchmark = namedtuple('Benchmark', ['name', 'function', 'dtype']) +BENCHMARKS = [ + Benchmark('fft_real', torch.fft.fftn, torch.float32), + Benchmark('fft_complex', torch.fft.fftn, torch.complex64), + Benchmark('ifft', torch.fft.ifftn, torch.complex64), + Benchmark('rfft', torch.fft.rfftn, torch.float32), + Benchmark('irfft', torch.fft.irfftn, torch.complex64), +] +BENCHMARK_MAP = {b.name: b for b in BENCHMARKS} +BENCHMARK_NAMES = [b.name for b in BENCHMARKS] +DEVICE_NAMES = ['cpu', 'cuda'] + +def _output_csv(file, results): + file.write('benchmark,device,num_threads,numel,shape,contiguous,dim,mean (us),median (us),iqr (us)\n') + for measurement in results: + metadata = measurement.metadata + device, dim, shape, name, numel, contiguous = ( + metadata['device'], metadata['dim'], metadata['shape'], + metadata['name'], metadata['numel'], metadata['is_contiguous']) + + if isinstance(dim, Iterable): + dim_str = '-'.join(str(d) for d in dim) + else: + dim_str = str(dim) + shape_str = 'x'.join(str(s) for s in shape) + + print(name, device, measurement.task_spec.num_threads, numel, shape_str, contiguous, dim_str, # type: ignore[possibly-undefined] + measurement.mean * 1e6, measurement.median * 1e6, measurement.iqr * 1e6, + sep=',', file=file) + + +if __name__ == '__main__': + parser = ArgumentParser(description=__doc__) + parser.add_argument('--device', type=str, choices=DEVICE_NAMES, nargs='+', default=DEVICE_NAMES) + parser.add_argument('--bench', type=str, choices=BENCHMARK_NAMES, nargs='+', default=BENCHMARK_NAMES) + 
parser.add_argument('--seed', type=int, default=0) + parser.add_argument('--samples', type=int, default=10) + parser.add_argument('--probability-regular', '--probability_regular', type=float, default=1.0) + parser.add_argument('-o', '--output', type=str) + args = parser.parse_args() + + num_benchmarks = len(args.device) * len(args.bench) + i = 0 + results = [] + for device in args.device: + for bench in (BENCHMARK_MAP[b] for b in args.bench): + results += run_benchmark( + name=bench.name, function=bench.function, dtype=bench.dtype, + seed=args.seed, device=device, samples=args.samples, + probability_regular=args.probability_regular) + i += 1 + print(f'Completed {bench.name} benchmark on {device} ({i} of {num_benchmarks})') + + if args.output is not None: + with open(args.output, 'w') as f: + _output_csv(f, results) + + compare = benchmark.Compare(results) + compare.trim_significant_figures() + compare.colorize() + compare.print() diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da738b03aed701c5af3f219e5c2ffe12bf52acff Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/binary.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/binary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c52a38ec7ccc51c84b7dfedeaf3609fbba6e3c87 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/binary.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_binary.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_binary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdbe23fffc8742cc08a3dbcbd699c588605cfb82 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_binary.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_unary.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_unary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d35c6656bf69a6309ae5d3488f1e4cf964ffb484 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_unary.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/spectral.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/spectral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f13231ac7b5dca602f8870b8aaf6e8d4aba372c1 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/spectral.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/unary.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/unary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea5474dc597b8acde6768d389bf1635ffb580057 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/unary.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/binary.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/binary.py new file mode 100644 index 0000000000000000000000000000000000000000..91289d88db8adcf2b9b538945c9b04e564ee5662 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/binary.py @@ -0,0 +1,106 @@ +import numpy as np +import torch + +from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor + + +_MIN_DIM_SIZE = 16 +_MAX_DIM_SIZE = 16 * 1024 ** 2 +_POW_TWO_SIZES = tuple(2 ** i for i in range( + int(np.log2(_MIN_DIM_SIZE)), + int(np.log2(_MAX_DIM_SIZE)) + 1, +)) + + +class BinaryOpFuzzer(Fuzzer): + def __init__(self, seed, dtype=torch.float32, cuda=False): + super().__init__( + parameters=[ + # Dimensionality of x and y. (e.g. 1D, 2D, or 3D.) + FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), + + # Shapes for `x` and `y`. + # It is important to test all shapes, however + # powers of two are especially important and therefore + # warrant special attention. This is done by generating + # both a value drawn from all integers between the min and + # max allowed values, and another from only the powers of two + # (both distributions are loguniform) and then randomly + # selecting between the two. + # Moreover, `y` will occasionally have singleton + # dimensions in order to test broadcasting. + [ + FuzzedParameter( + name=f"k_any_{i}", + minval=_MIN_DIM_SIZE, + maxval=_MAX_DIM_SIZE, + distribution="loguniform", + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k_pow2_{i}", + distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k{i}", + distribution={ + ParameterAlias(f"k_any_{i}"): 0.8, + ParameterAlias(f"k_pow2_{i}"): 0.2, + }, + strict=True, + ) for i in range(3) + ], + + [ + FuzzedParameter( + name=f"y_k{i}", + distribution={ + ParameterAlias(f"k{i}"): 0.8, + 1: 0.2, + }, + strict=True, + ) for i in range(3) + ], + + # Steps for `x` and `y`. (Benchmarks strided memory access.) + [ + FuzzedParameter( + name=f"{name}_step_{i}", + distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04}, + ) + for i in range(3) + for name in ("x", "y") + ], + + # Repeatable entropy for downstream applications. 
+ FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), + ], + tensors=[ + FuzzedTensor( + name="x", + size=("k0", "k1", "k2"), + steps=("x_step_0", "x_step_1", "x_step_2"), + probability_contiguous=0.75, + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + max_allocation_bytes=2 * 1024**3, # 2 GB + dim_parameter="dim", + dtype=dtype, + cuda=cuda, + ), + FuzzedTensor( + name="y", + size=("y_k0", "y_k1", "y_k2"), + steps=("x_step_0", "x_step_1", "x_step_2"), + probability_contiguous=0.75, + max_allocation_bytes=2 * 1024**3, # 2 GB + dim_parameter="dim", + dtype=dtype, + cuda=cuda, + ), + ], + seed=seed, + ) diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_binary.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_binary.py new file mode 100644 index 0000000000000000000000000000000000000000..984493fe4a718bc995c9cf32ab6165581022fb43 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_binary.py @@ -0,0 +1,106 @@ +import numpy as np +import torch + +from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor + + +_MIN_DIM_SIZE = 16 +_MAX_DIM_SIZE = 16 * 1024 ** 2 +_POW_TWO_SIZES = tuple(2 ** i for i in range( + int(np.log2(_MIN_DIM_SIZE)), + int(np.log2(_MAX_DIM_SIZE)) + 1, +)) + + +class BinaryOpSparseFuzzer(Fuzzer): + def __init__(self, seed, dtype=torch.float32, cuda=False): + super().__init__( + parameters=[ + # Dimensionality of x and y. (e.g. 1D, 2D, or 3D.) + FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), + FuzzedParameter( + name="sparse_dim", + distribution={1: 0.4, 2: 0.4, 3: 0.2}, + strict=True + ), + # Shapes for `x` and `y`. + # It is important to test all shapes, however + # powers of two are especially important and therefore + # warrant special attention. This is done by generating + # both a value drawn from all integers between the min and + # max allowed values, and another from only the powers of two + # (both distributions are loguniform) and then randomly + # selecting between the two. + # Moreover, `y` will occasionally have singleton + # dimensions in order to test broadcasting. + [ + FuzzedParameter( + name=f"k_any_{i}", + minval=_MIN_DIM_SIZE, + maxval=_MAX_DIM_SIZE, + distribution="loguniform", + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k_pow2_{i}", + distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k{i}", + distribution={ + ParameterAlias(f"k_any_{i}"): 0.8, + ParameterAlias(f"k_pow2_{i}"): 0.2, + }, + strict=True, + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"y_k{i}", + distribution={ + ParameterAlias(f"k{i}"): 1.0}, + strict=True, + ) for i in range(3) + ], + FuzzedParameter( + name="density", + distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3}, + ), + FuzzedParameter( + name="coalesced", + distribution={True: 0.5, False: 0.5}, + ), + # Repeatable entropy for downstream applications. 
+ FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), + ], + tensors=[ + FuzzedSparseTensor( + name="x", + size=("k0", "k1", "k2"), + dim_parameter="dim_parameter", + sparse_dim="sparse_dim", + density="density", + coalesced="coalesced", + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + dtype=dtype, + cuda=cuda, + ), + FuzzedSparseTensor( + name="y", + size=("y_k0", "y_k1", "y_k2"), + dim_parameter="dim_parameter", + sparse_dim="sparse_dim", + density="density", + coalesced="coalesced", + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + dtype=dtype, + cuda=cuda, + ), + ], + seed=seed, + ) diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_unary.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_unary.py new file mode 100644 index 0000000000000000000000000000000000000000..70b5ae3cd3a5dd6a73d0704a199728e6be512cac --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_unary.py @@ -0,0 +1,82 @@ + +import numpy as np +import torch +from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor + + +_MIN_DIM_SIZE = 16 +_MAX_DIM_SIZE = 16 * 1024 ** 2 +_POW_TWO_SIZES = tuple(2 ** i for i in range( + int(np.log2(_MIN_DIM_SIZE)), + int(np.log2(_MAX_DIM_SIZE)) + 1, +)) + +class UnaryOpSparseFuzzer(Fuzzer): + def __init__(self, seed, dtype=torch.float32, cuda=False): + super().__init__( + parameters=[ + # Sparse dim parameter of x. (e.g. 1D, 2D, or 3D.) + FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), + FuzzedParameter( + name="sparse_dim", + distribution={1: 0.4, 2: 0.4, 3: 0.2}, + strict=True + ), + # Shapes for `x`. + # It is important to test all shapes, however + # powers of two are especially important and therefore + # warrant special attention. This is done by generating + # both a value drawn from all integers between the min and + # max allowed values, and another from only the powers of two + # (both distributions are loguniform) and then randomly + # selecting between the two. + [ + FuzzedParameter( + name=f"k_any_{i}", + minval=_MIN_DIM_SIZE, + maxval=_MAX_DIM_SIZE, + distribution="loguniform", + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k_pow2_{i}", + distribution={size: 1. 
/ len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k{i}", + distribution={ + ParameterAlias(f"k_any_{i}"): 0.8, + ParameterAlias(f"k_pow2_{i}"): 0.2, + }, + strict=True, + ) for i in range(3) + ], + FuzzedParameter( + name="density", + distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3}, + ), + FuzzedParameter( + name="coalesced", + distribution={True: 0.5, False: 0.5}, + ), + FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), + ], + tensors=[ + FuzzedSparseTensor( + name="x", + size=("k0", "k1", "k2"), + dim_parameter="dim_parameter", + sparse_dim="sparse_dim", + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + density="density", + coalesced="coalesced", + dtype=dtype, + cuda=cuda, + ), + ], + seed=seed, + ) diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/spectral.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..29359ba3edb6f0fb8554476529d11d9fcdedecdd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/spectral.py @@ -0,0 +1,93 @@ +import math + +import torch +from torch.utils import benchmark +from torch.utils.benchmark import FuzzedParameter, FuzzedTensor, ParameterAlias + + +__all__ = ['SpectralOpFuzzer'] + +MIN_DIM_SIZE = 16 +MAX_DIM_SIZE = 16 * 1024 + +def power_range(upper_bound, base): + return (base ** i for i in range(int(math.log(upper_bound, base)) + 1)) + +# List of regular numbers from MIN_DIM_SIZE to MAX_DIM_SIZE +# These numbers factorize into multiples of prime factors 2, 3, and 5 only +# and are usually the fastest in FFT implementations. +REGULAR_SIZES = [] +for i in power_range(MAX_DIM_SIZE, 2): + for j in power_range(MAX_DIM_SIZE // i, 3): + ij = i * j + for k in power_range(MAX_DIM_SIZE // ij, 5): + ijk = ij * k + if ijk > MIN_DIM_SIZE: + REGULAR_SIZES.append(ijk) +REGULAR_SIZES.sort() + +class SpectralOpFuzzer(benchmark.Fuzzer): + def __init__(self, *, seed: int, dtype=torch.float64, + cuda: bool = False, probability_regular: float = 1.0): + super().__init__( + parameters=[ + # Dimensionality of x. (e.g. 1D, 2D, or 3D.) + FuzzedParameter("ndim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), + + # Shapes for `x`. + # It is important to test all shapes, however + # regular sizes are especially important to the FFT and therefore + # warrant special attention. This is done by generating + # both a value drawn from all integers between the min and + # max allowed values, and another from only the regular numbers + # (both distributions are loguniform) and then randomly + # selecting between the two. + [ + FuzzedParameter( + name=f"k_any_{i}", + minval=MIN_DIM_SIZE, + maxval=MAX_DIM_SIZE, + distribution="loguniform", + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k_regular_{i}", + distribution={size: 1. / len(REGULAR_SIZES) for size in REGULAR_SIZES} + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k{i}", + distribution={ + ParameterAlias(f"k_regular_{i}"): probability_regular, + ParameterAlias(f"k_any_{i}"): 1 - probability_regular, + }, + strict=True, + ) for i in range(3) + ], + + # Steps for `x`. (Benchmarks strided memory access.) 
+ [ + FuzzedParameter( + name=f"step_{i}", + distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04}, + ) for i in range(3) + ], + ], + tensors=[ + FuzzedTensor( + name="x", + size=("k0", "k1", "k2"), + steps=("step_0", "step_1", "step_2"), + probability_contiguous=0.75, + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + max_allocation_bytes=2 * 1024**3, # 2 GB + dim_parameter="ndim", + dtype=dtype, + cuda=cuda, + ), + ], + seed=seed, + ) diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/unary.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/unary.py new file mode 100644 index 0000000000000000000000000000000000000000..a0f810d0b9fadb3968b16eff7e6a5617d00168ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/unary.py @@ -0,0 +1,81 @@ +import numpy as np +import torch + +from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor + + +_MIN_DIM_SIZE = 16 +_MAX_DIM_SIZE = 16 * 1024 ** 2 +_POW_TWO_SIZES = tuple(2 ** i for i in range( + int(np.log2(_MIN_DIM_SIZE)), + int(np.log2(_MAX_DIM_SIZE)) + 1, +)) + + +class UnaryOpFuzzer(Fuzzer): + def __init__(self, seed, dtype=torch.float32, cuda=False): + super().__init__( + parameters=[ + # Dimensionality of x. (e.g. 1D, 2D, or 3D.) + FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), + + # Shapes for `x`. + # It is important to test all shapes, however + # powers of two are especially important and therefore + # warrant special attention. This is done by generating + # both a value drawn from all integers between the min and + # max allowed values, and another from only the powers of two + # (both distributions are loguniform) and then randomly + # selecting between the two. + [ + FuzzedParameter( + name=f"k_any_{i}", + minval=_MIN_DIM_SIZE, + maxval=_MAX_DIM_SIZE, + distribution="loguniform", + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k_pow2_{i}", + distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} + ) for i in range(3) + ], + [ + FuzzedParameter( + name=f"k{i}", + distribution={ + ParameterAlias(f"k_any_{i}"): 0.8, + ParameterAlias(f"k_pow2_{i}"): 0.2, + }, + strict=True, + ) for i in range(3) + ], + + # Steps for `x`. (Benchmarks strided memory access.) + [ + FuzzedParameter( + name=f"x_step_{i}", + distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04}, + ) for i in range(3) + ], + + # Repeatable entropy for downstream applications. 
+ FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), + ], + tensors=[ + FuzzedTensor( + name="x", + size=("k0", "k1", "k2"), + steps=("x_step_0", "x_step_1", "x_step_2"), + probability_contiguous=0.75, + min_elements=4 * 1024, + max_elements=32 * 1024 ** 2, + max_allocation_bytes=2 * 1024**3, # 2 GB + dim_parameter="dim", + dtype=dtype, + cuda=cuda, + ), + ], + seed=seed, + ) diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d22a4882b542ed760491876588f4f8d9dff87813 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/_stubs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/_stubs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74d6b70bb55d59beef73ba94936245ca26a6e783 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/_stubs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..659698b91db719fdf33442af4abbbe5f1018bf2b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compare.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0116f25291aa2a65b0907eb0f495a041bdba6b55 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compare.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compile.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a62ed2cb9294214c3e09d5d76ac41f8e22140ce Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/compile.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/cpp_jit.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/cpp_jit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16b1321bc8c7dce7109985779c546e43fe83e768 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/cpp_jit.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/fuzzer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/fuzzer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6632e8f34c85d13ed8c65d63bbb8cafdd88673d8 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/fuzzer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/sparse_fuzzer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/sparse_fuzzer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19ceebec81edbf101c719a77cd6c999a3ae7f2ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/sparse_fuzzer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/timer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/timer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65515a12ed5a1c59e95c8090d0bf948814d38ab5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/timer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/_stubs.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/_stubs.py new file mode 100644 index 0000000000000000000000000000000000000000..13fdd22e2727d42892e7bc0d0efb2fc3eaf8d376 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/_stubs.py @@ -0,0 +1,40 @@ +from typing import Any, Callable, Dict, Protocol, runtime_checkable + + +class TimerClass(Protocol): + """This is the portion of the `timeit.Timer` API used by benchmark utils.""" + def __init__( + self, + stmt: str, + setup: str, + timer: Callable[[], float], + globals: Dict[str, Any], + **kwargs: Any, + ) -> None: + ... + + def timeit(self, number: int) -> float: + ... + + +@runtime_checkable +class TimeitModuleType(Protocol): + """Modules generated from `timeit_template.cpp`.""" + def timeit(self, number: int) -> float: + ... + + +class CallgrindModuleType(Protocol): + """Replicates the valgrind endpoints in `torch._C`. + + These bindings are used to collect Callgrind profiles on earlier versions + of PyTorch and will eventually be removed. + """ + __file__: str + __name__: str + + def _valgrind_supported_platform(self) -> bool: + ... + + def _valgrind_toggle(self) -> None: + ... 
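The protocol classes in `_stubs.py` above rely on structural typing: any object that exposes a matching `timeit(number)` method satisfies `TimeitModuleType`, with no inheritance required. A minimal sketch of that behaviour, using only the standard library (`FakeTimeitModule` is a hypothetical stand-in for a compiled timeit module, used here purely for illustration):

    from typing import Protocol, runtime_checkable

    @runtime_checkable
    class TimeitModuleType(Protocol):
        def timeit(self, number: int) -> float: ...

    class FakeTimeitModule:
        # No relation to the protocol other than a matching method signature.
        def timeit(self, number: int) -> float:
            return 0.0

    # Passes: a @runtime_checkable protocol's isinstance() check only verifies
    # that the declared methods are present on the instance.
    assert isinstance(FakeTimeitModule(), TimeitModuleType)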
diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7f678d823eeb4042665bb57a485fa1427c67141 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/__pycache__/timer_interface.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/__pycache__/timer_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4de4d39b4cbe36fcf64307e817153bf23f125728 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/__pycache__/timer_interface.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/callgrind.h b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/callgrind.h new file mode 100644 index 0000000000000000000000000000000000000000..f078cc82b95daf94d2bea51f1e1b1a8c12daea23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/callgrind.h @@ -0,0 +1,129 @@ + +/* + ---------------------------------------------------------------- + + Notice that the following BSD-style license applies to this one + file (callgrind.h) only. The rest of Valgrind is licensed under the + terms of the GNU General Public License, version 2, unless + otherwise indicated. See the COPYING file in the source + distribution for details. + + ---------------------------------------------------------------- + + This file is part of callgrind, a valgrind tool for cache simulation + and call tree tracing. + + Copyright (C) 2003-2017 Josef Weidendorfer. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + + 3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---------------------------------------------------------------- + + Notice that the above BSD-style license applies to this one file + (callgrind.h) only. The entire rest of Valgrind is licensed under + the terms of the GNU General Public License, version 2. See the + COPYING file in the source distribution for details. + + ---------------------------------------------------------------- +*/ + +#ifndef __CALLGRIND_H +#define __CALLGRIND_H + +#include "valgrind.h" + +/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! + This enum comprises an ABI exported by Valgrind to programs + which use client requests. DO NOT CHANGE THE ORDER OF THESE + ENTRIES, NOR DELETE ANY -- add new ones at the end. + + The identification ('C','T') for Callgrind has historical + reasons: it was called "Calltree" before. Besides, ('C','G') would + clash with cachegrind. + */ + +typedef + enum { + VG_USERREQ__DUMP_STATS = VG_USERREQ_TOOL_BASE('C','T'), + VG_USERREQ__ZERO_STATS, + VG_USERREQ__TOGGLE_COLLECT, + VG_USERREQ__DUMP_STATS_AT, + VG_USERREQ__START_INSTRUMENTATION, + VG_USERREQ__STOP_INSTRUMENTATION + } Vg_CallgrindClientRequest; + +/* Dump current state of cost centers, and zero them afterwards */ +#define CALLGRIND_DUMP_STATS \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DUMP_STATS, \ + 0, 0, 0, 0, 0) + +/* Dump current state of cost centers, and zero them afterwards. + The argument is appended to a string stating the reason which triggered + the dump. This string is written as a description field into the + profile data dump. */ +#define CALLGRIND_DUMP_STATS_AT(pos_str) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DUMP_STATS_AT, \ + pos_str, 0, 0, 0, 0) + +/* Zero cost centers */ +#define CALLGRIND_ZERO_STATS \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__ZERO_STATS, \ + 0, 0, 0, 0, 0) + +/* Toggles collection state. + The collection state specifies whether the happening of events + should be noted or if they are to be ignored. Events are noted + by increment of counters in a cost center */ +#define CALLGRIND_TOGGLE_COLLECT \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__TOGGLE_COLLECT, \ + 0, 0, 0, 0, 0) + +/* Start full callgrind instrumentation if not already switched on. + When cache simulation is done, it will flush the simulated cache; + this will lead to an artificial cache warmup phase afterwards with + cache misses which would not have happened in reality. */ +#define CALLGRIND_START_INSTRUMENTATION \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__START_INSTRUMENTATION, \ + 0, 0, 0, 0, 0) + +/* Stop full callgrind instrumentation if not already switched off. + This flushes Valgrinds translation cache, and does no additional + instrumentation afterwards, which effectivly will run at the same + speed as the "none" tool (ie. at minimal slowdown). + Use this to bypass Callgrind aggregation for uninteresting code parts. + To start Callgrind in this mode to ignore the setup phase, use + the option "--instr-atstart=no". 
*/ +#define CALLGRIND_STOP_INSTRUMENTATION \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STOP_INSTRUMENTATION, \ + 0, 0, 0, 0, 0) + +#endif /* __CALLGRIND_H */ diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/compat_bindings.cpp b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/compat_bindings.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cd41f0de092f0b1488c8945edf2af80c6f9b596c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/compat_bindings.cpp @@ -0,0 +1,35 @@ +/* Used to collect profiles of old versions of PyTorch. */ +#include +#include + +bool _valgrind_supported_platform() { +#if defined(NVALGRIND) + return false; +#else + return true; +#endif +} + +void _valgrind_toggle() { +#if defined(NVALGRIND) + TORCH_CHECK(false, "Valgrind is not supported."); +#else + CALLGRIND_TOGGLE_COLLECT; +#endif +} + +void _valgrind_toggle_and_dump_stats() { +#if defined(NVALGRIND) + TORCH_CHECK(false, "Valgrind is not supported."); +#else + // NB: See note in Module.cpp + CALLGRIND_TOGGLE_COLLECT; + CALLGRIND_DUMP_STATS; +#endif +} + +PYBIND11_MODULE(callgrind_bindings, m) { + m.def("_valgrind_supported_platform", &_valgrind_supported_platform); + m.def("_valgrind_toggle", &_valgrind_toggle); + m.def("_valgrind_toggle_and_dump_stats", &_valgrind_toggle_and_dump_stats); +} diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/timer_callgrind_template.cpp b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/timer_callgrind_template.cpp new file mode 100644 index 0000000000000000000000000000000000000000..587685c7df7445b299c35462307f47cf6012a00d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/timer_callgrind_template.cpp @@ -0,0 +1,68 @@ +/* C++ template for Timer.collect_callgrind + +This template will be consumed by `cpp_jit.py`, and will replace: + `GLOBAL_SETUP_TEMPLATE_LOCATION`, + `SETUP_TEMPLATE_LOCATION` + and + `STMT_TEMPLATE_LOCATION` +sections with user provided statements. +*/ + +#include +#include +#include + +#include + +// Global setup. (e.g. #includes) +// GLOBAL_SETUP_TEMPLATE_LOCATION + +#if defined(NVALGRIND) +static_assert(false); +#endif + +int main(int argc, char* argv[]) { + // This file should only be called inside of `Timer`, so we can adopt a + // very simple and rigid argument parsing scheme.
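The bindings in compat_bindings.cpp above expose exactly three entry points to Python, and the collection pattern is always the same: toggle collection on immediately before the measured region and toggle it off (with a stats dump) immediately after. A minimal sketch of that pattern, not part of the files above, assuming the bindings are available on `torch._C` (otherwise the wrapper JIT-compiles the compat module, as described later in timer_interface.py) and that the interpreter itself was launched under `valgrind --tool=callgrind --collect-atstart=no`:

import torch

def measure_under_callgrind(fn):
    # Illustrative helper only. Requires a build exposing the valgrind bindings
    # and a process that is actually running under callgrind.
    if not torch._C._valgrind_supported_platform():
        raise OSError("Valgrind is not supported on this platform.")
    torch._C._valgrind_toggle()                   # CALLGRIND_TOGGLE_COLLECT
    fn()                                          # region of interest
    torch._C._valgrind_toggle_and_dump_stats()    # toggle off + CALLGRIND_DUMP_STATS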
+ TORCH_CHECK(argc == 9); + TORCH_CHECK(std::string(argv[1]) == "--number"); + auto number = std::stoi(argv[2]); + + TORCH_CHECK( + std::string(argv[3]) == "--number-warmup" || + std::string(argv[3]) == "--number_warmup"); + auto number_warmup = std::stoi(argv[4]); + + TORCH_CHECK(std::string(argv[5]) == "--repeats"); + auto repeats = std::stoi(argv[6]); + + TORCH_CHECK( + std::string(argv[7]) == "--number-threads" || + std::string(argv[7]) == "--number_threads"); + auto number_threads = std::stoi(argv[8]); + torch::set_num_threads(number_threads); + + // Setup + // SETUP_TEMPLATE_LOCATION + + // Warmup + for (const auto i : c10::irange(number_warmup)) { + (void)i; + // STMT_TEMPLATE_LOCATION + } + + // Main loop + for (const auto repeat : c10::irange(repeats)) { + (void)repeat; + CALLGRIND_TOGGLE_COLLECT; + + for (const auto i : c10::irange(number)) { + (void)i; + // STMT_TEMPLATE_LOCATION + } + + // NB: See note in Module.cpp + CALLGRIND_TOGGLE_COLLECT; + CALLGRIND_DUMP_STATS; + } +} diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/timer_interface.py b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/timer_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..5a3e9f635891d6605ef3ed12855f58cfa5254e56 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/timer_interface.py @@ -0,0 +1,907 @@ +"""Intermediate layer between `Timer` and `valgrind`.""" +import collections +import enum +import dataclasses +import itertools as it +import os +import pickle +import re +import shutil +import subprocess +import sys +import textwrap +from typing import ( + cast, Any, Callable, DefaultDict, Dict, Iterator, List, NamedTuple, + Optional, Tuple, Union, TYPE_CHECKING) + +import torch +from torch.utils.benchmark.utils import common, cpp_jit +from torch.utils.benchmark.utils._stubs import CallgrindModuleType +import operator + + +__all__ = ["FunctionCount", "FunctionCounts", "CallgrindStats", "CopyIfCallgrind"] + + +if TYPE_CHECKING: + CompletedProcessType = subprocess.CompletedProcess[str] +else: + CompletedProcessType = subprocess.CompletedProcess + + +class FunctionCount(NamedTuple): + # TODO(#105471): Rename the count field + count: int # type: ignore[assignment] + function: str + + +@dataclasses.dataclass(repr=False, eq=False, frozen=True) +class FunctionCounts: + """Container for manipulating Callgrind results. + + It supports: + 1) Addition and subtraction to combine or diff results. + 2) Tuple-like indexing. + 3) A `denoise` function which strips CPython calls which are known to + be non-deterministic and quite noisy. + 4) Two higher order methods (`filter` and `transform`) for custom + manipulation. + """ + _data: Tuple[FunctionCount, ...] + inclusive: bool + truncate_rows: bool = True + + # For normal use, torch._tensor_str.PRINT_OPTS.linewidth determines + # the print settings. This is simply to allow hermetic unit tests. 
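`FunctionCounts` is conceptually an ordered tuple of `(count, function)` pairs plus an `inclusive` flag. A small hand-built instance, with instruction counts and symbol names invented purely for illustration:

from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import (
    FunctionCount, FunctionCounts)

counts = FunctionCounts(
    (
        FunctionCount(52000, "build/../aten/src/ATen/native/BinaryOps.cpp:at::native::add(...)"),
        FunctionCount(8100, "/tmp/build/Python/ceval.c:_PyEval_EvalFrameDefault"),
        FunctionCount(1500, "/tmp/build/Objects/dictobject.c:lookdict_unicode"),
    ),
    inclusive=False,
)
print(len(counts))          # 3
print(counts.sum())         # 61600
print(counts[0].function)   # hottest entry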
+ _linewidth: Optional[int] = None + + def __iter__(self) -> Iterator[FunctionCount]: + yield from self._data + + def __len__(self) -> int: + return len(self._data) + + def __getitem__(self, item: Any) -> Union[FunctionCount, "FunctionCounts"]: + data: Union[FunctionCount, Tuple[FunctionCount, ...]] = self._data[item] + return ( + FunctionCounts(cast(Tuple[FunctionCount, ...], data), self.inclusive, truncate_rows=False) + if isinstance(data, tuple) else data + ) + + def __repr__(self) -> str: + count_len = 0 + for c, _ in self: + # Account for sign in string length. + count_len = max(count_len, len(str(c)) + int(c < 0)) + + lines = [] + linewidth = self._linewidth or torch._tensor_str.PRINT_OPTS.linewidth + fn_str_len = max(linewidth - count_len - 4, 40) + for c, fn in self: + if len(fn) > fn_str_len: + left_len = int((fn_str_len - 5) // 2) + fn = fn[:left_len] + " ... " + fn[-(fn_str_len - left_len - 5):] + lines.append(f" {c:>{count_len}} {fn}") + + if self.truncate_rows and len(lines) > 18: + lines = lines[:9] + ["...".rjust(count_len + 2)] + lines[-9:] + + if not self.inclusive: + lines.extend(["", f"Total: {self.sum()}"]) + + return "\n".join([super().__repr__()] + lines) + + def __add__( + self, + other: "FunctionCounts", + ) -> "FunctionCounts": + return self._merge(other, lambda c: c) + + def __sub__( + self, + other: "FunctionCounts", + ) -> "FunctionCounts": + return self._merge(other, operator.neg) + + def __mul__(self, other: Union[int, float]) -> "FunctionCounts": + return self._from_dict({ + fn: int(c * other) for c, fn in self._data + }, self.inclusive) + + def transform(self, map_fn: Callable[[str], str]) -> "FunctionCounts": + """Apply `map_fn` to all of the function names. + + This can be used to regularize function names (e.g. stripping irrelevant + parts of the file path), coalesce entries by mapping multiple functions + to the same name (in which case the counts are added together), etc. + """ + counts: DefaultDict[str, int] = collections.defaultdict(int) + for c, fn in self._data: + counts[map_fn(fn)] += c + + return self._from_dict(counts, self.inclusive) + + def filter(self, filter_fn: Callable[[str], bool]) -> "FunctionCounts": + """Keep only the elements where `filter_fn` applied to function name returns True.""" + return FunctionCounts(tuple(i for i in self if filter_fn(i.function)), self.inclusive) + + def sum(self) -> int: + return sum(c for c, _ in self) + + def denoise(self) -> "FunctionCounts": + """Remove known noisy instructions. + + Several instructions in the CPython interpreter are rather noisy. These + instructions involve unicode to dictionary lookups which Python uses to + map variable names. FunctionCounts is generally a content agnostic + container, however this is sufficiently important for obtaining + reliable results to warrant an exception.""" + return self.filter(lambda fn: "dictobject.c:lookdict_unicode" not in fn) + + def _merge( + self, + second: "FunctionCounts", + merge_fn: Callable[[int], int] + ) -> "FunctionCounts": + assert self.inclusive == second.inclusive, "Cannot merge inclusive and exclusive counts." 
+ counts: DefaultDict[str, int] = collections.defaultdict(int) + for c, fn in self: + counts[fn] += c + + for c, fn in second: + counts[fn] += merge_fn(c) + + return self._from_dict(counts, self.inclusive) + + @staticmethod + def _from_dict(counts: Dict[str, int], inclusive: bool) -> "FunctionCounts": + flat_counts = (FunctionCount(c, fn) for fn, c in counts.items() if c) + return FunctionCounts(tuple(sorted(flat_counts, reverse=True)), inclusive) + + +@dataclasses.dataclass(repr=False, eq=False, frozen=True) +class CallgrindStats: + """Top level container for Callgrind results collected by Timer. + + Manipulation is generally done using the FunctionCounts class, which is + obtained by calling `CallgrindStats.stats(...)`. Several convenience + methods are provided as well; the most significant is + `CallgrindStats.as_standardized()`. + """ + task_spec: common.TaskSpec + number_per_run: int + built_with_debug_symbols: bool + baseline_inclusive_stats: FunctionCounts + baseline_exclusive_stats: FunctionCounts + stmt_inclusive_stats: FunctionCounts + stmt_exclusive_stats: FunctionCounts + stmt_callgrind_out: Optional[str] + + def __repr__(self) -> str: + newline = "\n" # `\` cannot appear in fstring code section. + base_stats = self.baseline_exclusive_stats + output = f""" +{super().__repr__()} +{self.task_spec.summarize()} + {'':>25}All{'':>10}Noisy symbols removed + Instructions: {self.counts(denoise=False):>12}{'':>15}{self.counts(denoise=True):>12} + Baseline: {base_stats.sum():>12}{'':>15}{base_stats.denoise().sum():>12} +{self.number_per_run} runs per measurement, {self.task_spec.num_threads} thread{'s' if self.task_spec.num_threads > 1 else ''} +""".strip() + if not self.built_with_debug_symbols: + output += textwrap.dedent(""" + Warning: PyTorch was not built with debug symbols. + Source information may be limited. Rebuild with + REL_WITH_DEB_INFO=1 for more detailed results.""") + return output + + def stats(self, inclusive: bool = False) -> FunctionCounts: + """Returns detailed function counts. + + Conceptually, the FunctionCounts returned can be thought of as a tuple + of (count, path_and_function_name) tuples. + + `inclusive` matches the semantics of callgrind. If True, the counts + include instructions executed by children. `inclusive=True` is useful + for identifying hot spots in code; `inclusive=False` is useful for + reducing noise when diffing counts from two different runs. (See + CallgrindStats.delta(...) for more details) + """ + return self.stmt_inclusive_stats if inclusive else self.stmt_exclusive_stats + + def counts(self, *, denoise: bool = False) -> int: + """Returns the total number of instructions executed. + + See `FunctionCounts.denoise()` for an explanation of the `denoise` arg. + """ + stats = self.stmt_exclusive_stats + return (stats.denoise() if denoise else stats).sum() + + # FIXME: Once 3.7 is the minimum version, type annotate `other` per PEP 563 + def delta( + self, + other: "CallgrindStats", + inclusive: bool = False, + ) -> FunctionCounts: + """Diff two sets of counts. + + One common reason to collect instruction counts is to determine the + the effect that a particular change will have on the number of instructions + needed to perform some unit of work. If a change increases that number, the + next logical question is "why". This generally involves looking at what part + if the code increased in instruction count. This function automates that + process so that one can easily diff counts on both an inclusive and + exclusive basis. 
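The higher-order helpers compose naturally. Continuing with the illustrative `counts` object built a few notes above (symbols still invented), a sketch of regularizing paths, dropping CPython noise, and narrowing to ATen frames:

import re

regularized = counts.transform(lambda fn: re.sub(r"^.+/(Python|Objects)/", r"\1/", fn))
denoised = regularized.denoise()                       # drops lookdict_unicode noise
aten_only = denoised.filter(lambda fn: "aten" in fn)
print(denoised.sum())       # 60100
print(len(aten_only))       # 1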
+ """ + return self.stats(inclusive=inclusive) - other.stats(inclusive=inclusive) + + def as_standardized(self) -> "CallgrindStats": + """Strip library names and some prefixes from function strings. + + When comparing two different sets of instruction counts, on stumbling + block can be path prefixes. Callgrind includes the full filepath + when reporting a function (as it should). However, this can cause + issues when diffing profiles. If a key component such as Python + or PyTorch was built in separate locations in the two profiles, which + can result in something resembling:: + + 23234231 /tmp/first_build_dir/thing.c:foo(...) + 9823794 /tmp/first_build_dir/thing.c:bar(...) + ... + 53453 .../aten/src/Aten/...:function_that_actually_changed(...) + ... + -9823794 /tmp/second_build_dir/thing.c:bar(...) + -23234231 /tmp/second_build_dir/thing.c:foo(...) + + Stripping prefixes can ameliorate this issue by regularizing the + strings and causing better cancellation of equivalent call sites + when diffing. + """ + def strip(stats: FunctionCounts) -> FunctionCounts: + transforms = ( + # PyTorch may have been built in different locations. + (r"^.+build/\.\./", "build/../"), + (r"^.+/" + re.escape("build/aten/"), "build/aten/"), + + # "Python" and "Objects" come from CPython. + (r"^.+/" + re.escape("Python/"), "Python/"), + (r"^.+/" + re.escape("Objects/"), "Objects/"), + + # Strip library name. e.g. `libtorch.so` + (r"\s\[.+\]$", ""), + ) + + for before, after in transforms: + stats = stats.transform(lambda fn: re.sub(before, after, fn)) + + return stats + + return CallgrindStats( + task_spec=self.task_spec, + number_per_run=self.number_per_run, + built_with_debug_symbols=self.built_with_debug_symbols, + baseline_inclusive_stats=strip(self.baseline_inclusive_stats), + baseline_exclusive_stats=strip(self.baseline_exclusive_stats), + stmt_inclusive_stats=strip(self.stmt_inclusive_stats), + stmt_exclusive_stats=strip(self.stmt_exclusive_stats), + + # `as_standardized` will change symbol names, so the contents will + # no longer map directly to `callgrind.out` + stmt_callgrind_out=None, + ) + + +class Serialization(enum.Enum): + PICKLE = 0 + TORCH = 1 + TORCH_JIT = 2 + + +_GLOBALS_ALLOWED_TYPES: Dict[Serialization, Tuple[Any, ...]] = { + Serialization.PICKLE: (str, bytes, bool, int, float, complex), + Serialization.TORCH_JIT: (torch.jit.ScriptFunction, torch.jit.ScriptModule), + Serialization.TORCH: (torch.nn.Module,), +} + + +class CopyIfCallgrind: + """Signal that a global may be replaced with a deserialized copy. + + See `GlobalsBridge` for why this matters. 
+ """ + def __init__(self, value: Any, *, setup: Optional[str] = None): + for method, supported_types in _GLOBALS_ALLOWED_TYPES.items(): + if any(isinstance(value, t) for t in supported_types): + self._value: Any = value + self._setup: Optional[str] = setup + self._serialization: Serialization = method + break + else: + supported_str = "\n".join([ + getattr(t, "__name__", repr(t)) + for t in it.chain(_GLOBALS_ALLOWED_TYPES.values())]) + + raise ValueError( + f"Unsupported type: {type(value)}\n" + f"`collect_callgrind` restricts globals to the following types:\n" + f"{textwrap.indent(supported_str, ' ')}" + ) + + @property + def value(self) -> Any: + return self._value + + @property + def setup(self) -> Optional[str]: + return self._setup + + @property + def serialization(self) -> Serialization: + return self._serialization + + @staticmethod + def unwrap_all(globals: Dict[str, Any]) -> Dict[str, Any]: + return { + k: (v.value if isinstance(v, CopyIfCallgrind) else v) + for k, v in globals.items() + } + + +class GlobalsBridge: + """Handle the transfer of (certain) globals when collecting Callgrind statistics. + + Key takeaway: Any globals passed must be wrapped in `CopyIfCallgrind` to + work with `Timer.collect_callgrind`. + + Consider the following code snippet: + ``` + import pickle + import timeit + + class Counter: + value = 0 + + def __call__(self): + self.value += 1 + + counter = Counter() + timeit.Timer("counter()", globals={"counter": counter}).timeit(10) + print(counter.value) # 10 + + timeit.Timer( + "counter()", + globals={"counter": pickle.loads(pickle.dumps(counter))} + ).timeit(20) + print(counter.value) # Still 10 + ``` + + In the first case, `stmt` is executed using the objects in `globals`; + however, the addition of serialization and deserialization changes the + semantics and may meaningfully change behavior. + + This is a practical consideration when collecting Callgrind statistics. + Unlike `exec` based execution (which `timeit` uses under the hood) which + can share in-memory data structures with the caller, Callgrind collection + requires an entirely new process in order to run under Valgrind. This means + that any data structures used for statement execution will have to be + serialized and deserialized in the subprocess. + + In order to avoid surprising semantics from (user invisible) process + boundaries, what can be passed through `globals` is severely restricted + for `Timer.collect_callgrind`. It is expected that most setup should be + achievable (albeit perhaps less ergonomically) by passing a `setup` + string. + + There are, however, exceptions. One such class are TorchScripted functions. + Because they require a concrete file with source code it is not possible + to define them using a `setup` string. Another group are torch.nn.Modules, + whose construction can be complex and prohibitively cumbersome to coerce + into a `setup` string. Finally, most builtin types are sufficiently well + behaved and sufficiently common to warrant allowing as well. (e.g. + `globals={"n": 1}` is very convenient.) + + Fortunately, all have well defined serialization semantics. This class + is responsible for enabling the Valgrind subprocess to use elements in + `globals` so long as they are an allowed type. + + Caveats: + The user is required to acknowledge this serialization by wrapping + elements in `globals` with `CopyIfCallgrind`. 
+ + While ScriptFunction and ScriptModule are expected to save and load + quite robustly, it is up to the user to ensure that an nn.Module can + un-pickle successfully. + + `torch.Tensor` and `np.ndarray` are deliberately excluded. The + serialization/deserialization process perturbs the representation of a + tensor in ways that could result in incorrect measurements. For example, + if a tensor lives in pinned CPU memory, this fact would not be preserved + by a dump, and that will in turn change the performance of certain CUDA + operations. + """ + + def __init__(self, globals: Dict[str, Any], data_dir: str) -> None: + self._globals: Dict[str, CopyIfCallgrind] = {} + self._data_dir = data_dir + if not os.path.exists(data_dir): + os.mkdir(data_dir) + + if globals.get("torch", torch) is not torch: + raise ValueError("`collect_callgrind` does not support mocking out `torch`.") + + for name, value in globals.items(): + if name in ("torch", "__builtins__"): + # Torch will be imported by the collection script, and + # __builtins__ is added by Timer. + continue + + if not isinstance(value, CopyIfCallgrind): + raise ValueError( + "`collect_callgrind` requires that globals be wrapped in " + "`CopyIfCallgrind` so that serialization is explicit." + ) + + self._globals[name] = value + + def construct(self) -> str: + load_lines = [] + for name, wrapped_value in self._globals.items(): + if wrapped_value.setup is not None: + load_lines.append(textwrap.dedent(wrapped_value.setup)) + + if wrapped_value.serialization == Serialization.PICKLE: + path = os.path.join(self._data_dir, f"{name}.pkl") + load_lines.append( + f"with open({repr(path)}, 'rb') as f:\n {name} = pickle.load(f)") + with open(path, "wb") as f: + pickle.dump(wrapped_value.value, f) + + elif wrapped_value.serialization == Serialization.TORCH: + path = os.path.join(self._data_dir, f"{name}.pt") + load_lines.append(f"{name} = torch.load({repr(path)})") + torch.save(wrapped_value.value, path) + + elif wrapped_value.serialization == Serialization.TORCH_JIT: + path = os.path.join(self._data_dir, f"{name}.pt") + load_lines.append(f"{name} = torch.jit.load({repr(path)})") + with open(path, "wb") as f: + torch.jit.save(wrapped_value.value, f) # type: ignore[no-untyped-call] + + else: + raise NotImplementedError( + f"Unknown serialization method: {wrapped_value.serialization}") + + return "\n".join(load_lines) + + +class _ValgrindWrapper: + def __init__(self) -> None: + self._bindings_module: Optional[CallgrindModuleType] = None + valgrind_symbols = ( + "_valgrind_supported_platform", + "_valgrind_toggle", + "_valgrind_toggle_and_dump_stats", + ) + if all(hasattr(torch._C, symbol) for symbol in valgrind_symbols): + self._supported_platform: bool = torch._C._valgrind_supported_platform() + + else: + print("Callgrind bindings are not present in `torch._C`. JIT-ing bindings.") + self._bindings_module = cpp_jit.get_compat_bindings() + assert all(hasattr(self._bindings_module, symbol) for symbol in valgrind_symbols) + self._supported_platform = self._bindings_module._valgrind_supported_platform() + + self._commands_available: Dict[str, bool] = {} + if self._supported_platform: + # Only bother checking on supported platforms. 
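For reference, `GlobalsBridge.construct()` above emits plain load statements that the generated benchmark script runs before the user `setup`. For `globals={"n": CopyIfCallgrind(5), "model": CopyIfCallgrind(torch.nn.Linear(8, 8))}` the emitted block would look roughly like the following (scratch paths are invented placeholders):

with open('/tmp/callgrind_scratch/data/n.pkl', 'rb') as f:
    n = pickle.load(f)
model = torch.load('/tmp/callgrind_scratch/data/model.pt')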
+ for cmd in ("valgrind", "callgrind_control", "callgrind_annotate"): + self._commands_available[cmd] = not subprocess.run( + ["which", cmd], + capture_output=True, + check=False, + ).returncode + + self._build_type: Optional[str] = None + build_search = re.search("BUILD_TYPE=(.+),", torch.__config__.show()) # type: ignore[no-untyped-call] + if build_search is not None: + self._build_type = build_search.groups()[0].split(",")[0] + + def _validate(self) -> None: + if not self._supported_platform: + raise OSError("Valgrind is not supported on this platform.") + + missing_cmds = [cmd for cmd, available in self._commands_available.items() if not available] + if missing_cmds: + raise OSError("Missing: " + ", ".join(missing_cmds)) + + def collect_callgrind( + self, + task_spec: common.TaskSpec, + globals: Dict[str, Any], + *, + number: int, + repeats: int, + collect_baseline: bool, + is_python: bool, + retain_out_file: bool, + ) -> Tuple[CallgrindStats, ...]: + """Collect stats, and attach a reference run which can be used to filter interpreter overhead.""" + self._validate() + assert is_python or not collect_baseline + + *task_stats, baseline_stats = self._invoke( + task_spec=task_spec, + globals=globals, + number=number, + repeats=repeats, + collect_baseline=collect_baseline, + is_python=is_python, + retain_out_file=retain_out_file, + ) + assert len(task_stats) == repeats + + return tuple( + CallgrindStats( + task_spec=task_spec, + number_per_run=number, + built_with_debug_symbols=self._build_type == "RelWithDebInfo", + baseline_inclusive_stats=baseline_stats[0], + baseline_exclusive_stats=baseline_stats[1], + stmt_inclusive_stats=stmt_inclusive_stats, + stmt_exclusive_stats=stmt_exclusive_stats, + stmt_callgrind_out=out_contents, + ) + for stmt_inclusive_stats, stmt_exclusive_stats, out_contents in task_stats + ) + + def _invoke( + self, + *, + task_spec: common.TaskSpec, + globals: Dict[str, Any], + number: int, + repeats: int, + collect_baseline: bool, + is_python: bool, + retain_out_file: bool, + ) -> Tuple[Tuple[FunctionCounts, FunctionCounts, Optional[str]], ...]: + """Core invocation method for Callgrind collection. + + Valgrind operates by effectively replacing the CPU with an emulated + version which allows it to instrument any code at the cost of severe + performance degradation. This has the practical effect that in order + to collect Callgrind statistics, a new process has to be created + running under `valgrind`. The steps for this process are: + + 1) Create a scratch directory. + 2) Codegen a run script. (_ValgrindWrapper._construct_script) + Inside the run script: + * Validate that Python and torch match the parent process + * Validate that it is indeed running under valgrind + * Execute `setup` and warm up `stmt` + * Begin collecting stats + * Run the `stmt` loop + * Stop collecting stats + 3) Parse the run results. + 4) Cleanup the scratch directory. 
+ """ + working_dir = common._make_temp_dir(prefix="callgrind") + data_dir = os.path.join(working_dir, "data") + script_file = os.path.join(working_dir, "timer_callgrind.py") + callgrind_out = os.path.join(working_dir, "callgrind.out") + error_log = os.path.join(working_dir, "error.txt") + stat_log = os.path.join(working_dir, "callgrind_stat.txt") + stdout_stderr_log = os.path.join(working_dir, "stdout_stderr.log") + + def run(args: List[str], **kwargs: Any) -> Tuple[CompletedProcessType, str]: + # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/ + f_stdout_stderr = open(stdout_stderr_log, "wb") + try: + invocation = subprocess.run( + args, + stdout=f_stdout_stderr, + stderr=subprocess.STDOUT, + **kwargs, + ) + with open(stdout_stderr_log) as f: + return invocation, f.read() + finally: + f_stdout_stderr.close() + + try: + if is_python: + if self._bindings_module is not None: + shutil.copy( + self._bindings_module.__file__, + os.path.join(working_dir, os.path.split(self._bindings_module.__file__)[1]) + ) + + script_file = os.path.join(working_dir, "timer_callgrind.py") + with open(script_file, "w") as f: + f.write(self._construct_script( + task_spec, + globals=GlobalsBridge(globals, data_dir), + number=number, + repeats=repeats, + collect_baseline=collect_baseline, + error_log=error_log, + stat_log=stat_log, + bindings=self._bindings_module)) + + run_loop_cmd = ["python", script_file] + else: + assert not collect_baseline + run_loop_exec = cpp_jit.compile_callgrind_template( + stmt=task_spec.stmt, + setup=task_spec.setup, + global_setup=task_spec.global_setup, + ) + run_loop_cmd = [ + run_loop_exec, + "--number", str(number), + "--number-warmup", str(min(number, 10)), + "--repeats", str(repeats), + "--number-threads", str(task_spec.num_threads), + ] + + valgrind_invocation, valgrind_invocation_output = run([ + "valgrind", + "--tool=callgrind", + f"--callgrind-out-file={callgrind_out}", + "--dump-line=yes", + "--dump-instr=yes", + "--instr-atstart=yes", + "--collect-atstart=no", + ] + run_loop_cmd) + + if valgrind_invocation.returncode: + error_report = "" + if os.path.exists(error_log): + with open(error_log) as f: + error_report = f.read() + if not error_report: + error_report = "Unknown error.\n" + valgrind_invocation_output + + raise OSError(f"Failed to collect callgrind profile:\n{error_report}") + + def parse_output(fpath: str, inclusive: bool) -> FunctionCounts: + annotate_invocation, annotate_invocation_output = run([ + "callgrind_annotate", + f"--inclusive={'yes' if inclusive else 'no'}", + "--threshold=100", + "--show-percs=no", + fpath + ], check=True) + + total_pattern = re.compile(r"^([0-9,]+)\s+PROGRAM TOTALS") + begin_pattern = re.compile(r"Ir\s+file:function") + function_pattern = re.compile(r"^\s*([0-9,]+)\s+(.+:.+)$") + + class ScanState(enum.Enum): + SCANNING_FOR_TOTAL = 0 + SCANNING_FOR_START = 1 + PARSING = 2 + + scan_state = ScanState.SCANNING_FOR_TOTAL + fn_counts = [] + for l in annotate_invocation_output.splitlines(keepends=False): + if scan_state == ScanState.SCANNING_FOR_TOTAL: + total_match = total_pattern.match(l) + if total_match: + program_totals = int(total_match.groups()[0].replace(",", "")) + scan_state = ScanState.SCANNING_FOR_START + + elif scan_state == ScanState.SCANNING_FOR_START: + if begin_pattern.match(l): + scan_state = ScanState.PARSING + + else: + assert scan_state == ScanState.PARSING + fn_match = function_pattern.match(l) + if fn_match: + ir_str, file_function = fn_match.groups() + ir = 
int(ir_str.replace(",", "")) + if ir == program_totals: # type: ignore[possibly-undefined] + # Callgrind includes some top level red herring symbols when + # a program dumps multiple profiles. + continue + fn_counts.append(FunctionCount(ir, file_function)) + + elif re.match(r"-+", l): + # Ignore heading separator lines. + continue + + else: + break + + assert scan_state == ScanState.PARSING, f"Failed to parse {fpath}" + return FunctionCounts(tuple(sorted(fn_counts, reverse=True)), inclusive=inclusive) + + def read_results(i: int) -> Tuple[FunctionCounts, FunctionCounts, Optional[str]]: + if i == repeats and not collect_baseline: + # Null baseline. + return ( + FunctionCounts((), inclusive=True), + FunctionCounts((), inclusive=False), + None, + ) + + fpath = f"{callgrind_out}.{i + 1}" # Callgrind one-indexes files. + callgrind_out_contents: Optional[str] = None + if retain_out_file: + with open(fpath) as f: + callgrind_out_contents = f.read() + + return ( + parse_output(fpath, inclusive=True), + parse_output(fpath, inclusive=False), + callgrind_out_contents + ) + + return tuple(read_results(i) for i in range(repeats + 1)) + finally: + shutil.rmtree(working_dir) + + @staticmethod + def _construct_script( + task_spec: common.TaskSpec, + globals: GlobalsBridge, + *, + number: int, + repeats: int, + collect_baseline: bool, + error_log: str, + stat_log: str, + bindings: Optional[CallgrindModuleType], + ) -> str: + def block_stmt(stmt: str, indent: int = 0) -> str: + """Partially unroll benchmark loop. + + The naive template looks something like: + "for _ in range({number}): {stmt}" + + However a loop in Python is surprisingly expensive, and significantly + increases the number of background Python instructions. So instead we + partially unroll the loops, with a block size of 100 chosen to keep + the instruction overhead from `range` low while also not ballooning + the size of the generated file. + """ + block_size = 100 + loop_count = number // block_size + if loop_count == 1: + # There is no point in having `for _ in range(1): ...` rather + # than just `...`, and this lets us save shave a few background + # instructions. + loop_count = 0 + remainder = number - block_size * loop_count + blocked_stmt = "" + + if loop_count: + unrolled_stmts = textwrap.indent("\n".join([stmt] * block_size), " " * 4) + blocked_stmt += f"for _ in range({loop_count}):\n{unrolled_stmts}\n" + + if remainder: + blocked_stmt += "\n".join([stmt] * remainder) + + return textwrap.indent(blocked_stmt, " " * indent) + + pass_baseline = ( + "callgrind_bindings._valgrind_toggle()\n" + f"{block_stmt('pass')}\n" + "callgrind_bindings._valgrind_toggle_and_dump_stats()" + ) + + return textwrap.dedent(r""" + import gc + import os + import pickle + import subprocess + import sys + import time + + # Mitigate https://github.com/pytorch/pytorch/issues/37377 + # which can sometimes cause the subprocess call to fail. 
+ import numpy as np + + import torch + torch.set_num_threads({num_threads}) + + {bindings_import} + + PID = os.getpid() + + def log_failure(msg): + with open({error_log_repr}, "wt") as f: + f.write(msg) + sys.exit(1) + + def check_result(completed_process): + if completed_process.returncode: + log_failure(f"Command failed: {{' '.join(completed_process.args)}}") + return completed_process + + # ============================================================================= + # == Check that subprocess matches parent ===================================== + # ============================================================================= + if os.path.realpath(sys.executable) != "{parent_interpreter}": + log_failure( + "Interpreter mismatch:\n" + f" {{os.path.realpath(sys.executable)}}\n vs.\n {parent_interpreter}" + ) + + if torch.__file__ != "{torch_file}": + log_failure( + "PyTorch does not match expected file:\n" + f" {{torch.__file__}}\n vs.\n {torch_file}" + ) + + # ============================================================================= + # == User specified setup ===================================================== + # ============================================================================= + # Load serialized globals + {load_globals} + + # User setup str + {setup} + + for _ in range({warmup_number}): + {indented_stmt} + + # ============================================================================= + # == Callgrind management ===================================================== + # ============================================================================= + with open("{stat_log}", "wb") as stat_file: + # If many instances of callgrind are running at once, the output of + # `callgrind_control` may exceed 16kb which would cause `subprocess.PIPE` + # to deadlock. So instead we use a file. 
+ callgrind_stat = check_result(subprocess.run( + ["callgrind_control", "--stat"], + stdout=stat_file, + stderr=subprocess.STDOUT, + )) + + with open("{stat_log}", "rt") as stat_file: + stat_lines = stat_file.read().splitlines() + + if f"PID {{PID}}: python {{__file__}}" not in stat_lines: + log_failure("Process does not appear to be running callgrind.") + + gc.collect() + time.sleep(0.01) + + # ============================================================================= + # == User code block ========================================================== + # ============================================================================= + for _ in range({repeats}): + callgrind_bindings._valgrind_toggle() + {blocked_stmt} + callgrind_bindings._valgrind_toggle_and_dump_stats() + gc.collect() + + {baseline} + """).strip().format( + indented_stmt=textwrap.indent(task_spec.stmt, " " * 4), + blocked_stmt=block_stmt(task_spec.stmt, indent=4), + baseline=(pass_baseline if collect_baseline else ""), + number=number, + repeats=repeats, + load_globals=globals.construct(), + setup=task_spec.setup, + warmup_number=min(number, 10), + num_threads=task_spec.num_threads, + error_log_repr=repr(error_log), + stat_log=stat_log, + parent_interpreter=os.path.realpath(sys.executable), + torch_file=torch.__file__, + bindings_import=( + "import torch._C as callgrind_bindings" if bindings is None + else f"import {bindings.__name__} as callgrind_bindings"), + ) + + +CALLGRIND_SINGLETON: Optional[_ValgrindWrapper] = None +def wrapper_singleton() -> _ValgrindWrapper: + global CALLGRIND_SINGLETON + if CALLGRIND_SINGLETON is None: + CALLGRIND_SINGLETON = _ValgrindWrapper() + return CALLGRIND_SINGLETON diff --git a/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/valgrind.h b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/valgrind.h new file mode 100644 index 0000000000000000000000000000000000000000..d33dd30932aa86b8284cb93d0e29ec646e820197 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/benchmark/utils/valgrind_wrapper/valgrind.h @@ -0,0 +1,7157 @@ +/* -*- c -*- + ---------------------------------------------------------------- + + Notice that the following BSD-style license applies to this one + file (valgrind.h) only. The rest of Valgrind is licensed under the + terms of the GNU General Public License, version 2, unless + otherwise indicated. See the COPYING file in the source + distribution for details. + + ---------------------------------------------------------------- + + This file is part of Valgrind, a dynamic binary instrumentation + framework. + + Copyright (C) 2000-2017 Julian Seward. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + + 3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. 
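Referring back to `block_stmt` in `_construct_script` above: the partial unrolling keeps the Python loop overhead roughly constant regardless of `number`. A worked example of the shape it produces, reimplemented standalone with the same block size of 100 (illustrative only, not the code consumed by the wrapper):

import textwrap

def block_stmt(stmt: str, number: int, block_size: int = 100) -> str:
    loop_count = number // block_size
    if loop_count == 1:
        loop_count = 0                      # unroll fully instead of `for _ in range(1)`
    remainder = number - block_size * loop_count
    out = ""
    if loop_count:
        body = textwrap.indent("\n".join([stmt] * block_size), " " * 4)
        out += f"for _ in range({loop_count}):\n{body}\n"
    if remainder:
        out += "\n".join([stmt] * remainder)
    return out

# number=250 -> `for _ in range(2):` over 100 unrolled copies, plus 50 trailing copies.
print(block_stmt("y = x + 1", 250).count("y = x + 1"))   # 150 literal copies emitted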
+ + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---------------------------------------------------------------- + + Notice that the above BSD-style license applies to this one file + (valgrind.h) only. The entire rest of Valgrind is licensed under + the terms of the GNU General Public License, version 2. See the + COPYING file in the source distribution for details. + + ---------------------------------------------------------------- +*/ + + +/* This file is for inclusion into client (your!) code. + + You can use these macros to manipulate and query Valgrind's + execution inside your own programs. + + The resulting executables will still run without Valgrind, just a + little bit more slowly than they otherwise would, but otherwise + unchanged. When not running on valgrind, each client request + consumes very few (eg. 7) instructions, so the resulting performance + loss is negligible unless you plan to execute client requests + millions of times per second. Nevertheless, if that is still a + problem, you can compile with the NVALGRIND symbol defined (gcc + -DNVALGRIND) so that client requests are not even compiled in. */ + +#ifndef __VALGRIND_H +#define __VALGRIND_H + + +/* ------------------------------------------------------------------ */ +/* VERSION NUMBER OF VALGRIND */ +/* ------------------------------------------------------------------ */ + +/* Specify Valgrind's version number, so that user code can + conditionally compile based on our version number. Note that these + were introduced at version 3.6 and so do not exist in version 3.5 + or earlier. The recommended way to use them to check for "version + X.Y or later" is (eg) + +#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \ + && (__VALGRIND_MAJOR__ > 3 \ + || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6)) +*/ +#define __VALGRIND_MAJOR__ 3 +#define __VALGRIND_MINOR__ 17 + + +#include + +/* Nb: this file might be included in a file compiled with -ansi. So + we can't use C++ style "//" comments nor the "asm" keyword (instead + use "__asm__"). */ + +/* Derive some tags indicating what the target platform is. Note + that in this file we're using the compiler's CPP symbols for + identifying architectures, which are different to the ones we use + within the rest of Valgrind. Note, __powerpc__ is active for both + 32 and 64-bit PPC, whereas __powerpc64__ is only active for the + latter (on Linux, that is). 
+ + Misc note: how to find out what's predefined in gcc by default: + gcc -Wp,-dM somefile.c +*/ +#undef PLAT_x86_darwin +#undef PLAT_amd64_darwin +#undef PLAT_x86_win32 +#undef PLAT_amd64_win64 +#undef PLAT_x86_linux +#undef PLAT_amd64_linux +#undef PLAT_ppc32_linux +#undef PLAT_ppc64be_linux +#undef PLAT_ppc64le_linux +#undef PLAT_arm_linux +#undef PLAT_arm64_linux +#undef PLAT_s390x_linux +#undef PLAT_mips32_linux +#undef PLAT_mips64_linux +#undef PLAT_nanomips_linux +#undef PLAT_x86_solaris +#undef PLAT_amd64_solaris + + +#if defined(__APPLE__) && defined(__i386__) +# define PLAT_x86_darwin 1 +#elif defined(__APPLE__) && defined(__x86_64__) +# define PLAT_amd64_darwin 1 +#elif (defined(__MINGW32__) && defined(__i386__)) \ + || defined(__CYGWIN32__) \ + || (defined(_WIN32) && defined(_M_IX86)) +# define PLAT_x86_win32 1 +#elif (defined(__MINGW32__) && defined(__x86_64__)) \ + || (defined(_WIN32) && defined(_M_X64)) +/* __MINGW32__ and _WIN32 are defined in 64 bit mode as well. */ +# define PLAT_amd64_win64 1 +#elif defined(__linux__) && defined(__i386__) +# define PLAT_x86_linux 1 +#elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__) +# define PLAT_amd64_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__) +# define PLAT_ppc32_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2 +/* Big Endian uses ELF version 1 */ +# define PLAT_ppc64be_linux 1 +#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2 +/* Little Endian uses ELF version 2 */ +# define PLAT_ppc64le_linux 1 +#elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__) +# define PLAT_arm_linux 1 +#elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__) +# define PLAT_arm64_linux 1 +#elif defined(__linux__) && defined(__s390__) && defined(__s390x__) +# define PLAT_s390x_linux 1 +#elif defined(__linux__) && defined(__mips__) && (__mips==64) +# define PLAT_mips64_linux 1 +#elif defined(__linux__) && defined(__mips__) && (__mips==32) +# define PLAT_mips32_linux 1 +#elif defined(__linux__) && defined(__nanomips__) +# define PLAT_nanomips_linux 1 +#elif defined(__sun) && defined(__i386__) +# define PLAT_x86_solaris 1 +#elif defined(__sun) && defined(__x86_64__) +# define PLAT_amd64_solaris 1 +#else +/* If we're not compiling for our target platform, don't generate + any inline asms. */ +# if !defined(NVALGRIND) +# define NVALGRIND 1 +# endif +#endif + + +/* ------------------------------------------------------------------ */ +/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */ +/* in here of use to end-users -- skip to the next section. */ +/* ------------------------------------------------------------------ */ + +/* + * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client + * request. Accepts both pointers and integers as arguments. + * + * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind + * client request that does not return a value. + + * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind + * client request and whose value equals the client request result. Accepts + * both pointers and integers as arguments. Note that such calls are not + * necessarily pure functions -- they may have side effects. 
+ */ + +#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \ + _zzq_request, _zzq_arg1, _zzq_arg2, \ + _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \ + (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ + (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) + +#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \ + _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ + (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) + +#if defined(NVALGRIND) + +/* Define NVALGRIND to completely remove the Valgrind magic sequence + from the compiled code (analogous to NDEBUG's effects on + assert()) */ +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + (_zzq_default) + +#else /* ! NVALGRIND */ + +/* The following defines the magic code sequences which the JITter + spots and handles magically. Don't look too closely at them as + they will rot your brain. + + The assembly code sequences for all architectures is in this one + file. This is because this file must be stand-alone, and we don't + want to have multiple files. + + For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default + value gets put in the return slot, so that everything works when + this is executed not under Valgrind. Args are passed in a memory + block, and so there's no intrinsic limit to the number that could + be passed, but it's currently five. + + The macro args are: + _zzq_rlval result lvalue + _zzq_default default value (result returned when running on real CPU) + _zzq_request request code + _zzq_arg1..5 request params + + The other two macros are used to support function wrapping, and are + a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the + guest's NRADDR pseudo-register and whatever other information is + needed to safely run the call original from the wrapper: on + ppc64-linux, the R2 value at the divert point is also needed. This + information is abstracted into a user-visible type, OrigFn. + + VALGRIND_CALL_NOREDIR_* behaves the same as the following on the + guest, but guarantees that the branch instruction will not be + redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64: + branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a + complete inline asm, since it needs to be combined with more magic + inline asm stuff to be useful. +*/ + +/* ----------------- x86-{linux,darwin,solaris} ---------------- */ + +#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ + || (defined(PLAT_x86_win32) && defined(__GNUC__)) \ + || defined(PLAT_x86_solaris) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "roll $3, %%edi ; roll $13, %%edi\n\t" \ + "roll $29, %%edi ; roll $19, %%edi\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EDX = client_request ( %EAX ) */ \ + "xchgl %%ebx,%%ebx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EAX = guest_NRADDR */ \ + "xchgl %%ecx,%%ecx" \ + : "=a" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_EAX \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%EAX */ \ + "xchgl %%edx,%%edx\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "xchgl %%edi,%%edi\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) + || PLAT_x86_solaris */ + +/* ------------------------- x86-Win32 ------------------------- */ + +#if defined(PLAT_x86_win32) && !defined(__GNUC__) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#if defined(_MSC_VER) + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + __asm rol edi, 3 __asm rol edi, 13 \ + __asm rol edi, 29 __asm rol edi, 19 + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \ + (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \ + (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \ + (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5)) + +static __inline uintptr_t +valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request, + uintptr_t _zzq_arg1, uintptr_t _zzq_arg2, + uintptr_t _zzq_arg3, uintptr_t _zzq_arg4, + uintptr_t _zzq_arg5) +{ + volatile uintptr_t _zzq_args[6]; + volatile unsigned int _zzq_result; + _zzq_args[0] = (uintptr_t)(_zzq_request); + _zzq_args[1] = (uintptr_t)(_zzq_arg1); + _zzq_args[2] = (uintptr_t)(_zzq_arg2); + _zzq_args[3] = (uintptr_t)(_zzq_arg3); + _zzq_args[4] = (uintptr_t)(_zzq_arg4); + _zzq_args[5] = (uintptr_t)(_zzq_arg5); + __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default + __SPECIAL_INSTRUCTION_PREAMBLE + /* %EDX = client_request ( %EAX ) */ + __asm xchg ebx,ebx + __asm mov _zzq_result, edx + } + return _zzq_result; +} + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %EAX = guest_NRADDR */ \ + __asm xchg ecx,ecx \ + __asm mov __addr, eax \ + } \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_EAX ERROR + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ + __asm xchg edi,edi \ + } \ + } while (0) + +#else +#error Unsupported compiler. +#endif + +#endif /* PLAT_x86_win32 */ + +/* ----------------- amd64-{linux,darwin,solaris} --------------- */ + +#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ + || defined(PLAT_amd64_solaris) \ + || (defined(PLAT_amd64_win64) && defined(__GNUC__)) + +typedef + struct { + unsigned long int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \ + "rolq $61, %%rdi ; rolq $51, %%rdi\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RDX = client_request ( %RAX ) */ \ + "xchgq %%rbx,%%rbx" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RAX = guest_NRADDR */ \ + "xchgq %%rcx,%%rcx" \ + : "=a" (__addr) \ + : \ + : "cc", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_RAX \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%RAX */ \ + "xchgq %%rdx,%%rdx\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "xchgq %%rdi,%%rdi\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */ + +/* ------------------------- amd64-Win64 ------------------------- */ + +#if defined(PLAT_amd64_win64) && !defined(__GNUC__) + +#error Unsupported compiler. + +#endif /* PLAT_amd64_win64 */ + +/* ------------------------ ppc32-linux ------------------------ */ + +#if defined(PLAT_ppc32_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \ + "rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ unsigned int _zzq_args[6]; \ + unsigned int _zzq_result; \ + unsigned int* _zzq_ptr; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc32_linux */ + +/* ------------------------ ppc64-linux ------------------------ */ + +#if defined(PLAT_ppc64be_linux) + +typedef + struct { + unsigned long int nraddr; /* where's the code? */ + unsigned long int r2; /* what tocptr do we need? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ + "rotldi 0,0,61 ; rotldi 0,0,51\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ unsigned long int _zzq_args[6]; \ + unsigned long int _zzq_result; \ + unsigned long int* _zzq_ptr; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR_GPR2 */ \ + "or 4,4,4\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->r2 = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R11 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc64be_linux */ + +#if defined(PLAT_ppc64le_linux) + +typedef + struct { + unsigned long int nraddr; /* where's the code? */ + unsigned long int r2; /* what tocptr do we need? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ + "rotldi 0,0,61 ; rotldi 0,0,51\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({ unsigned long int _zzq_args[6]; \ + unsigned long int _zzq_result; \ + unsigned long int* _zzq_ptr; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + _zzq_ptr = _zzq_args; \ + __asm__ volatile("mr 3,%1\n\t" /*default*/ \ + "mr 4,%2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = client_request ( %R4 ) */ \ + "or 1,1,1\n\t" \ + "mr %0,3" /*result*/ \ + : "=b" (_zzq_result) \ + : "b" (_zzq_default), "b" (_zzq_ptr) \ + : "cc", "memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR */ \ + "or 2,2,2\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %R3 = guest_NRADDR_GPR2 */ \ + "or 4,4,4\n\t" \ + "mr %0,3" \ + : "=b" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->r2 = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R12 */ \ + "or 3,3,3\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or 5,5,5\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_ppc64le_linux */ + +/* ------------------------- arm-linux ------------------------- */ + +#if defined(PLAT_arm_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \ + "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("mov r3, %1\n\t" /*default*/ \ + "mov r4, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* R3 = client_request ( R4 ) */ \ + "orr r10, r10, r10\n\t" \ + "mov %0, r3" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "cc","memory", "r3", "r4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* R3 = guest_NRADDR */ \ + "orr r11, r11, r11\n\t" \ + "mov %0, r3" \ + : "=r" (__addr) \ + : \ + : "cc", "memory", "r3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir *%R4 */ \ + "orr r12, r12, r12\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "orr r9, r9, r9\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_arm_linux */ + +/* ------------------------ arm64-linux ------------------------- */ + +#if defined(PLAT_arm64_linux) + +typedef + struct { + unsigned long int nraddr; /* where's the code? 
*/ + } + OrigFn; + +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \ + "ror x12, x12, #51 ; ror x12, x12, #61 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + \ + __extension__ \ + ({volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile("mov x3, %1\n\t" /*default*/ \ + "mov x4, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* X3 = client_request ( X4 ) */ \ + "orr x10, x10, x10\n\t" \ + "mov %0, x3" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" ((unsigned long int)(_zzq_default)), \ + "r" (&_zzq_args[0]) \ + : "cc","memory", "x3", "x4"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* X3 = guest_NRADDR */ \ + "orr x11, x11, x11\n\t" \ + "mov %0, x3" \ + : "=r" (__addr) \ + : \ + : "cc", "memory", "x3" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* branch-and-link-to-noredir X8 */ \ + "orr x12, x12, x12\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "orr x9, x9, x9\n\t" \ + : : : "cc", "memory" \ + ); \ + } while (0) + +#endif /* PLAT_arm64_linux */ + +/* ------------------------ s390x-linux ------------------------ */ + +#if defined(PLAT_s390x_linux) + +typedef + struct { + unsigned long int nraddr; /* where's the code? */ + } + OrigFn; + +/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific + * code. This detection is implemented in platform specific toIR.c + * (e.g. VEX/priv/guest_s390_decoder.c). 
+ */ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "lr 15,15\n\t" \ + "lr 1,1\n\t" \ + "lr 2,2\n\t" \ + "lr 3,3\n\t" + +#define __CLIENT_REQUEST_CODE "lr 2,2\n\t" +#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t" +#define __CALL_NO_REDIR_CODE "lr 4,4\n\t" +#define __VEX_INJECT_IR_CODE "lr 5,5\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile(/* r2 = args */ \ + "lgr 2,%1\n\t" \ + /* r3 = default */ \ + "lgr 3,%2\n\t" \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + __CLIENT_REQUEST_CODE \ + /* results = r3 */ \ + "lgr %0, 3\n\t" \ + : "=d" (_zzq_result) \ + : "a" (&_zzq_args[0]), "0" (_zzq_default) \ + : "cc", "2", "3", "memory" \ + ); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + __GET_NR_CONTEXT_CODE \ + "lgr %0, 3\n\t" \ + : "=a" (__addr) \ + : \ + : "cc", "3", "memory" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_R1 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + __CALL_NO_REDIR_CODE + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + __VEX_INJECT_IR_CODE); \ + } while (0) + +#endif /* PLAT_s390x_linux */ + +/* ------------------------- mips32-linux ---------------- */ + +#if defined(PLAT_mips32_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; + +/* .word 0x342 + * .word 0x742 + * .word 0xC2 + * .word 0x4C2*/ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "srl $0, $0, 13\n\t" \ + "srl $0, $0, 29\n\t" \ + "srl $0, $0, 3\n\t" \ + "srl $0, $0, 19\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("move $11, %1\n\t" /*default*/ \ + "move $12, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* T3 = client_request ( T4 ) */ \ + "or $13, $13, $13\n\t" \ + "move %0, $11\n\t" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "$11", "$12", "memory"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %t9 = guest_NRADDR */ \ + "or $14, $14, $14\n\t" \ + "move %0, $11" /*result*/ \ + : "=r" (__addr) \ + : \ + : "$11" \ + ); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_T9 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir *%t9 */ \ + "or $15, $15, $15\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or $11, $11, $11\n\t" \ + ); \ + } while (0) + + +#endif /* PLAT_mips32_linux */ + +/* ------------------------- mips64-linux ---------------- */ + +#if defined(PLAT_mips64_linux) + +typedef + struct { + unsigned long nraddr; /* where's the code? 
*/ + } + OrigFn; + +/* dsll $0,$0, 3 + * dsll $0,$0, 13 + * dsll $0,$0, 29 + * dsll $0,$0, 19*/ +#define __SPECIAL_INSTRUCTION_PREAMBLE \ + "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \ + "dsll $0,$0,29 ; dsll $0,$0,19\n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned long int _zzq_args[6]; \ + volatile unsigned long int _zzq_result; \ + _zzq_args[0] = (unsigned long int)(_zzq_request); \ + _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ + __asm__ volatile("move $11, %1\n\t" /*default*/ \ + "move $12, %2\n\t" /*ptr*/ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* $11 = client_request ( $12 ) */ \ + "or $13, $13, $13\n\t" \ + "move %0, $11\n\t" /*result*/ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "$11", "$12", "memory"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* $11 = guest_NRADDR */ \ + "or $14, $14, $14\n\t" \ + "move %0, $11" /*result*/ \ + : "=r" (__addr) \ + : \ + : "$11"); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_T9 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir $25 */ \ + "or $15, $15, $15\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or $11, $11, $11\n\t" \ + ); \ + } while (0) + +#endif /* PLAT_mips64_linux */ + +#if defined(PLAT_nanomips_linux) + +typedef + struct { + unsigned int nraddr; /* where's the code? 
*/ + } + OrigFn; +/* + 8000 c04d srl zero, zero, 13 + 8000 c05d srl zero, zero, 29 + 8000 c043 srl zero, zero, 3 + 8000 c053 srl zero, zero, 19 +*/ + +#define __SPECIAL_INSTRUCTION_PREAMBLE "srl[32] $zero, $zero, 13 \n\t" \ + "srl[32] $zero, $zero, 29 \n\t" \ + "srl[32] $zero, $zero, 3 \n\t" \ + "srl[32] $zero, $zero, 19 \n\t" + +#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + _zzq_default, _zzq_request, \ + _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ + __extension__ \ + ({ volatile unsigned int _zzq_args[6]; \ + volatile unsigned int _zzq_result; \ + _zzq_args[0] = (unsigned int)(_zzq_request); \ + _zzq_args[1] = (unsigned int)(_zzq_arg1); \ + _zzq_args[2] = (unsigned int)(_zzq_arg2); \ + _zzq_args[3] = (unsigned int)(_zzq_arg3); \ + _zzq_args[4] = (unsigned int)(_zzq_arg4); \ + _zzq_args[5] = (unsigned int)(_zzq_arg5); \ + __asm__ volatile("move $a7, %1\n\t" /* default */ \ + "move $t0, %2\n\t" /* ptr */ \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* $a7 = client_request( $t0 ) */ \ + "or[32] $t0, $t0, $t0\n\t" \ + "move %0, $a7\n\t" /* result */ \ + : "=r" (_zzq_result) \ + : "r" (_zzq_default), "r" (&_zzq_args[0]) \ + : "$a7", "$t0", "memory"); \ + _zzq_result; \ + }) + +#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ + { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ + volatile unsigned long int __addr; \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* $a7 = guest_NRADDR */ \ + "or[32] $t1, $t1, $t1\n\t" \ + "move %0, $a7" /*result*/ \ + : "=r" (__addr) \ + : \ + : "$a7"); \ + _zzq_orig->nraddr = __addr; \ + } + +#define VALGRIND_CALL_NOREDIR_T9 \ + __SPECIAL_INSTRUCTION_PREAMBLE \ + /* call-noredir $25 */ \ + "or[32] $t2, $t2, $t2\n\t" + +#define VALGRIND_VEX_INJECT_IR() \ + do { \ + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + "or[32] $t3, $t3, $t3\n\t" \ + ); \ + } while (0) + +#endif +/* Insert assembly code for other platforms here... */ + +#endif /* NVALGRIND */ + + +/* ------------------------------------------------------------------ */ +/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */ +/* ugly. It's the least-worst tradeoff I can think of. */ +/* ------------------------------------------------------------------ */ + +/* This section defines magic (a.k.a appalling-hack) macros for doing + guaranteed-no-redirection macros, so as to get from function + wrappers to the functions they are wrapping. The whole point is to + construct standard call sequences, but to do the call itself with a + special no-redirect call pseudo-instruction that the JIT + understands and handles specially. This section is long and + repetitious, and I can't see a way to make it shorter. + + The naming scheme is as follows: + + CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc} + + 'W' stands for "word" and 'v' for "void". Hence there are + different macros for calling arity 0, 1, 2, 3, 4, etc, functions, + and for each, the possibility of returning a word-typed result, or + no result. +*/ + +/* Use these to write the name of your wrapper. NOTE: duplicates + VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts + the default behaviour equivalance class tag "0000" into the name. + See pub_tool_redir.h for details -- normally you don't need to + think about this, though. */ + +/* Use an extra level of macroisation so as to ensure the soname/fnname + args are fully macro-expanded before pasting them together. 
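+
+   As a purely illustrative example of the pasting (not part of the
+   original header): with the macros defined just below,
+
+      I_WRAP_SONAME_FNNAME_ZU(NONE, foo)
+
+   expands, after the extra level of macroisation, to the single
+   identifier
+
+      _vgw00000ZU_NONE_foo
+
+   ("NONE" being the soname tag conventionally used for objects without
+   a soname, such as the main executable -- see pub_tool_redir.h).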
*/ +#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd + +#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \ + VG_CONCAT4(_vgw00000ZU_,soname,_,fnname) + +#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \ + VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname) + +/* Use this macro from within a wrapper function to collect the + context (address and possibly other info) of the original function. + Once you have that you can then use it in one of the CALL_FN_ + macros. The type of the argument _lval is OrigFn. */ +#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval) + +/* Also provide end-user facilities for function replacement, rather + than wrapping. A replacement function differs from a wrapper in + that it has no way to get hold of the original function being + called, and hence no way to call onwards to it. In a replacement + function, VALGRIND_GET_ORIG_FN always returns zero. */ + +#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \ + VG_CONCAT4(_vgr00000ZU_,soname,_,fnname) + +#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \ + VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname) + +/* Derivatives of the main macros below, for calling functions + returning void. */ + +#define CALL_FN_v_v(fnptr) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_v(_junk,fnptr); } while (0) + +#define CALL_FN_v_W(fnptr, arg1) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_W(_junk,fnptr,arg1); } while (0) + +#define CALL_FN_v_WW(fnptr, arg1,arg2) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0) + +#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0) + +#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0) + +#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0) + +#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0) + +#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \ + do { volatile unsigned long _junk; \ + CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0) + +/* ----------------- x86-{linux,darwin,solaris} ---------------- */ + +#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ + || defined(PLAT_x86_solaris) + +/* These regs are trashed by the hidden call. No need to mention eax + as gcc can already see that, plus causes gcc to bomb. */ +#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "movl %%esp,%%edi\n\t" \ + "andl $0xfffffff0,%%esp\n\t" +#define VALGRIND_RESTORE_STACK \ + "movl %%edi,%%esp\n\t" + +/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned + long) == 4. 
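+
+   Purely as an illustrative sketch (not part of the original header), a
+   wrapper built on top of these macros usually looks like this,
+   independent of platform:
+
+      int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)( int x, int y )
+      {
+         int    result;
+         OrigFn fn;
+         VALGRIND_GET_ORIG_FN(fn);        // stash nraddr (etc) of real foo
+         CALL_FN_W_WW(result, fn, x, y);  // forward the call, no redirect
+         return result;
+      }
+
+   The CALL_FN_ macro chosen must match the arity of the wrapped function.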
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned 
long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + 
"pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $12, %%esp\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $8, %%esp\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ 
volatile( \ + VALGRIND_ALIGN_STACK \ + "subl $4, %%esp\n\t" \ + "pushl 44(%%eax)\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "pushl 48(%%eax)\n\t" \ + "pushl 44(%%eax)\n\t" \ + "pushl 40(%%eax)\n\t" \ + "pushl 36(%%eax)\n\t" \ + "pushl 32(%%eax)\n\t" \ + "pushl 28(%%eax)\n\t" \ + "pushl 24(%%eax)\n\t" \ + "pushl 20(%%eax)\n\t" \ + "pushl 16(%%eax)\n\t" \ + "pushl 12(%%eax)\n\t" \ + "pushl 8(%%eax)\n\t" \ + "pushl 4(%%eax)\n\t" \ + "movl (%%eax), %%eax\n\t" /* target->%eax */ \ + VALGRIND_CALL_NOREDIR_EAX \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */ + +/* ---------------- amd64-{linux,darwin,solaris} --------------- */ + +#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ + || defined(PLAT_amd64_solaris) + +/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \ + "rdi", "r8", "r9", "r10", "r11" + +/* This is all pretty complex. It's so as to make stack unwinding + work reliably. See bug 243270. The basic problem is the sub and + add of 128 of %rsp in all of the following macros. If gcc believes + the CFA is in %rsp, then unwinding may fail, because what's at the + CFA is not what gcc "expected" when it constructs the CFIs for the + places where the macros are instantiated. + + But we can't just add a CFI annotation to increase the CFA offset + by 128, to match the sub of 128 from %rsp, because we don't know + whether gcc has chosen %rsp as the CFA at that point, or whether it + has chosen some other register (eg, %rbp). In the latter case, + adding a CFI annotation to change the CFA offset is simply wrong. + + So the solution is to get hold of the CFA using + __builtin_dwarf_cfa(), put it in a known register, and add a + CFI annotation to say what the register is. We choose %rbp for + this (perhaps perversely), because: + + (1) %rbp is already subject to unwinding. 
If a new register was + chosen then the unwinder would have to unwind it in all stack + traces, which is expensive, and + + (2) %rbp is already subject to precise exception updates in the + JIT. If a new register was chosen, we'd have to have precise + exceptions for it too, which reduces performance of the + generated code. + + However .. one extra complication. We can't just whack the result + of __builtin_dwarf_cfa() into %rbp and then add %rbp to the + list of trashed registers at the end of the inline assembly + fragments; gcc won't allow %rbp to appear in that list. Hence + instead we need to stash %rbp in %r15 for the duration of the asm, + and say that %r15 is trashed instead. gcc seems happy to go with + that. + + Oh .. and this all needs to be conditionalised so that it is + unchanged from before this commit, when compiled with older gccs + that don't support __builtin_dwarf_cfa. Furthermore, since + this header file is freestanding, it has to be independent of + config.h, and so the following conditionalisation cannot depend on + configure time checks. + + Although it's not clear from + 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)', + this expression excludes Darwin. + .cfi directives in Darwin assembly appear to be completely + different and I haven't investigated how they work. + + For even more entertainment value, note we have to use the + completely undocumented __builtin_dwarf_cfa(), which appears to + really compute the CFA, whereas __builtin_frame_address(0) claims + to but actually doesn't. See + https://bugs.kde.org/show_bug.cgi?id=243270#c47 +*/ +#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) +# define __FRAME_POINTER \ + ,"r"(__builtin_dwarf_cfa()) +# define VALGRIND_CFI_PROLOGUE \ + "movq %%rbp, %%r15\n\t" \ + "movq %2, %%rbp\n\t" \ + ".cfi_remember_state\n\t" \ + ".cfi_def_cfa rbp, 0\n\t" +# define VALGRIND_CFI_EPILOGUE \ + "movq %%r15, %%rbp\n\t" \ + ".cfi_restore_state\n\t" +#else +# define __FRAME_POINTER +# define VALGRIND_CFI_PROLOGUE +# define VALGRIND_CFI_EPILOGUE +#endif + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "movq %%rsp,%%r14\n\t" \ + "andq $0xfffffffffffffff0,%%rsp\n\t" +#define VALGRIND_RESTORE_STACK \ + "movq %%r14,%%rsp\n\t" + +/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned + long) == 8. */ + +/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_ + macros. In order not to trash the stack redzone, we need to drop + %rsp by 128 before the hidden call, and restore afterwards. The + nastyness is that it is only by luck that the stack still appears + to be unwindable during the hidden call - since then the behaviour + of any routine using this macro does not match what the CFI data + says. Sigh. + + Why is this important? Imagine that a wrapper has a stack + allocated local, and passes to the hidden call, a pointer to it. + Because gcc does not know about the hidden call, it may allocate + that local in the redzone. Unfortunately the hidden call may then + trash it before it comes to use it. So we must step clear of the + redzone, for the duration of the hidden call, to make it safe. 
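+
+   (Concretely, and only as an illustrative sketch that is not part of
+   the original header: in a wrapper such as
+
+      int I_WRAP_SONAME_FNNAME_ZU(NONE, bar)( void )
+      {
+         unsigned long res;
+         int           local = 42;   // gcc may place this in the redzone
+         OrigFn        fn;
+         VALGRIND_GET_ORIG_FN(fn);
+         CALL_FN_W_W(res, fn, (unsigned long)&local);
+         return (int)res;
+      }
+
+   the hidden call receives &local, so if 'local' lives in the redzone
+   and %rsp is not dropped by 128 first, the callee's own stack writes
+   could clobber it before it is read.)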
+ + Probably the same problem afflicts the other redzone-style ABIs too + (ppc64-linux); but for those, the stack is + self describing (none of this CFI nonsense) so at least messing + with the stack pointer doesn't give a danger of non-unwindable + stack. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = 
(unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $136,%%rsp\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ 
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $136,%%rsp\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ 
+ VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $136,%%rsp\n\t" \ + "pushq 88(%%rax)\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + VALGRIND_ALIGN_STACK \ + "subq $128,%%rsp\n\t" \ + "pushq 96(%%rax)\n\t" \ + "pushq 88(%%rax)\n\t" \ + "pushq 80(%%rax)\n\t" \ + "pushq 72(%%rax)\n\t" \ + "pushq 64(%%rax)\n\t" \ + "pushq 56(%%rax)\n\t" \ + "movq 48(%%rax), %%r9\n\t" \ + "movq 40(%%rax), %%r8\n\t" \ + "movq 32(%%rax), %%rcx\n\t" \ + "movq 24(%%rax), %%rdx\n\t" \ + "movq 16(%%rax), %%rsi\n\t" \ + "movq 8(%%rax), %%rdi\n\t" \ + "movq (%%rax), %%rax\n\t" /* target->%rax */ \ + VALGRIND_CALL_NOREDIR_RAX \ + VALGRIND_RESTORE_STACK \ + VALGRIND_CFI_EPILOGUE \ + : /*out*/ "=a" (_res) \ + : /*in*/ "a" (&_argvec[0]) 
__FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */ + +/* ------------------------ ppc32-linux ------------------------ */ + +#if defined(PLAT_ppc32_linux) + +/* This is useful for finding out about the on-stack stuff: + + extern int f9 ( int,int,int,int,int,int,int,int,int ); + extern int f10 ( int,int,int,int,int,int,int,int,int,int ); + extern int f11 ( int,int,int,int,int,int,int,int,int,int,int ); + extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int ); + + int g9 ( void ) { + return f9(11,22,33,44,55,66,77,88,99); + } + int g10 ( void ) { + return f10(11,22,33,44,55,66,77,88,99,110); + } + int g11 ( void ) { + return f11(11,22,33,44,55,66,77,88,99,110,121); + } + int g12 ( void ) { + return f12(11,22,33,44,55,66,77,88,99,110,121,132); + } +*/ + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "mr 28,1\n\t" \ + "rlwinm 1,1,0,0,27\n\t" +#define VALGRIND_RESTORE_STACK \ + "mr 1,28\n\t" + +/* These CALL_FN_ macros assume that on ppc32-linux, + sizeof(unsigned long) == 4. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, 
"r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", 
__CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-16\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = 
(__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-16\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-32\n\t" \ + /* arg11 */ \ + "lwz 3,44(11)\n\t" \ + "stw 3,16(1)\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; 
\ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + _argvec[12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "addi 1,1,-32\n\t" \ + /* arg12 */ \ + "lwz 3,48(11)\n\t" \ + "stw 3,20(1)\n\t" \ + /* arg11 */ \ + "lwz 3,44(11)\n\t" \ + "stw 3,16(1)\n\t" \ + /* arg10 */ \ + "lwz 3,40(11)\n\t" \ + "stw 3,12(1)\n\t" \ + /* arg9 */ \ + "lwz 3,36(11)\n\t" \ + "stw 3,8(1)\n\t" \ + /* args1-8 */ \ + "lwz 3,4(11)\n\t" /* arg1->r3 */ \ + "lwz 4,8(11)\n\t" \ + "lwz 5,12(11)\n\t" \ + "lwz 6,16(11)\n\t" /* arg4->r6 */ \ + "lwz 7,20(11)\n\t" \ + "lwz 8,24(11)\n\t" \ + "lwz 9,28(11)\n\t" \ + "lwz 10,32(11)\n\t" /* arg8->r10 */ \ + "lwz 11,0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + VALGRIND_RESTORE_STACK \ + "mr %0,3" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc32_linux */ + +/* ------------------------ ppc64-linux ------------------------ */ + +#if defined(PLAT_ppc64be_linux) + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "mr 28,1\n\t" \ + "rldicr 1,1,0,59\n\t" +#define VALGRIND_RESTORE_STACK \ + "mr 1,28\n\t" + +/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned + long) == 8. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+0]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+1]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+2]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+3]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 
2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+4]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+5]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+6]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 
48(11)\n\t" /* arg6->r8 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+7]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+8]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+9]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ 
\ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+10]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+11]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + 
_argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg11 */ \ + "ld 3,88(11)\n\t" \ + "std 3,128(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+12]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + _argvec[2+12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 11,%1\n\t" \ + "std 2,-16(11)\n\t" /* save tocptr */ \ + "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg12 */ \ + "ld 3,96(11)\n\t" \ + "std 3,136(1)\n\t" \ + /* arg11 */ \ + "ld 3,88(11)\n\t" \ + "std 3,128(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(11)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(11)\n\t" \ + "std 3,112(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(11)\n\t" /* arg1->r3 */ \ + "ld 4, 16(11)\n\t" /* arg2->r4 */ \ + "ld 5, 24(11)\n\t" /* arg3->r5 */ \ + "ld 6, 32(11)\n\t" /* arg4->r6 */ \ + "ld 7, 40(11)\n\t" /* arg5->r7 */ \ + "ld 8, 48(11)\n\t" /* arg6->r8 */ \ + "ld 9, 56(11)\n\t" /* arg7->r9 */ \ + "ld 10, 64(11)\n\t" /* arg8->r10 */ \ + "ld 11, 0(11)\n\t" /* target->r11 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ + "mr 11,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(11)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc64be_linux */ + +/* 
------------------------- ppc64le-linux ----------------------- */ +#if defined(PLAT_ppc64le_linux) + +/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "lr", "ctr", "xer", \ + "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ + "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ + "r11", "r12", "r13" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +#define VALGRIND_ALIGN_STACK \ + "mr 28,1\n\t" \ + "rldicr 1,1,0,59\n\t" +#define VALGRIND_RESTORE_STACK \ + "mr 1,28\n\t" + +/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned + long) == 8. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+0]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+1]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+2]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } 
while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+3]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+4]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+5]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + 
volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+6]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+7]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+8]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 
24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+9]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+10]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-128\n\t" /* expand stack frame */ \ + /* arg10 */ \ + "ld 3,80(12)\n\t" \ + "std 3,104(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 
48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+11]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg11 */ \ + "ld 3,88(12)\n\t" \ + "std 3,112(1)\n\t" \ + /* arg10 */ \ + "ld 3,80(12)\n\t" \ + "std 3,104(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3+12]; \ + volatile unsigned long _res; \ + /* _argvec[0] holds current r2 across the call */ \ + _argvec[1] = (unsigned long)_orig.r2; \ + _argvec[2] = (unsigned long)_orig.nraddr; \ + _argvec[2+1] = (unsigned long)arg1; \ + _argvec[2+2] = (unsigned long)arg2; \ + _argvec[2+3] = (unsigned long)arg3; \ + _argvec[2+4] = (unsigned long)arg4; \ + _argvec[2+5] = (unsigned long)arg5; \ + _argvec[2+6] = (unsigned long)arg6; \ + _argvec[2+7] = (unsigned long)arg7; \ + _argvec[2+8] = (unsigned long)arg8; \ + _argvec[2+9] = (unsigned long)arg9; \ + _argvec[2+10] = (unsigned long)arg10; \ + _argvec[2+11] = (unsigned long)arg11; \ + _argvec[2+12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "mr 12,%1\n\t" \ + "std 2,-16(12)\n\t" /* save tocptr */ \ + "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ + "addi 1,1,-144\n\t" /* expand stack frame */ \ + /* arg12 */ \ + "ld 3,96(12)\n\t" \ + "std 3,120(1)\n\t" \ + /* arg11 */ \ + "ld 3,88(12)\n\t" \ + "std 3,112(1)\n\t" \ + /* arg10 */ \ 
+ "ld 3,80(12)\n\t" \ + "std 3,104(1)\n\t" \ + /* arg9 */ \ + "ld 3,72(12)\n\t" \ + "std 3,96(1)\n\t" \ + /* args1-8 */ \ + "ld 3, 8(12)\n\t" /* arg1->r3 */ \ + "ld 4, 16(12)\n\t" /* arg2->r4 */ \ + "ld 5, 24(12)\n\t" /* arg3->r5 */ \ + "ld 6, 32(12)\n\t" /* arg4->r6 */ \ + "ld 7, 40(12)\n\t" /* arg5->r7 */ \ + "ld 8, 48(12)\n\t" /* arg6->r8 */ \ + "ld 9, 56(12)\n\t" /* arg7->r9 */ \ + "ld 10, 64(12)\n\t" /* arg8->r10 */ \ + "ld 12, 0(12)\n\t" /* target->r12 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ + "mr 12,%1\n\t" \ + "mr %0,3\n\t" \ + "ld 2,-16(12)\n\t" /* restore tocptr */ \ + VALGRIND_RESTORE_STACK \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[2]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_ppc64le_linux */ + +/* ------------------------- arm-linux ------------------------- */ + +#if defined(PLAT_arm_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4", "r12", "r14" + +/* Macros to save and align the stack before making a function + call and restore it afterwards as gcc may not keep the stack + pointer aligned if it doesn't realise calls are being made + to other functions. */ + +/* This is a bit tricky. We store the original stack pointer in r10 + as it is callee-saves. gcc doesn't allow the use of r11 for some + reason. Also, we can't directly "bic" the stack pointer in thumb + mode since r13 isn't an allowed register number in that context. + So use r4 as a temporary, since that is about to get trashed + anyway, just after each use of this macro. Side effect is we need + to be very careful about any future changes, since + VALGRIND_ALIGN_STACK simply assumes r4 is usable. */ +#define VALGRIND_ALIGN_STACK \ + "mov r10, sp\n\t" \ + "mov r4, sp\n\t" \ + "bic r4, r4, #7\n\t" \ + "mov sp, r4\n\t" +#define VALGRIND_RESTORE_STACK \ + "mov sp, r10\n\t" + +/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned + long) == 4. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, 
arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "push {r0} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "push {r0, r1} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "push {r0, r1, r2} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned 
long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "push {r0, r1, r2, r3} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #40] \n\t" \ + "push {r0} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile 
unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #4 \n\t" \ + "ldr r0, [%1, #40] \n\t" \ + "ldr r1, [%1, #44] \n\t" \ + "push {r0, r1} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr r0, [%1, #40] \n\t" \ + "ldr r1, [%1, #44] \n\t" \ + "ldr r2, [%1, #48] \n\t" \ + "push {r0, r1, r2} \n\t" \ + "ldr r0, [%1, #20] \n\t" \ + "ldr r1, [%1, #24] \n\t" \ + "ldr r2, [%1, #28] \n\t" \ + "ldr r3, [%1, #32] \n\t" \ + "ldr r4, [%1, #36] \n\t" \ + "push {r0, r1, r2, r3, r4} \n\t" \ + "ldr r0, [%1, #4] \n\t" \ + "ldr r1, [%1, #8] \n\t" \ + "ldr r2, [%1, #12] \n\t" \ + "ldr r3, [%1, #16] \n\t" \ + "ldr r4, [%1] \n\t" /* target->r4 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ + VALGRIND_RESTORE_STACK \ + "mov %0, r0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_arm_linux */ + +/* ------------------------ arm64-linux ------------------------ */ + +#if defined(PLAT_arm64_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS \ + "x0", "x1", "x2", "x3","x4", "x5", "x6", "x7", "x8", "x9", \ + "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \ + "x18", "x19", "x20", "x30", \ + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", \ + "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", \ + "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", \ + "v26", "v27", "v28", "v29", "v30", "v31" + +/* x21 is callee-saved, so we can use it to save and restore SP around + the hidden call. 
*/ +#define VALGRIND_ALIGN_STACK \ + "mov x21, sp\n\t" \ + "bic sp, x21, #15\n\t" +#define VALGRIND_RESTORE_STACK \ + "mov sp, x21\n\t" + +/* These CALL_FN_ macros assume that on arm64-linux, + sizeof(unsigned long) == 8. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : 
/*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned 
long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x20 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x20 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11) \ + do { \ + volatile OrigFn _orig = 
(orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x30 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1, #88] \n\t" \ + "str x8, [sp, #16] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10,arg11, \ + arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + VALGRIND_ALIGN_STACK \ + "sub sp, sp, #0x30 \n\t" \ + "ldr x0, [%1, #8] \n\t" \ + "ldr x1, [%1, #16] \n\t" \ + "ldr x2, [%1, #24] \n\t" \ + "ldr x3, [%1, #32] \n\t" \ + "ldr x4, [%1, #40] \n\t" \ + "ldr x5, [%1, #48] \n\t" \ + "ldr x6, [%1, #56] \n\t" \ + "ldr x7, [%1, #64] \n\t" \ + "ldr x8, [%1, #72] \n\t" \ + "str x8, [sp, #0] \n\t" \ + "ldr x8, [%1, #80] \n\t" \ + "str x8, [sp, #8] \n\t" \ + "ldr x8, [%1, #88] \n\t" \ + "str x8, [sp, #16] \n\t" \ + "ldr x8, [%1, #96] \n\t" \ + "str x8, [sp, #24] \n\t" \ + "ldr x8, [%1] \n\t" /* target->x8 */ \ + VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ + VALGRIND_RESTORE_STACK \ + "mov %0, x0" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_arm64_linux */ + +/* ------------------------- s390x-linux ------------------------- */ + +#if defined(PLAT_s390x_linux) + +/* Similar workaround as amd64 (see above), but we use r11 as frame + pointer and save the old r11 in r7. r11 might be used for + argvec, therefore we copy argvec in r1 since r1 is clobbered + after the call anyway. 
*/ +#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) +# define __FRAME_POINTER \ + ,"d"(__builtin_dwarf_cfa()) +# define VALGRIND_CFI_PROLOGUE \ + ".cfi_remember_state\n\t" \ + "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \ + "lgr 7,11\n\t" \ + "lgr 11,%2\n\t" \ + ".cfi_def_cfa r11, 0\n\t" +# define VALGRIND_CFI_EPILOGUE \ + "lgr 11, 7\n\t" \ + ".cfi_restore_state\n\t" +#else +# define __FRAME_POINTER +# define VALGRIND_CFI_PROLOGUE \ + "lgr 1,%1\n\t" +# define VALGRIND_CFI_EPILOGUE +#endif + +/* Nb: On s390 the stack pointer is properly aligned *at all times* + according to the s390 GCC maintainer. (The ABI specification is not + precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and + VALGRIND_RESTORE_STACK are not defined here. */ + +/* These regs are trashed by the hidden call. Note that we overwrite + r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the + function a proper return address. All others are ABI defined call + clobbers. */ +#if defined(__VX__) || defined(__S390_VX__) +#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \ + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \ + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \ + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \ + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" +#else +#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \ + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7" +#endif + +/* Nb: Although r11 is modified in the asm snippets below (inside + VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for + two reasons: + (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not + modified + (2) GCC will complain that r11 cannot appear inside a clobber section, + when compiled with -O -fno-omit-frame-pointer + */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 1, 0(1)\n\t" /* target->r1 */ \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +/* The call abi has the arguments in r2-r6 and stack */ +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1, arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + 
VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-160\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,160\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-168\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,168\n\t" \ + 
VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-176\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,176\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-184\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,184\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-192\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,192\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", 
__CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-200\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,200\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10, arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-208\n\t" \ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "mvc 200(8,15), 88(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,208\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)arg1; \ + _argvec[2] = (unsigned long)arg2; \ + _argvec[3] = (unsigned long)arg3; \ + _argvec[4] = (unsigned long)arg4; \ + _argvec[5] = (unsigned long)arg5; \ + _argvec[6] = (unsigned long)arg6; \ + _argvec[7] = (unsigned long)arg7; \ + _argvec[8] = (unsigned long)arg8; \ + _argvec[9] = (unsigned long)arg9; \ + _argvec[10] = (unsigned long)arg10; \ + _argvec[11] = (unsigned long)arg11; \ + _argvec[12] = (unsigned long)arg12; \ + __asm__ volatile( \ + VALGRIND_CFI_PROLOGUE \ + "aghi 15,-216\n\t" 
\ + "lg 2, 8(1)\n\t" \ + "lg 3,16(1)\n\t" \ + "lg 4,24(1)\n\t" \ + "lg 5,32(1)\n\t" \ + "lg 6,40(1)\n\t" \ + "mvc 160(8,15), 48(1)\n\t" \ + "mvc 168(8,15), 56(1)\n\t" \ + "mvc 176(8,15), 64(1)\n\t" \ + "mvc 184(8,15), 72(1)\n\t" \ + "mvc 192(8,15), 80(1)\n\t" \ + "mvc 200(8,15), 88(1)\n\t" \ + "mvc 208(8,15), 96(1)\n\t" \ + "lg 1, 0(1)\n\t" \ + VALGRIND_CALL_NOREDIR_R1 \ + "aghi 15,216\n\t" \ + VALGRIND_CFI_EPILOGUE \ + "lgr %0, 2\n\t" \ + : /*out*/ "=d" (_res) \ + : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ + : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + + +#endif /* PLAT_s390x_linux */ + +/* ------------------------- mips32-linux ----------------------- */ + +#if defined(PLAT_mips32_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ +"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ +"$25", "$31" + +/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned + long) == 4. */ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16\n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" /* arg1*/ \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned 
long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "subu $29, $29, 16 \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 16 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 24\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 24 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 32\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "nop\n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) 
\n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 32 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 32\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 32 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 40\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 40 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned 
long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 40\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 40 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 48\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 48 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 48\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ 
+ "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 44(%1) \n\t" \ + "sw $4, 40($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 48 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + "subu $29, $29, 8 \n\t" \ + "sw $28, 0($29) \n\t" \ + "sw $31, 4($29) \n\t" \ + "lw $4, 20(%1) \n\t" \ + "subu $29, $29, 56\n\t" \ + "sw $4, 16($29) \n\t" \ + "lw $4, 24(%1) \n\t" \ + "sw $4, 20($29) \n\t" \ + "lw $4, 28(%1) \n\t" \ + "sw $4, 24($29) \n\t" \ + "lw $4, 32(%1) \n\t" \ + "sw $4, 28($29) \n\t" \ + "lw $4, 36(%1) \n\t" \ + "sw $4, 32($29) \n\t" \ + "lw $4, 40(%1) \n\t" \ + "sw $4, 36($29) \n\t" \ + "lw $4, 44(%1) \n\t" \ + "sw $4, 40($29) \n\t" \ + "lw $4, 48(%1) \n\t" \ + "sw $4, 44($29) \n\t" \ + "lw $4, 4(%1) \n\t" \ + "lw $5, 8(%1) \n\t" \ + "lw $6, 12(%1) \n\t" \ + "lw $7, 16(%1) \n\t" \ + "lw $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "addu $29, $29, 56 \n\t" \ + "lw $28, 0($29) \n\t" \ + "lw $31, 4($29) \n\t" \ + "addu $29, $29, 8 \n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_mips32_linux */ + +/* ------------------------- nanomips-linux -------------------- */ + +#if defined(PLAT_nanomips_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "$t4", "$t5", "$a0", "$a1", "$a2", \ +"$a3", "$a4", "$a5", "$a6", "$a7", "$t0", "$t1", "$t2", "$t3", \ +"$t8","$t9", "$at" + +/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned + long) == 4. 
*/ + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[1]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[2]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[3]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[4]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[5]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[6]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + 
"lw $a4,20(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[7]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[8]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + "lw $a6,28(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[9]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + __asm__ volatile( \ + "lw $t9, 0(%1)\n\t" \ + "lw $a0, 4(%1)\n\t" \ + "lw $a1, 8(%1)\n\t" \ + "lw $a2,12(%1)\n\t" \ + "lw $a3,16(%1)\n\t" \ + "lw $a4,20(%1)\n\t" \ + "lw $a5,24(%1)\n\t" \ + "lw $a6,28(%1)\n\t" \ + "lw $a7,32(%1)\n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[10]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); 
\ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[11]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long _argvec[12]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9,44(%1) \n\t" \ + "sw $t9, 8($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long 
_argvec[13]; \ + volatile unsigned long _res; \ + _argvec[0] = (unsigned long)_orig.nraddr; \ + _argvec[1] = (unsigned long)(arg1); \ + _argvec[2] = (unsigned long)(arg2); \ + _argvec[3] = (unsigned long)(arg3); \ + _argvec[4] = (unsigned long)(arg4); \ + _argvec[5] = (unsigned long)(arg5); \ + _argvec[6] = (unsigned long)(arg6); \ + _argvec[7] = (unsigned long)(arg7); \ + _argvec[8] = (unsigned long)(arg8); \ + _argvec[9] = (unsigned long)(arg9); \ + _argvec[10] = (unsigned long)(arg10); \ + _argvec[11] = (unsigned long)(arg11); \ + _argvec[12] = (unsigned long)(arg12); \ + __asm__ volatile( \ + "addiu $sp, $sp, -16 \n\t" \ + "lw $t9,36(%1) \n\t" \ + "sw $t9, 0($sp) \n\t" \ + "lw $t9,40(%1) \n\t" \ + "sw $t9, 4($sp) \n\t" \ + "lw $t9,44(%1) \n\t" \ + "sw $t9, 8($sp) \n\t" \ + "lw $t9,48(%1) \n\t" \ + "sw $t9,12($sp) \n\t" \ + "lw $t9, 0(%1) \n\t" \ + "lw $a0, 4(%1) \n\t" \ + "lw $a1, 8(%1) \n\t" \ + "lw $a2,12(%1) \n\t" \ + "lw $a3,16(%1) \n\t" \ + "lw $a4,20(%1) \n\t" \ + "lw $a5,24(%1) \n\t" \ + "lw $a6,28(%1) \n\t" \ + "lw $a7,32(%1) \n\t" \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $a0 \n\t" \ + "addiu $sp, $sp, 16 \n\t" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) _res; \ + } while (0) + +#endif /* PLAT_nanomips_linux */ + +/* ------------------------- mips64-linux ------------------------- */ + +#if defined(PLAT_mips64_linux) + +/* These regs are trashed by the hidden call. */ +#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ +"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ +"$25", "$31" + +/* These CALL_FN_ macros assume that on mips64-linux, + sizeof(long long) == 8. */ + +#define MIPS64_LONG2REG_CAST(x) ((long long)(long)x) + +#define CALL_FN_W_v(lval, orig) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[1]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + __asm__ volatile( \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "0" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_W(lval, orig, arg1) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[2]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" /* arg1*/ \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[3]; \ + volatile unsigned long long _res; \ + _argvec[0] = _orig.nraddr; \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + + +#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ + do { \ + 
volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[4]; \ + volatile unsigned long long _res; \ + _argvec[0] = _orig.nraddr; \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[5]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[6]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[7]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[8]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = 
MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[9]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + __asm__ volatile( \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1) \n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[10]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + __asm__ volatile( \ + "dsubu $29, $29, 8\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 8\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ + arg7,arg8,arg9,arg10) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[11]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + 
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ + __asm__ volatile( \ + "dsubu $29, $29, 16\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 16\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[12]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ + _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \ + __asm__ volatile( \ + "dsubu $29, $29, 24\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 88(%1)\n\t" \ + "sd $4, 16($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 24\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ + arg6,arg7,arg8,arg9,arg10, \ + arg11,arg12) \ + do { \ + volatile OrigFn _orig = (orig); \ + volatile unsigned long long _argvec[13]; \ + volatile unsigned long long _res; \ + _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ + _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ + _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ + _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ + _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ + _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ + _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ + _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ + _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ + _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ + _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ + _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \ + _argvec[12] = MIPS64_LONG2REG_CAST(arg12); \ + __asm__ volatile( \ + "dsubu $29, $29, 32\n\t" \ + "ld $4, 72(%1)\n\t" \ + "sd $4, 0($29)\n\t" \ + "ld $4, 80(%1)\n\t" \ + "sd $4, 8($29)\n\t" \ + "ld $4, 88(%1)\n\t" \ + "sd $4, 16($29)\n\t" \ + "ld $4, 96(%1)\n\t" \ + "sd $4, 24($29)\n\t" \ + "ld $4, 8(%1)\n\t" \ + "ld $5, 16(%1)\n\t" \ + "ld $6, 24(%1)\n\t" \ + "ld $7, 32(%1)\n\t" \ + "ld $8, 40(%1)\n\t" \ + "ld $9, 48(%1)\n\t" \ + "ld $10, 
56(%1)\n\t" \ + "ld $11, 64(%1)\n\t" \ + "ld $25, 0(%1)\n\t" /* target->t9 */ \ + VALGRIND_CALL_NOREDIR_T9 \ + "daddu $29, $29, 32\n\t" \ + "move %0, $2\n" \ + : /*out*/ "=r" (_res) \ + : /*in*/ "r" (&_argvec[0]) \ + : /*trash*/ "memory", __CALLER_SAVED_REGS \ + ); \ + lval = (__typeof__(lval)) (long)_res; \ + } while (0) + +#endif /* PLAT_mips64_linux */ + +/* ------------------------------------------------------------------ */ +/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */ +/* */ +/* ------------------------------------------------------------------ */ + +/* Some request codes. There are many more of these, but most are not + exposed to end-user view. These are the public ones, all of the + form 0x1000 + small_number. + + Core ones are in the range 0x00000000--0x0000ffff. The non-public + ones start at 0x2000. +*/ + +/* These macros are used by tools -- they must be public, but don't + embed them into other programs. */ +#define VG_USERREQ_TOOL_BASE(a,b) \ + ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16)) +#define VG_IS_TOOL_USERREQ(a, b, v) \ + (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000)) + +/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! + This enum comprises an ABI exported by Valgrind to programs + which use client requests. DO NOT CHANGE THE NUMERIC VALUES OF THESE + ENTRIES, NOR DELETE ANY -- add new ones at the end of the most + relevant group. */ +typedef + enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001, + VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002, + + /* These allow any function to be called from the simulated + CPU but run on the real CPU. Nb: the first arg passed to + the function is always the ThreadId of the running + thread! So CLIENT_CALL0 actually requires a 1 arg + function, etc. */ + VG_USERREQ__CLIENT_CALL0 = 0x1101, + VG_USERREQ__CLIENT_CALL1 = 0x1102, + VG_USERREQ__CLIENT_CALL2 = 0x1103, + VG_USERREQ__CLIENT_CALL3 = 0x1104, + + /* Can be useful in regression testing suites -- eg. can + send Valgrind's output to /dev/null and still count + errors. */ + VG_USERREQ__COUNT_ERRORS = 0x1201, + + /* Allows the client program and/or gdbserver to execute a monitor + command. */ + VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202, + + /* Allows the client program to change a dynamic command line + option. */ + VG_USERREQ__CLO_CHANGE = 0x1203, + + /* These are useful and can be interpreted by any tool that + tracks malloc() et al, by using vg_replace_malloc.c. */ + VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301, + VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b, + VG_USERREQ__FREELIKE_BLOCK = 0x1302, + /* Memory pool support. */ + VG_USERREQ__CREATE_MEMPOOL = 0x1303, + VG_USERREQ__DESTROY_MEMPOOL = 0x1304, + VG_USERREQ__MEMPOOL_ALLOC = 0x1305, + VG_USERREQ__MEMPOOL_FREE = 0x1306, + VG_USERREQ__MEMPOOL_TRIM = 0x1307, + VG_USERREQ__MOVE_MEMPOOL = 0x1308, + VG_USERREQ__MEMPOOL_CHANGE = 0x1309, + VG_USERREQ__MEMPOOL_EXISTS = 0x130a, + + /* Allow printfs to valgrind log. */ + /* The first two pass the va_list argument by value, which + assumes it is the same size as or smaller than a UWord, + which generally isn't the case. Hence are deprecated. + The second two pass the vargs by reference and so are + immune to this problem. */ + /* both :: char* fmt, va_list vargs (DEPRECATED) */ + VG_USERREQ__PRINTF = 0x1401, + VG_USERREQ__PRINTF_BACKTRACE = 0x1402, + /* both :: char* fmt, va_list* vargs */ + VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404, + + /* Stack support. 
*/ + VG_USERREQ__STACK_REGISTER = 0x1501, + VG_USERREQ__STACK_DEREGISTER = 0x1502, + VG_USERREQ__STACK_CHANGE = 0x1503, + + /* Wine support */ + VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601, + + /* Querying of debug info. */ + VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701, + + /* Disable/enable error reporting level. Takes a single + Word arg which is the delta to this thread's error + disablement indicator. Hence 1 disables or further + disables errors, and -1 moves back towards enablement. + Other values are not allowed. */ + VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801, + + /* Some requests used for Valgrind internal, such as + self-test or self-hosting. */ + /* Initialise IR injection */ + VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901, + /* Used by Inner Valgrind to inform Outer Valgrind where to + find the list of inner guest threads */ + VG_USERREQ__INNER_THREADS = 0x1902 + } Vg_ClientRequest; + +#if !defined(__GNUC__) +# define __extension__ /* */ +#endif + + +/* Returns the number of Valgrinds this code is running under. That + is, 0 if running natively, 1 if running under Valgrind, 2 if + running under Valgrind which is running under another Valgrind, + etc. */ +#define RUNNING_ON_VALGRIND \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \ + VG_USERREQ__RUNNING_ON_VALGRIND, \ + 0, 0, 0, 0, 0) \ + + +/* Discard translation of code in the range [_qzz_addr .. _qzz_addr + + _qzz_len - 1]. Useful if you are debugging a JITter or some such, + since it provides a way to make sure valgrind will retranslate the + invalidated area. Returns no value. */ +#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \ + _qzz_addr, _qzz_len, 0, 0, 0) + +#define VALGRIND_INNER_THREADS(_qzz_addr) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__INNER_THREADS, \ + _qzz_addr, 0, 0, 0, 0) + + +/* These requests are for getting Valgrind itself to print something. + Possibly with a backtrace. This is a really ugly hack. The return value + is the number of characters printed, excluding the "**** " part at the + start and the backtrace (if present). */ + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) +/* Modern GCC will optimize the static routine out if unused, + and unused attribute will shut down warnings about it. */ +static int VALGRIND_PRINTF(const char *format, ...) + __attribute__((format(__printf__, 1, 2), __unused__)); +#endif +static int +#if defined(_MSC_VER) +__inline +#endif +VALGRIND_PRINTF(const char *format, ...) +{ +#if defined(NVALGRIND) + (void)format; + return 0; +#else /* NVALGRIND */ +#if defined(_MSC_VER) || defined(__MINGW64__) + uintptr_t _qzz_res; +#else + unsigned long _qzz_res; +#endif + va_list vargs; + va_start(vargs, format); +#if defined(_MSC_VER) || defined(__MINGW64__) + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_VALIST_BY_REF, + (uintptr_t)format, + (uintptr_t)&vargs, + 0, 0, 0); +#else + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_VALIST_BY_REF, + (unsigned long)format, + (unsigned long)&vargs, + 0, 0, 0); +#endif + va_end(vargs); + return (int)_qzz_res; +#endif /* NVALGRIND */ +} + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) +static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...) + __attribute__((format(__printf__, 1, 2), __unused__)); +#endif +static int +#if defined(_MSC_VER) +__inline +#endif +VALGRIND_PRINTF_BACKTRACE(const char *format, ...) 
+{ +#if defined(NVALGRIND) + (void)format; + return 0; +#else /* NVALGRIND */ +#if defined(_MSC_VER) || defined(__MINGW64__) + uintptr_t _qzz_res; +#else + unsigned long _qzz_res; +#endif + va_list vargs; + va_start(vargs, format); +#if defined(_MSC_VER) || defined(__MINGW64__) + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, + (uintptr_t)format, + (uintptr_t)&vargs, + 0, 0, 0); +#else + _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, + VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, + (unsigned long)format, + (unsigned long)&vargs, + 0, 0, 0); +#endif + va_end(vargs); + return (int)_qzz_res; +#endif /* NVALGRIND */ +} + + +/* These requests allow control to move from the simulated CPU to the + real CPU, calling an arbitrary function. + + Note that the current ThreadId is inserted as the first argument. + So this call: + + VALGRIND_NON_SIMD_CALL2(f, arg1, arg2) + + requires f to have this signature: + + Word f(Word tid, Word arg1, Word arg2) + + where "Word" is a word-sized type. + + Note that these client requests are not entirely reliable. For example, + if you call a function with them that subsequently calls printf(), + there's a high chance Valgrind will crash. Generally, your prospects of + these working are made higher if the called function does not refer to + any global variables, and does not refer to any libc or other functions + (printf et al). Any kind of entanglement with libc or dynamic linking is + likely to have a bad outcome, for tricky reasons which we've grappled + with a lot in the past. +*/ +#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL0, \ + _qyy_fn, \ + 0, 0, 0, 0) + +#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL1, \ + _qyy_fn, \ + _qyy_arg1, 0, 0, 0) + +#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL2, \ + _qyy_fn, \ + _qyy_arg1, _qyy_arg2, 0, 0) + +#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ + VG_USERREQ__CLIENT_CALL3, \ + _qyy_fn, \ + _qyy_arg1, _qyy_arg2, \ + _qyy_arg3, 0) + + +/* Counts the number of errors that have been recorded by a tool. Nb: + the tool must record the errors with VG_(maybe_record_error)() or + VG_(unique_error)() for them to be counted. */ +#define VALGRIND_COUNT_ERRORS \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \ + 0 /* default return */, \ + VG_USERREQ__COUNT_ERRORS, \ + 0, 0, 0, 0, 0) + +/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing + when heap blocks are allocated in order to give accurate results. This + happens automatically for the standard allocator functions such as + malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete, + delete[], etc. + + But if your program uses a custom allocator, this doesn't automatically + happen, and Valgrind will not do as well. For example, if you allocate + superblocks with mmap() and then allocates chunks of the superblocks, all + Valgrind's observations will be at the mmap() level and it won't know that + the chunks should be considered separate entities. In Memcheck's case, + that means you probably won't get heap block overrun detection (because + there won't be redzones marked as unaddressable) and you definitely won't + get any leak detection. 
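+
+   As a quick preview (an editor's illustrative sketch, not part of the
+   original Valgrind documentation; my_alloc_raw() and my_free_raw() are
+   hypothetical placeholders for whatever actually hands out and reclaims
+   the memory), a minimal custom allocator annotated with the requests
+   documented below, using no redzones (rzB == 0) and returning
+   uninitialised memory (is_zeroed == 0), might look like this:
+
+      void* my_malloc(size_t sizeB)
+      {
+         void* p = my_alloc_raw(sizeB);
+         VALGRIND_MALLOCLIKE_BLOCK(p, sizeB, 0, 0);
+         return p;
+      }
+
+      void my_free(void* p)
+      {
+         VALGRIND_FREELIKE_BLOCK(p, 0);
+         my_free_raw(p);
+      }
+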
+ + The following client requests allow a custom allocator to be annotated so + that it can be handled accurately by Valgrind. + + VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated + by a malloc()-like function. For Memcheck (an illustrative case), this + does two things: + + - It records that the block has been allocated. This means any addresses + within the block mentioned in error messages will be + identified as belonging to the block. It also means that if the block + isn't freed it will be detected by the leak checker. + + - It marks the block as being addressable and undefined (if 'is_zeroed' is + not set), or addressable and defined (if 'is_zeroed' is set). This + controls how accesses to the block by the program are handled. + + 'addr' is the start of the usable block (ie. after any + redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator + can apply redzones -- these are blocks of padding at the start and end of + each block. Adding redzones is recommended as it makes it much more likely + Valgrind will spot block overruns. `is_zeroed' indicates if the memory is + zeroed (or filled with another predictable value), as is the case for + calloc(). + + VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a + heap block -- that will be used by the client program -- is allocated. + It's best to put it at the outermost level of the allocator if possible; + for example, if you have a function my_alloc() which calls + internal_alloc(), and the client request is put inside internal_alloc(), + stack traces relating to the heap block will contain entries for both + my_alloc() and internal_alloc(), which is probably not what you want. + + For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out + custom blocks from within a heap block, B, that has been allocated with + malloc/calloc/new/etc, then block B will be *ignored* during leak-checking + -- the custom blocks will take precedence. + + VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For + Memcheck, it does two things: + + - It records that the block has been deallocated. This assumes that the + block was annotated as having been allocated via + VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. + + - It marks the block as being unaddressable. + + VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a + heap block is deallocated. + + VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For + Memcheck, it does four things: + + - It records that the size of a block has been changed. This assumes that + the block was annotated as having been allocated via + VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. + + - If the block shrunk, it marks the freed memory as being unaddressable. + + - If the block grew, it marks the new area as undefined and defines a red + zone past the end of the new block. + + - The V-bits of the overlap between the old and the new block are preserved. + + VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block + and before deallocation of the old block. + + In many cases, these three client requests will not be enough to get your + allocator working well with Memcheck. More specifically, if your allocator + writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call + will be necessary to mark the memory as addressable just before the zeroing + occurs, otherwise you'll get a lot of invalid write errors. 
For example, + you'll need to do this if your allocator recycles freed blocks, but it + zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK). + Alternatively, if your allocator reuses freed blocks for allocator-internal + data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary. + + Really, what's happening is a blurring of the lines between the client + program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the + memory should be considered unaddressable to the client program, but the + allocator knows more than the rest of the client program and so may be able + to safely access it. Extra client requests are necessary for Valgrind to + understand the distinction between the allocator and the rest of the + program. + + Ignored if addr == 0. +*/ +#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \ + addr, sizeB, rzB, is_zeroed, 0) + +/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. + Ignored if addr == 0. +*/ +#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \ + addr, oldSizeB, newSizeB, rzB, 0) + +/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. + Ignored if addr == 0. +*/ +#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \ + addr, rzB, 0, 0, 0) + +/* Create a memory pool. */ +#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ + pool, rzB, is_zeroed, 0, 0) + +/* Create a memory pool with some flags specifying extended behaviour. + When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL. + + The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory + associated with the pool using VALGRIND_MEMPOOL_ALLOC will be used + by the application as superblocks to dole out MALLOC_LIKE blocks using + VALGRIND_MALLOCLIKE_BLOCK. In other words, a meta pool is a "2 levels" + pool : first level is the blocks described by VALGRIND_MEMPOOL_ALLOC. + The second level blocks are described using VALGRIND_MALLOCLIKE_BLOCK. + Note that the association between the pool and the second level blocks + is implicit : second level blocks will be located inside first level + blocks. It is necessary to use the VALGRIND_MEMPOOL_METAPOOL flag + for such 2 levels pools, as otherwise valgrind will detect overlapping + memory blocks, and will abort execution (e.g. during leak search). + + Such a meta pool can also be marked as an 'auto free' pool using the flag + VALGRIND_MEMPOOL_AUTO_FREE, which must be OR-ed together with the + VALGRIND_MEMPOOL_METAPOOL. For an 'auto free' pool, VALGRIND_MEMPOOL_FREE + will automatically free the second level blocks that are contained + inside the first level block freed with VALGRIND_MEMPOOL_FREE. + In other words, calling VALGRIND_MEMPOOL_FREE will cause implicit calls + to VALGRIND_FREELIKE_BLOCK for all the second level blocks included + in the first level block. + Note: it is an error to use the VALGRIND_MEMPOOL_AUTO_FREE flag + without the VALGRIND_MEMPOOL_METAPOOL flag. +*/ +#define VALGRIND_MEMPOOL_AUTO_FREE 1 +#define VALGRIND_MEMPOOL_METAPOOL 2 +#define VALGRIND_CREATE_MEMPOOL_EXT(pool, rzB, is_zeroed, flags) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ + pool, rzB, is_zeroed, flags, 0) + +/* Destroy a memory pool. 
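+
+   A typical pool lifecycle (an editor's illustrative sketch; 'pool' is
+   simply any address the allocator uses as a stable handle, and 'piece'
+   and 'piece_size' are hypothetical) pairs these requests as follows:
+   create the pool once, announce every carved-out piece with
+   VALGRIND_MEMPOOL_ALLOC, retire it with VALGRIND_MEMPOOL_FREE, and
+   destroy the pool last:
+
+      VALGRIND_CREATE_MEMPOOL(pool, 0, 0);
+      VALGRIND_MEMPOOL_ALLOC(pool, piece, piece_size);
+      VALGRIND_MEMPOOL_FREE(pool, piece);
+      VALGRIND_DESTROY_MEMPOOL(pool);
+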
*/ +#define VALGRIND_DESTROY_MEMPOOL(pool) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \ + pool, 0, 0, 0, 0) + +/* Associate a piece of memory with a memory pool. */ +#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \ + pool, addr, size, 0, 0) + +/* Disassociate a piece of memory from a memory pool. */ +#define VALGRIND_MEMPOOL_FREE(pool, addr) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \ + pool, addr, 0, 0, 0) + +/* Disassociate any pieces outside a particular range. */ +#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \ + pool, addr, size, 0, 0) + +/* Resize and/or move a piece associated with a memory pool. */ +#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \ + poolA, poolB, 0, 0, 0) + +/* Resize and/or move a piece associated with a memory pool. */ +#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \ + pool, addrA, addrB, size, 0) + +/* Return 1 if a mempool exists, else 0. */ +#define VALGRIND_MEMPOOL_EXISTS(pool) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__MEMPOOL_EXISTS, \ + pool, 0, 0, 0, 0) + +/* Mark a piece of memory as being a stack. Returns a stack id. + start is the lowest addressable stack byte, end is the highest + addressable stack byte. */ +#define VALGRIND_STACK_REGISTER(start, end) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__STACK_REGISTER, \ + start, end, 0, 0, 0) + +/* Unmark the piece of memory associated with a stack id as being a + stack. */ +#define VALGRIND_STACK_DEREGISTER(id) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \ + id, 0, 0, 0, 0) + +/* Change the start and end address of the stack id. + start is the new lowest addressable stack byte, end is the new highest + addressable stack byte. */ +#define VALGRIND_STACK_CHANGE(id, start, end) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \ + id, start, end, 0, 0) + +/* Load PDB debug info for Wine PE image_map. */ +#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \ + fd, ptr, total_size, delta, 0) + +/* Map a code address to a source file name and line number. buf64 + must point to a 64-byte buffer in the caller's address space. The + result will be dumped in there and is guaranteed to be zero + terminated. If no info is found, the first byte is set to zero. */ +#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \ + (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ + VG_USERREQ__MAP_IP_TO_SRCLOC, \ + addr, buf64, 0, 0, 0) + +/* Disable error reporting for this thread. Behaves in a stack like + way, so you can safely call this multiple times provided that + VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times + to re-enable reporting. The first call of this macro disables + reporting. Subsequent calls have no effect except to increase the + number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable + reporting. Child threads do not inherit this setting from their + parents -- they are always created with reporting enabled. */ +#define VALGRIND_DISABLE_ERROR_REPORTING \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ + 1, 0, 0, 0, 0) + +/* Re-enable error reporting, as per comments on + VALGRIND_DISABLE_ERROR_REPORTING. 
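+
+   For instance (an illustrative sketch; call_known_noisy_code() is a
+   hypothetical stand-in for whatever would otherwise flood the log with
+   reports you have decided to ignore), the two requests are used as a
+   matched pair around the offending code:
+
+      VALGRIND_DISABLE_ERROR_REPORTING;
+      call_known_noisy_code();
+      VALGRIND_ENABLE_ERROR_REPORTING;
+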
*/ +#define VALGRIND_ENABLE_ERROR_REPORTING \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ + -1, 0, 0, 0, 0) + +/* Execute a monitor command from the client program. + If a connection is opened with GDB, the output will be sent + according to the output mode set for vgdb. + If no connection is opened, output will go to the log output. + Returns 1 if command not recognised, 0 otherwise. */ +#define VALGRIND_MONITOR_COMMAND(command) \ + VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \ + command, 0, 0, 0, 0) + + +/* Change the value of a dynamic command line option. + Note that unknown or not dynamically changeable options + will cause a warning message to be output. */ +#define VALGRIND_CLO_CHANGE(option) \ + VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CLO_CHANGE, \ + option, 0, 0, 0, 0) + + +#undef PLAT_x86_darwin +#undef PLAT_amd64_darwin +#undef PLAT_x86_win32 +#undef PLAT_amd64_win64 +#undef PLAT_x86_linux +#undef PLAT_amd64_linux +#undef PLAT_ppc32_linux +#undef PLAT_ppc64be_linux +#undef PLAT_ppc64le_linux +#undef PLAT_arm_linux +#undef PLAT_s390x_linux +#undef PLAT_mips32_linux +#undef PLAT_mips64_linux +#undef PLAT_nanomips_linux +#undef PLAT_x86_solaris +#undef PLAT_amd64_solaris + +#endif /* __VALGRIND_H */
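+
+/* Editor's appendix (an illustrative sketch, not part of the upstream
+   header; it is kept inside "#if 0" so it is never compiled).  It shows
+   how client code typically uses the requests declared above.  In real
+   code this would live in the application, which would #include
+   "valgrind.h"; "foo" is a hypothetical two-argument function in the
+   main executable (encoded soname NONE). */
+#if 0
+#include <stdio.h>
+
+/* RUNNING_ON_VALGRIND is 0 when running natively, 1 under Valgrind,
+   2 under Valgrind-on-Valgrind, and so on. */
+static void report_valgrind_nesting(void)
+{
+   unsigned depth = RUNNING_ON_VALGRIND;
+   if (depth > 0)
+      VALGRIND_PRINTF("running under %u level(s) of Valgrind\n", depth);
+}
+
+/* A wrapper for foo(): fetch the non-redirected address of the original
+   function, then call it through the platform-specific CALL_FN_W_WW
+   macro defined earlier in this header. */
+int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x, int y)
+{
+   int    r;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   CALL_FN_W_WW(r, fn, x, y);
+   printf("foo(%d, %d) = %d\n", x, y, r);
+   return r;
+}
+#endif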