applied-ai-018 committed
Commit dc9dbdd · verified · 1 Parent(s): 6d7e0eb

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__init__.py +55 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/closure.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/computation.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/config.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/debug.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/device_context.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/extract_compiled_graph.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/ir_cache.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/metrics.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/_lazy/computation.py +26 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/_lazy/config.py +16 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/_lazy/debug.py +21 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py +223 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py +48 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/amp/__init__.py +1 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/amp/autocast_mode.py +436 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/cpu/__init__.py +157 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_config.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/const_fold.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/graph_gradual_typechecker.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/normalize.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/optimization.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/refinement_types.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/schema_type_annotation.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/unify_refinements.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/validator.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_transformation.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/operation.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/transform_to_z3.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/core.py +118 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/dispatch.py +6 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/more.py +117 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/unification_tools.py +395 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/masked/__init__.py +37 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/_docs.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/_ops.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/masked/_docs.py +1177 -0
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__init__.py ADDED
@@ -0,0 +1,55 @@
+import threading
+
+import torch._C._lazy
+from torch.utils._pytree import tree_flatten, tree_unflatten
+
+from .closure import add_step_closure, run_step_closures
+
+
+def mark_step(device: str = "", wait=False):
+    """Triggers a mark step, which amounts to
+    - collecting a group of 'live' lazy tensors to index into the compilation cache
+      (lowering/compiling their IR graphs if not cached)
+    - kicking off execution of the compiled function
+    - (optionally, wait=True) waiting for cpu-side execution to complete (does not sync the accelerator)
+    """
+    # TODO(whc) expand this to include backend hooks and align with XLA backend needs
+    torch._C._lazy._mark_step(device, [], wait=wait)
+
+    run_step_closures()
+
+
+def wait_device_ops(devices=None):
+    """Waits for all the async operations on the given devices to complete.
+    Args:
+        devices (string..., optional): The devices whose async ops need to be waited
+            for. If empty, all the local devices will be waited for.
+    """
+    if devices is None:
+        devices = []
+    torch._C._lazy._wait_device_ops(devices=devices)
+
+
+def sync_multi(tensors, devices):
+    """
+    Sync the list of lazy tensors so their IR gets lowered for the active backend
+    and the compiled computation graphs get cached.
+    """
+    torch._C._lazy._sync_multi(tensors, devices)
+
+
+def get_tensor_id(tensor):
+    """Return a unique id of the lazy tensor maintained by LTC."""
+    return torch._C._lazy._get_tensor_id(tensor)
+
+
+def to_cpu(tensors, devices=None):
+    devices = devices or ["lazy"]
+
+    flattened, spec = tree_flatten(tensors)
+    sync_multi(flattened, devices)
+    return tree_unflatten([t.to("cpu") for t in flattened], spec)
+
+
+def save(tensors, *args, **kwargs):
+    torch.save(to_cpu(tensors), *args, **kwargs)
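
Taken together, these helpers are the user-facing surface of Lazy Tensor Core (LTC). A minimal usage sketch, assuming a PyTorch build that ships the TorchScript lazy backend (initialized via the sibling torch._lazy.ts_backend module whose .pyc is listed in this commit):

    import torch
    import torch._lazy
    import torch._lazy.ts_backend

    torch._lazy.ts_backend.init()          # register the TS lazy backend

    x = torch.randn(4, 4, device="lazy")   # ops are recorded into IR, not executed
    y = (x @ x).relu()

    torch._lazy.mark_step(wait=True)       # compile + run everything traced so far
    y_cpu = torch._lazy.to_cpu(y)          # sync, then materialize on the CPU
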
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.37 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/closure.cpython-310.pyc ADDED
Binary file (5.29 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/computation.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/config.cpython-310.pyc ADDED
Binary file (815 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/debug.cpython-310.pyc ADDED
Binary file (943 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/device_context.cpython-310.pyc ADDED
Binary file (1.03 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/extract_compiled_graph.cpython-310.pyc ADDED
Binary file (7.22 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/ir_cache.cpython-310.pyc ADDED
Binary file (644 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/metrics.cpython-310.pyc ADDED
Binary file (983 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc ADDED
Binary file (729 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc ADDED
Binary file (406 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_lazy/computation.py ADDED
@@ -0,0 +1,26 @@
+import torch._C._lazy
+import torch._C._lazy_ts_backend
+
+
+def get_tensors_ts_device_data_node(tensors):
+    """Return tensor ids and eager tensors for DeviceData nodes in the
+    IR for the passed-in lazy tensors.
+
+    TODO: This API is currently ts backend specific. We are working on
+    generalizing it to all backends including XLA.
+    """
+    return torch._C._lazy_ts_backend._get_tensors_ts_device_data_node(tensors)
+
+
+def get_graph_hash(tensors):
+    """Return the graph hash for the passed-in lazy tensors."""
+    return torch._C._lazy._get_graph_hash(tensors)
+
+
+def run_cached_graph(hash_str, graph_inputs):
+    """Run the cached computation graph with the given inputs.
+
+    TODO: This API is currently ts backend specific. We are working on
+    generalizing it to all backends including XLA.
+    """
+    return torch._C._lazy_ts_backend._run_cached_graph(hash_str, graph_inputs)
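
These three calls are the cache-then-replay primitives that extract_compiled_graph (further down in this commit) builds on: hash the traced graph, record which IR inputs correspond to real arguments, then re-run the cached computation by hash. A rough sketch, where t1 and t2 stand in for lazy tensors that have already been traced (hypothetical placeholders, not defined here):

    import torch._lazy as lazy
    from torch._lazy import computation

    tensors = [t1, t2]                        # hypothetical traced lazy tensors
    lazy.sync_multi(tensors, [])              # lower + cache the graph

    h = computation.get_graph_hash(tensors)   # key into the compilation cache
    ids, ivalues = computation.get_tensors_ts_device_data_node(tensors)

    # Replay the cached graph with its recorded inputs (TS backend only for now).
    outputs = computation.run_cached_graph(h, ivalues)
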
env-llmeval/lib/python3.10/site-packages/torch/_lazy/config.py ADDED
@@ -0,0 +1,16 @@
+import torch._C._lazy
+
+
+def get_force_fallback():
+    """Get the config used to force LTC fallback."""
+    return torch._C._lazy._get_force_fallback()
+
+
+def set_force_fallback(configval):
+    """Set the config used to force LTC fallback."""
+    torch._C._lazy._set_force_fallback(configval)
+
+
+def set_reuse_ir(val: bool):
+    """Set the config to reuse IR nodes for faster tracing."""
+    torch._C._lazy._set_reuse_ir(val)
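
These are thin wrappers over C++ flags. A small sketch of the save/restore pattern they suggest; the accepted values for the fallback config are backend-defined, so only a previously read value is written back here:

    from torch._lazy import config

    config.set_reuse_ir(True)            # reuse IR nodes for faster retracing

    prev = config.get_force_fallback()   # remember the current fallback setting
    try:
        ...                              # run an experiment
    finally:
        config.set_force_fallback(prev)  # restore it afterwards
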
env-llmeval/lib/python3.10/site-packages/torch/_lazy/debug.py ADDED
@@ -0,0 +1,21 @@
+import torch._C._lazy
+
+
+def render_ir_graph(tensors):
+    """Return a text dump of the LTC IR graph in dot format for the tensors.
+    The text can be processed by tools like dot to be rendered as pdf, png, etc."""
+    return torch._C._lazy._get_tensors_dot(tensors)
+
+
+def dump_ir(tensors, ir_format):
+    """Return a dump of the tensors in the specified format.
+    Valid formats are
+    - "text": for LTC IR
+    - "backend": for the active backend IR
+    """
+    if ir_format == "text":
+        return torch._C._lazy._get_tensors_text(tensors)
+    elif ir_format == "backend":
+        return torch._C._lazy._get_tensors_backend(tensors)
+    else:
+        raise RuntimeError(f"Unrecognized IR format: {ir_format}")
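
A short sketch of inspecting a traced graph with these helpers, assuming lazy tensors created as in the torch/_lazy/__init__.py example above; the dot output can be rendered with Graphviz (e.g. dot -Tpng graph.dot -o graph.png):

    import torch
    import torch._lazy.debug as lazy_debug

    x = torch.randn(2, 2, device="lazy")
    y = x + x

    print(lazy_debug.dump_ir([y], "text"))        # LTC IR as text

    with open("graph.dot", "w") as f:
        f.write(lazy_debug.render_ir_graph([y]))  # dot-format graph dump
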
env-llmeval/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py ADDED
@@ -0,0 +1,223 @@
+import copy
+import dataclasses
+import itertools
+import os
+from typing import Any, Callable, Dict, List
+
+import torch
+import torch._lazy as lazy
+import torch._lazy.metrics as metrics
+from torch import fx
+from torch._lazy import computation, debug as lazy_debug
+from torch._lazy.tensor_factory_functions import tensor_factory_functions
+
+debug = os.environ.get("debug_extract_compiled_graph") is not None
+
+
+@dataclasses.dataclass
+class GraphInputMatcher:
+    """
+    The GraphInputMatcher class sets up the graph inputs for future calls after lazy tracing.
+    Specifically, those graph inputs corresponding to method parameters should be replaced with the
+    arguments for the current call.
+
+    tensor_id_to_arg_idx maps the tensor id to the parameter index.
+    graph_input_tensor_ids, graph_input_ivalues list the tensor_id and ivalue for each of the
+    TS/XLA graph inputs.
+    """
+
+    tensor_id_to_arg_idx: Dict[int, int]
+    graph_input_tensor_ids: List[int]
+    # There are 2 categories of graph_input_tensors.
+    # Category 1: those whose ids are not found in tensor_id_to_arg_idx. These are
+    #   most likely const tensors, and we can get their content from graph_input_ivalues.
+    # Category 2: those whose ids are found in tensor_id_to_arg_idx. We should get
+    #   the tensors from the method arguments.
+    graph_input_ivalues: List[Any]
+
+    # Get the real graph input tensors.
+    def __call__(self, args):
+        real_input = []
+        for tensor_id, traced_ivalue in zip(
+            self.graph_input_tensor_ids, self.graph_input_ivalues
+        ):
+            arg_idx = self.tensor_id_to_arg_idx.get(tensor_id, None)
+            if arg_idx is None:
+                inp = traced_ivalue
+            else:
+                inp = args[arg_idx]
+            real_input.append(inp)
+        return real_input
+
+
+class ReturnValueHandler:
+    r"""
+    When ltc_sync_multi is called on multiple tensors, the compiled graph
+    will contain output only for unique tensors - if a tensor appears multiple
+    times in the input to _ltc_sync_multi, only the first occurrence matters.
+
+    However, from the Python level we still expect multiple tensors to be returned,
+    with duplication, even if the TS graph dedups the output. E.g. for the method:
+
+    def forward(self, a):
+        return a, a
+
+    the TS graph captured by LTC will return a single tensor, but the Python method expects 2.
+
+    This class dedups the lazy tensors first to get the indices that will be used
+    to duplicate the eager tensors later.
+    """
+
+    def __init__(self, lazy_out_list):
+        self.index: List[List[int]] = []
+        self.total_count = len(lazy_out_list)
+
+        tensor_id_to_idx: Dict[int, int] = {}
+        for dup_idx, lazy_tensor in enumerate(lazy_out_list):
+            uniq_idx = tensor_id_to_idx.get(id(lazy_tensor), None)
+            if uniq_idx is not None:
+                self.index[uniq_idx].append(dup_idx)
+            else:
+                uniq_idx = len(self.index)
+                self.index.append([dup_idx])
+                tensor_id_to_idx[id(lazy_tensor)] = uniq_idx
+
+    def duplicate_eager_tensors(self, eager_tensor_list):
+        duplicated_list = [None] * self.total_count
+        assert len(eager_tensor_list) == len(self.index)
+
+        for uniq_idx, eager_tensor in enumerate(eager_tensor_list):
+            for dup_idx in self.index[uniq_idx]:
+                duplicated_list[dup_idx] = eager_tensor
+        return duplicated_list
+
+
+def force_lazy_device(model: fx.GraphModule):
+    """
+    Factory methods in an Fx graph may create tensors for specific eager devices.
+    If we take no action, those eager tensors will be mixed with lazy tensors and
+    cause a crash. This method overwrites those eager devices with the lazy device.
+    """
+
+    def tolazydevice(dev):
+        if isinstance(dev, torch.device):
+            return torch.device("lazy", index=dev.index)
+        return dev
+
+    def hasDeviceArg(args, kwargs):
+        return any(
+            isinstance(arg, torch.device)
+            for arg in itertools.chain(args, kwargs.values())
+        )
+
+    for nd in model.graph.nodes:
+        nd.args = tuple(tolazydevice(arg) for arg in nd.args)
+        nd.kwargs = {k: tolazydevice(v) for k, v in nd.kwargs.items()}
+
+        # For torchbench models like yolov3 and hf_Bart, dynamo generates Fx graphs that return
+        # eager tensors on the default device
+        # (check https://gist.github.com/shunting314/eabdf6c769c59bc384469717b8f9bb7f for yolov3,
+        # and https://gist.github.com/shunting314/8d5e2d9348a3258959d3954186c48814 for hf_Bart).
+        # To force those tensors onto the lazy device, we cannot simply override
+        # the device argument since there is no explicit device argument.
+        # What we do here is, for the list of covered tensor factory methods,
+        # add a lazy device argument explicitly.
+        #
+        # TODO: This solution is not ideal since we may miss some factory methods. In the future,
+        # when we support lazy mode, this method can be replaced by that.
+        if nd.target in tensor_factory_functions and not hasDeviceArg(
+            nd.args, nd.kwargs
+        ):
+            kwargs = dict(nd.kwargs)  # nd.kwargs is immutable. make a mutable copy.
+            kwargs["device"] = torch.device("lazy")
+            nd.kwargs = kwargs
+
+    model.recompile()
+
+
+def get_fallback_ops():
+    fallback_ops = []
+    for opname in metrics.counter_names():
+        if "aten::" not in opname:
+            continue
+        val = int(metrics.counter_value(opname))
+        if val > 0:
+            fallback_ops.append(f"{opname}={val}")
+
+    return fallback_ops
+
+
+def extract_compiled_graph(model: fx.GraphModule, example_inputs) -> Callable:
+    """
+    Optimize an eager model with LTC and return a wrapper that executes the
+    compiled graph directly without retracing. It depends on other mechanisms
+    like TorchDynamo guards to guarantee the returned wrapper is only called
+    when it's safe.
+    """
+    lazy_args = [arg.to(device="lazy") for arg in example_inputs]
+    args_tensor_ids = [lazy.get_tensor_id(lazy_arg) for lazy_arg in lazy_args]
+    tensor_id_to_arg_idx = {tensor_id: i for i, tensor_id in enumerate(args_tensor_ids)}
+    lazy_model = copy.deepcopy(model).to(device=torch.device("lazy"))
+    force_lazy_device(lazy_model)
+
+    # This line executes lazy tracing and enables us to extract the compiled graph later.
+    metrics.reset()
+    lazy_out = lazy_model(*lazy_args)
+    fallback_ops = get_fallback_ops()
+    metrics.reset()
+
+    if len(fallback_ops) > 0:
+        raise RuntimeError(
+            f"Failed to extract the compiled graph because of fallback: {','.join(fallback_ops)}"
+        )
+
+    if not isinstance(lazy_out, (tuple, list)):
+        lazy_out = (lazy_out,)
+
+    args_and_out = tuple(lazy_args) + tuple(lazy_out)
+    return_value_handler = ReturnValueHandler(args_and_out)
+    if debug:
+        print("Fx code:\n", model.code)
+        print("LTC IR:", lazy_debug.dump_ir(args_and_out, "text"))
+
+    # TODO: this part is TS backend specific for now and will be generalized to
+    # support XLA
+    (
+        graph_input_tensor_ids,
+        graph_input_ivalues,
+    ) = computation.get_tensors_ts_device_data_node(args_and_out)
+    assert len(graph_input_tensor_ids) == len(graph_input_ivalues)
+    graph_input_matcher = GraphInputMatcher(
+        tensor_id_to_arg_idx, graph_input_tensor_ids, graph_input_ivalues
+    )
+
+    graph_hash = computation.get_graph_hash(args_and_out)
+
+    if debug:
+        print("graph_hash", graph_hash)
+        print(f"args_tensor_ids {args_tensor_ids}")
+        print("tensor ids from device data:", graph_input_tensor_ids)
+
+    # Sync the list of output tensors so the computation graph for these
+    # tensors will be cached. Those computation graphs can be retrieved
+    # by graph hash later.
+    lazy.sync_multi(args_and_out, [])
+
+    def optimized_mod(*args):
+        if len(args_and_out) == 0:
+            return ()
+        graph_input = graph_input_matcher(args)
+        res = return_value_handler.duplicate_eager_tensors(
+            computation.run_cached_graph(graph_hash, graph_input)
+        )
+
+        assert len(res) == len(args_and_out)
+        for i, arg in enumerate(args):
+            # only copy those tensors that get inplace updated
+            if arg is not res[i]:
+                arg.copy_(res[i])
+
+        # skip the args
+        return res[len(args) :]
+
+    return optimized_mod
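
A hedged end-to-end sketch of the API above. In practice TorchDynamo hands extract_compiled_graph an fx.GraphModule plus example inputs, but it can also be exercised directly; this assumes the TS lazy backend is available and initialized:

    import torch
    import torch._lazy.ts_backend
    from torch import fx, nn
    from torch._lazy.extract_compiled_graph import extract_compiled_graph

    torch._lazy.ts_backend.init()

    class M(nn.Module):
        def forward(self, x):
            return (x * 2 + 1,)

    gm = fx.symbolic_trace(M())
    optimized = extract_compiled_graph(gm, [torch.randn(4)])  # trace once, cache

    out = optimized(torch.randn(4))  # replay the cached graph, no retracing
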
env-llmeval/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py ADDED
@@ -0,0 +1,48 @@
+import torch
+
+"""
+tensor_factory_functions defines the list of torch functions that create tensors.
+The list is grabbed by searching through native_functions.yaml with the following
+regular expression:
+
+  cat native_functions.yaml | grep 'func:' | grep -v "Tensor.*->" | grep "[-]>.*Tensor"
+
+It's possible that new tensor factory functions are added, making this list stale.
+Use at your own risk or regenerate the list.
+"""
+tensor_factory_functions = (
+    torch._cudnn_init_dropout_state,
+    torch.arange,
+    torch.bartlett_window,
+    torch.blackman_window,
+    torch._empty_affine_quantized,
+    torch.empty_strided,
+    torch.eye,
+    torch.full,
+    torch.from_file,
+    torch.hann_window,
+    torch.hamming_window,
+    torch.kaiser_window,
+    torch.linspace,
+    torch.logspace,
+    torch.ones,
+    torch.scalar_tensor,
+    torch.rand,
+    torch.randint,
+    torch.randn,
+    torch.randperm,
+    torch.range,
+    torch._efficientzerotensor,
+    torch.zeros,
+    torch.tril_indices,
+    torch.triu_indices,
+    # Note: the following functions match the regular expression search above but
+    # they are not available in the torch module. Comment out.
+    # torch._sparse_coo_tensor_with_dims,
+    # torch.fft_fftfreq,
+    # torch.fft_rfftfreq,
+) + (
+    # torch.tensor is special since it's not in native_functions.yaml
+    # add it separately
+    torch.tensor,
+)
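
force_lazy_device in extract_compiled_graph.py consumes this tuple as a plain membership test on fx node targets; a tiny sketch:

    import torch
    from torch._lazy.tensor_factory_functions import tensor_factory_functions

    print(torch.ones in tensor_factory_functions)  # True: gets a device= kwarg injected
    print(torch.add in tensor_factory_functions)   # False: not a factory function
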
env-llmeval/lib/python3.10/site-packages/torch/amp/__init__.py ADDED
@@ -0,0 +1 @@
+from .autocast_mode import _enter_autocast, _exit_autocast, autocast
env-llmeval/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (271 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc ADDED
Binary file (15.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/amp/autocast_mode.py ADDED
@@ -0,0 +1,436 @@
+import functools
+import warnings
+
+from typing import Any, Optional
+
+import torch
+from torch.types import _dtype
+
+__all__ = ["autocast_decorator", "autocast"]
+
+
+def autocast_decorator(autocast_instance, func):
+    @functools.wraps(func)
+    def decorate_autocast(*args, **kwargs):
+        with autocast_instance:
+            return func(*args, **kwargs)
+
+    decorate_autocast.__script_unsupported = "@autocast() decorator is not supported in script mode"  # type: ignore[attr-defined]
+    return decorate_autocast
+
+
+class autocast:
+    r"""
+    Instances of :class:`autocast` serve as context managers or decorators that
+    allow regions of your script to run in mixed precision.
+
+    In these regions, ops run in an op-specific dtype chosen by autocast
+    to improve performance while maintaining accuracy.
+    See the :ref:`Autocast Op Reference<autocast-op-reference>` for details.
+
+    When entering an autocast-enabled region, Tensors may be any type.
+    You should not call ``half()`` or ``bfloat16()`` on your model(s) or inputs when using autocasting.
+
+    :class:`autocast` should wrap only the forward pass(es) of your network, including the loss
+    computation(s). Backward passes under autocast are not recommended.
+    Backward ops run in the same type that autocast used for corresponding forward ops.
+
+    Example for CUDA Devices::
+
+        # Creates model and optimizer in default precision
+        model = Net().cuda()
+        optimizer = optim.SGD(model.parameters(), ...)
+
+        for input, target in data:
+            optimizer.zero_grad()
+
+            # Enables autocasting for the forward pass (model + loss)
+            with torch.autocast(device_type="cuda"):
+                output = model(input)
+                loss = loss_fn(output, target)
+
+            # Exits the context manager before backward()
+            loss.backward()
+            optimizer.step()
+
+    See the :ref:`CUDA Automatic Mixed Precision examples<amp-examples>` for usage (along with gradient scaling)
+    in more complex scenarios (e.g., gradient penalty, multiple models/losses, custom autograd functions).
+
+    :class:`autocast` can also be used as a decorator, e.g., on the ``forward`` method of your model::
+
+        class AutocastModel(nn.Module):
+            ...
+            @torch.autocast(device_type="cuda")
+            def forward(self, input):
+                ...
+
+    Floating-point Tensors produced in an autocast-enabled region may be ``float16``.
+    After returning to an autocast-disabled region, using them with floating-point
+    Tensors of different dtypes may cause type mismatch errors. If so, cast the Tensor(s)
+    produced in the autocast region back to ``float32`` (or other dtype if desired).
+    If a Tensor from the autocast region is already ``float32``, the cast is a no-op,
+    and incurs no additional overhead.
+    CUDA Example::
+
+        # Creates some tensors in default dtype (here assumed to be float32)
+        a_float32 = torch.rand((8, 8), device="cuda")
+        b_float32 = torch.rand((8, 8), device="cuda")
+        c_float32 = torch.rand((8, 8), device="cuda")
+        d_float32 = torch.rand((8, 8), device="cuda")
+
+        with torch.autocast(device_type="cuda"):
+            # torch.mm is on autocast's list of ops that should run in float16.
+            # Inputs are float32, but the op runs in float16 and produces float16 output.
+            # No manual casts are required.
+            e_float16 = torch.mm(a_float32, b_float32)
+            # Also handles mixed input types
+            f_float16 = torch.mm(d_float32, e_float16)
+
+        # After exiting autocast, calls f_float16.float() to use with d_float32
+        g_float32 = torch.mm(d_float32, f_float16.float())
+
+    CPU Training Example::
+
+        # Creates model and optimizer in default precision
+        model = Net()
+        optimizer = optim.SGD(model.parameters(), ...)
+
+        for epoch in epochs:
+            for input, target in data:
+                optimizer.zero_grad()
+
+                # Runs the forward pass with autocasting.
+                with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
+                    output = model(input)
+                    loss = loss_fn(output, target)
+
+                loss.backward()
+                optimizer.step()
+
+
+    CPU Inference Example::
+
+        # Creates model in default precision
+        model = Net().eval()
+
+        with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
+            for input in data:
+                # Runs the forward pass with autocasting.
+                output = model(input)
+
+    CPU Inference Example with Jit Trace::
+
+        class TestModel(nn.Module):
+            def __init__(self, input_size, num_classes):
+                super().__init__()
+                self.fc1 = nn.Linear(input_size, num_classes)
+            def forward(self, x):
+                return self.fc1(x)
+
+        input_size = 2
+        num_classes = 2
+        model = TestModel(input_size, num_classes).eval()
+
+        # For now, we suggest disabling the Jit Autocast Pass,
+        # due to the issue: https://github.com/pytorch/pytorch/issues/75956
+        torch._C._jit_set_autocast_mode(False)
+
+        with torch.cpu.amp.autocast(cache_enabled=False):
+            model = torch.jit.trace(model, torch.randn(1, input_size))
+        model = torch.jit.freeze(model)
+        # Models Run
+        for _ in range(3):
+            model(torch.randn(1, input_size))
+
+    Type mismatch errors *in* an autocast-enabled region are a bug; if this is what you observe,
+    please file an issue.
+
+    ``autocast(enabled=False)`` subregions can be nested in autocast-enabled regions.
+    Locally disabling autocast can be useful, for example, if you want to force a subregion
+    to run in a particular ``dtype``. Disabling autocast gives you explicit control over
+    the execution type. In the subregion, inputs from the surrounding region
+    should be cast to ``dtype`` before use::
+
+        # Creates some tensors in default dtype (here assumed to be float32)
+        a_float32 = torch.rand((8, 8), device="cuda")
+        b_float32 = torch.rand((8, 8), device="cuda")
+        c_float32 = torch.rand((8, 8), device="cuda")
+        d_float32 = torch.rand((8, 8), device="cuda")
+
+        with torch.autocast(device_type="cuda"):
+            e_float16 = torch.mm(a_float32, b_float32)
+            with torch.autocast(device_type="cuda", enabled=False):
+                # Calls e_float16.float() to ensure float32 execution
+                # (necessary because e_float16 was created in an autocasted region)
+                f_float32 = torch.mm(c_float32, e_float16.float())
+
+            # No manual casts are required when re-entering the autocast-enabled region.
+            # torch.mm again runs in float16 and produces float16 output, regardless of input types.
+            g_float16 = torch.mm(d_float32, f_float32)
+
+    The autocast state is thread-local. If you want it enabled in a new thread, the context manager or decorator
+    must be invoked in that thread. This affects :class:`torch.nn.DataParallel` and
+    :class:`torch.nn.parallel.DistributedDataParallel` when used with more than one GPU per process
+    (see :ref:`Working with Multiple GPUs<amp-multigpu>`).
+
+    Args:
+        device_type(str, required): Device type to use. Possible values are: 'cuda', 'cpu', 'xpu' and 'hpu'.
+            The type is the same as the `type` attribute of a :class:`torch.device`.
+            Thus, you may obtain the device type of a tensor using `Tensor.device.type`.
+        enabled(bool, optional): Whether autocasting should be enabled in the region.
+            Default: ``True``
+        dtype(torch_dtype, optional): The data type to use (e.g. ``torch.float16`` or ``torch.bfloat16``).
+        cache_enabled(bool, optional): Whether the weight cache inside autocast should be enabled.
+            Default: ``True``
+    """
+
+    def __init__(
+        self,
+        device_type: str,
+        dtype: Optional[_dtype] = None,
+        enabled: bool = True,
+        cache_enabled: Optional[bool] = None,
+    ):
+        if torch._jit_internal.is_scripting():
+            self._enabled = enabled
+            self.device = device_type
+            self.fast_dtype = dtype
+            # TODO: support get_autocast_gpu/cpu_dtype
+            assert dtype is not None
+            return
+        self.device = device_type
+        self.custom_backend_name = torch._C._get_privateuse1_backend_name()
+        if self.device == "cuda":
+            self.fast_dtype = torch.get_autocast_gpu_dtype()
+        elif self.device == "cpu":
+            self.fast_dtype = torch.get_autocast_cpu_dtype()
+        elif self.device == "xpu":
+            self.fast_dtype = torch.xpu.get_autocast_xpu_dtype()  # type: ignore[attr-defined]
+        elif self.device == "ipu":
+            self.fast_dtype = torch.get_autocast_ipu_dtype()  # type: ignore[attr-defined]
+        elif self.device == "hpu":
+            self.fast_dtype = torch.hpu.get_autocast_hpu_dtype()  # type: ignore[attr-defined]
+        elif self.device == "xla":
+            self.fast_dtype = torch.get_autocast_xla_dtype()  # type: ignore[attr-defined]
+        elif self.device == self.custom_backend_name:
+            necessary_funcs = [
+                "is_autocast_enabled",
+                "set_autocast_enabled",
+                "get_autocast_dtype",
+                "set_autocast_dtype",
+                "get_amp_supported_dtype",
+            ]
+            message = f"Tried to use AMP with the `{self.custom_backend_name}` backend, but the backend has not "
+            message += "registered a module or the module is missing some necessary funcs. The backend should register "
+            message += "a module by `torch._register_device_module`, and the module must have these funcs: \n"
+            message += "`is_autocast_enabled() -> bool`, `set_autocast_enabled(bool) -> None`, "
+            message += "`get_autocast_dtype() -> torch.dtype`, `set_autocast_dtype(torch.dtype) "
+            message += (
+                "-> None` and `get_amp_supported_dtype() -> List[torch.dtype]`. \n"
+            )
+
+            assert hasattr(torch, self.custom_backend_name), message
+            self.custom_device_mod = getattr(torch, self.custom_backend_name)
+            for func in necessary_funcs:
+                assert hasattr(self.custom_device_mod, func), (
+                    message + f"But the func `{func}` is missing. \n"
+                )
+
+            self.fast_dtype = self.custom_device_mod.get_autocast_dtype()
+        else:
+            raise RuntimeError(
+                f"User specified an unsupported autocast device_type '{self.device}'"
+            )
+        self._cache_enabled = torch.is_autocast_cache_enabled()
+        if (
+            enabled
+            and torch.cuda.amp.common.amp_definitely_not_available()
+            and self.device == "cuda"
+        ):
+            warnings.warn(
+                "User provided device_type of 'cuda', but CUDA is not available. Disabling"
+            )
+            enabled = False
+        if dtype is not None:
+            self.fast_dtype = dtype
+        if cache_enabled is not None:
+            self._cache_enabled = cache_enabled
+
+        if self.device == "cpu":
+            supported_dtype = [torch.bfloat16, torch.float16]
+            if self.fast_dtype not in supported_dtype and enabled:
+                error_message = "In CPU autocast, but the target dtype is not supported. Disabling autocast.\n"
+                error_message += "CPU Autocast only supports dtype of "
+                error_message += (
+                    ", ".join(str(dtype) for dtype in supported_dtype) + " currently."
+                )
+                warnings.warn(error_message)
+                enabled = False
+        elif self.device == "xpu":
+            supported_dtype = [torch.bfloat16, torch.float16]
+            if self.fast_dtype not in supported_dtype:
+                error_message = "In XPU autocast, but the target dtype is not supported. Disabling autocast.\n"
+                error_message += "XPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently."
+                warnings.warn(error_message)
+                enabled = False
+        elif self.device == "ipu":
+            supported_dtypes = [torch.bfloat16, torch.float16]
+            if self.fast_dtype not in supported_dtypes:
+                error_message = "In IPU autocast, but the target dtype is not supported. Disabling autocast.\n"
+                error_message += "IPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently."
+                warnings.warn(error_message)
+                enabled = False
+        elif self.device == "hpu":
+            supported_dtype = [torch.bfloat16, torch.float16]
+            if self.fast_dtype not in supported_dtype:
+                error_message = "In HPU autocast, but the target dtype is not supported. Disabling autocast.\n"
+                error_message += "HPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently."
+                warnings.warn(error_message)
+                enabled = False
+        elif self.device == self.custom_backend_name:
+            supported_dtype = self.custom_device_mod.get_amp_supported_dtype()
+            if self.fast_dtype not in supported_dtype:
+                error_message = f"In {self.custom_backend_name} autocast, but the target dtype is not supported. "
+                error_message += f"Disabling autocast.\n {self.custom_backend_name} Autocast only supports dtypes of "
+                error_message += (
+                    ", ".join(str(dtype) for dtype in supported_dtype) + " currently."
+                )
+                warnings.warn(error_message)
+                enabled = False
+        elif self.device == "cuda":
+            if (
+                enabled
+                and self.fast_dtype == torch.bfloat16
+                and not torch.cuda.is_bf16_supported()
+            ):
+                raise RuntimeError(
+                    "Current CUDA Device does not support bfloat16. Please switch dtype to float16."
+                )
+        elif self.device == "xla":
+            supported_dtype = [torch.float16, torch.bfloat16]
+            if self.fast_dtype not in supported_dtype:
+                error_message = "In XLA autocast, but the target dtype is not supported. Disabling autocast.\n"
+                error_message += (
+                    "XLA Autocast only supports dtype of torch.bfloat16 currently."
+                )
+                warnings.warn(error_message)
+                enabled = False
+        self._enabled = enabled
+
+    def __enter__(self):
+        if torch._jit_internal.is_scripting():
+            assert self.fast_dtype is not None
+            return self
+
+        self.prev_cache_enabled = torch.is_autocast_cache_enabled()
+        if self.device == "cpu":
+            self.prev = torch.is_autocast_cpu_enabled()
+            self.prev_fastdtype = torch.get_autocast_cpu_dtype()
+            torch.set_autocast_cpu_enabled(self._enabled)
+            torch.set_autocast_cpu_dtype(self.fast_dtype)  # type: ignore[arg-type]
+            torch.autocast_increment_nesting()
+        elif self.device == "xpu":
+            self.prev = torch.xpu.is_autocast_xpu_enabled()  # type: ignore[attr-defined]
+            self.prev_fastdtype = torch.xpu.get_autocast_xpu_dtype()  # type: ignore[attr-defined]
+            torch.xpu.set_autocast_xpu_enabled(self._enabled)  # type: ignore[attr-defined]
+            torch.xpu.set_autocast_xpu_dtype(self.fast_dtype)  # type: ignore[attr-defined]
+            torch.autocast_increment_nesting()
+        elif self.device == "ipu":
+            self.prev = torch.is_autocast_ipu_enabled()  # type: ignore[attr-defined]
+            self.prev_fastdtype = torch.get_autocast_ipu_dtype()  # type: ignore[attr-defined]
+            torch.set_autocast_ipu_enabled(self._enabled)  # type: ignore[attr-defined]
+            torch.set_autocast_ipu_dtype(self.fast_dtype)  # type: ignore[attr-defined]
+            torch.autocast_increment_nesting()
+        elif self.device == "hpu":
+            self.prev = torch.hpu.is_autocast_hpu_enabled()  # type: ignore[attr-defined]
+            self.prev_fastdtype = torch.hpu.get_autocast_hpu_dtype()  # type: ignore[attr-defined]
+            torch.hpu.set_autocast_hpu_enabled(self._enabled)  # type: ignore[attr-defined]
+            torch.hpu.set_autocast_hpu_dtype(self.fast_dtype)  # type: ignore[attr-defined]
+            torch.autocast_increment_nesting()
+        elif self.device == "xla":
+            self.prev = torch.is_autocast_xla_enabled()  # type: ignore[attr-defined]
+            self.prev_fastdtype = torch.get_autocast_xla_dtype()  # type: ignore[attr-defined]
+            torch.set_autocast_xla_enabled(self._enabled)  # type: ignore[attr-defined]
+            torch.set_autocast_xla_dtype(self.fast_dtype)  # type: ignore[attr-defined]
+            torch.autocast_increment_nesting()
+        elif self.device == self.custom_backend_name:
+            self.prev = self.custom_device_mod.is_autocast_enabled()
+            self.prev_fastdtype = self.custom_device_mod.get_autocast_dtype()
+            self.custom_device_mod.set_autocast_enabled(self._enabled)
+            self.custom_device_mod.set_autocast_dtype(self.fast_dtype)
+            torch.autocast_increment_nesting()
+        else:
+            self.prev = torch.is_autocast_enabled()
+            self.prev_fastdtype = torch.get_autocast_gpu_dtype()
+            torch.set_autocast_gpu_dtype(self.fast_dtype)  # type: ignore[arg-type]
+            torch.set_autocast_enabled(self._enabled)
+            torch.autocast_increment_nesting()
+        torch.set_autocast_cache_enabled(self._cache_enabled)
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any):  # type: ignore[override]
+        if torch._jit_internal.is_scripting():
+            return
+
+        # Drop the cache when we exit to a nesting level that's outside any instance of autocast.
+        if self.device == "cpu":
+            if torch.autocast_decrement_nesting() == 0:
+                torch.clear_autocast_cache()
+            torch.set_autocast_cpu_enabled(self.prev)
+            torch.set_autocast_cpu_dtype(self.prev_fastdtype)
+        elif self.device == "xpu":
+            if torch.autocast_decrement_nesting() == 0:
+                torch.clear_autocast_cache()
+            torch.xpu.set_autocast_xpu_enabled(self.prev)  # type: ignore[attr-defined]
+            torch.xpu.set_autocast_xpu_dtype(self.prev_fastdtype)  # type: ignore[attr-defined]
+        elif self.device == "ipu":
+            if torch.autocast_decrement_nesting() == 0:
+                torch.clear_autocast_cache()
+            torch.set_autocast_ipu_enabled(self.prev)  # type: ignore[attr-defined]
+            torch.set_autocast_ipu_dtype(self.prev_fastdtype)  # type: ignore[attr-defined]
+        elif self.device == "hpu":
+            if torch.autocast_decrement_nesting() == 0:
+                torch.clear_autocast_cache()
+            torch.hpu.set_autocast_hpu_enabled(self.prev)  # type: ignore[attr-defined]
+            torch.hpu.set_autocast_hpu_dtype(self.prev_fastdtype)  # type: ignore[attr-defined]
+        elif self.device == "xla":
+            if torch.autocast_decrement_nesting() == 0:
+                torch.clear_autocast_cache()
+            torch.set_autocast_xla_enabled(self.prev)  # type: ignore[attr-defined]
+            torch.set_autocast_xla_dtype(self.prev_fastdtype)  # type: ignore[attr-defined]
+        elif self.device == self.custom_backend_name:
+            if torch.autocast_decrement_nesting() == 0:
+                torch.clear_autocast_cache()
+            self.custom_device_mod.set_autocast_enabled(self.prev)
+            self.custom_device_mod.set_autocast_dtype(self.prev_fastdtype)
+        else:
+            if torch.autocast_decrement_nesting() == 0:
+                torch.clear_autocast_cache()
+            torch.set_autocast_enabled(self.prev)
+            torch.set_autocast_gpu_dtype(self.prev_fastdtype)
+        torch.set_autocast_cache_enabled(self.prev_cache_enabled)
+        return False
+
+    def __call__(self, func):
+        if torch._jit_internal.is_scripting():
+            return func
+        return autocast_decorator(self, func)
+
+
+# These functions aren't meant for public usage.
+# They are what we trace into a graph during pre_dispatch tracing
+# when we encounter an autocast context manager.
+def _enter_autocast(*vals):
+    # For pre-dispatch tracing, if a TorchFunction mode is active, we'll want to trace this into a graph.
+    if torch._C._is_torch_function_mode_enabled():
+        return torch.overrides.handle_torch_function(
+            torch.amp._enter_autocast, [], *vals
+        )
+    mode = torch.amp.autocast(*vals)
+    mode.__enter__()
+    return mode
+
+
+def _exit_autocast(mode):
+    if torch._C._is_torch_function_mode_enabled():
+        return torch.overrides.handle_torch_function(torch.amp._exit_autocast, [], mode)
+    mode.__exit__(None, None, None)
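
The custom-backend branch of __init__ above defines an implicit protocol: a privateuse1 backend must register a device module exposing five functions before autocast(device_type=<name>) will accept it. A hedged sketch of a module satisfying that protocol; the "foo" name and module are hypothetical stand-ins, and registration goes through torch._register_device_module as the error message states:

    import torch

    class _FooBackendModule:
        # Hypothetical module satisfying autocast's `necessary_funcs` list.
        _enabled = False
        _dtype = torch.bfloat16

        @staticmethod
        def is_autocast_enabled() -> bool:
            return _FooBackendModule._enabled

        @staticmethod
        def set_autocast_enabled(enabled: bool) -> None:
            _FooBackendModule._enabled = enabled

        @staticmethod
        def get_autocast_dtype() -> torch.dtype:
            return _FooBackendModule._dtype

        @staticmethod
        def set_autocast_dtype(dtype: torch.dtype) -> None:
            _FooBackendModule._dtype = dtype

        @staticmethod
        def get_amp_supported_dtype():
            return [torch.bfloat16, torch.float16]

    # Rename privateuse1 and register the module so `torch.foo` resolves:
    # torch.utils.rename_privateuse1_backend("foo")
    # torch._register_device_module("foo", _FooBackendModule)
    # with torch.autocast(device_type="foo"):
    #     ...
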
env-llmeval/lib/python3.10/site-packages/torch/cpu/__init__.py ADDED
@@ -0,0 +1,157 @@
+r"""
+This package implements abstractions found in ``torch.cuda``
+to facilitate writing device-agnostic code.
+"""
+
+from contextlib import AbstractContextManager
+from typing import Any, Optional, Union
+
+import torch
+
+from .. import device as _device
+from . import amp
+
+__all__ = [
+    "is_available",
+    "synchronize",
+    "current_device",
+    "current_stream",
+    "stream",
+    "set_device",
+    "device_count",
+    "Stream",
+    "StreamContext",
+    "Event",
+]
+
+_device_t = Union[_device, str, int, None]
+
+
+def _is_cpu_support_vnni() -> bool:
+    r"""Returns a bool indicating if the CPU supports VNNI."""
+    return torch._C._cpu._is_cpu_support_vnni()
+
+
+def is_available() -> bool:
+    r"""Returns a bool indicating if CPU is currently available.
+
+    N.B. This function only exists to facilitate device-agnostic code
+
+    """
+    return True
+
+
+def synchronize(device: _device_t = None) -> None:
+    r"""Waits for all kernels in all streams on the CPU device to complete.
+
+    Args:
+        device (torch.device or int, optional): ignored, there's only one CPU device.
+
+    N.B. This function only exists to facilitate device-agnostic code.
+    """
+    pass
+
+
+class Stream:
+    """
+    N.B. This class only exists to facilitate device-agnostic code
+    """
+
+    def __init__(self, priority: int = -1):
+        pass
+
+    def wait_stream(self, stream) -> None:
+        pass
+
+
+class Event:
+    def query(self) -> bool:
+        return True
+
+    def record(self, stream=None):
+        pass
+
+    def synchronize(self):
+        pass
+
+    def wait(self, stream=None):
+        pass
+
+
+_default_cpu_stream = Stream()
+_current_stream = _default_cpu_stream
+
+
+def current_stream(device: _device_t = None) -> Stream:
+    r"""Returns the currently selected :class:`Stream` for a given device.
+
+    Args:
+        device (torch.device or int, optional): Ignored.
+
+    N.B. This function only exists to facilitate device-agnostic code
+
+    """
+    return _current_stream
+
+
+class StreamContext(AbstractContextManager):
+    r"""Context-manager that selects a given stream.
+
+    N.B. This class only exists to facilitate device-agnostic code
+
+    """
+    cur_stream: Optional[Stream]
+
+    def __init__(self, stream):
+        self.stream = stream
+        self.prev_stream = _default_cpu_stream
+
+    def __enter__(self):
+        cur_stream = self.stream
+        if cur_stream is None:
+            return
+
+        global _current_stream
+        self.prev_stream = _current_stream
+        _current_stream = cur_stream
+
+    def __exit__(self, type: Any, value: Any, traceback: Any):
+        cur_stream = self.stream
+        if cur_stream is None:
+            return
+
+        global _current_stream
+        _current_stream = self.prev_stream
+
+
+def stream(stream: Stream) -> AbstractContextManager:
+    r"""Wrapper around the Context-manager StreamContext that
+    selects a given stream.
+
+    N.B. This function only exists to facilitate device-agnostic code
+    """
+    return StreamContext(stream)
+
+
+def device_count() -> int:
+    r"""Returns number of CPU devices (not cores). Always 1.
+
+    N.B. This function only exists to facilitate device-agnostic code
+    """
+    return 1
+
+
+def set_device(device: _device_t) -> None:
+    r"""Sets the current device; for CPU this is a no-op.
+
+    N.B. This function only exists to facilitate device-agnostic code
+    """
+    pass
+
+
+def current_device() -> str:
+    r"""Returns current device for cpu. Always 'cpu'.
+
+    N.B. This function only exists to facilitate device-agnostic code
+    """
+    return "cpu"
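
A brief sketch of the device-agnostic pattern these stubs enable: the same code path works whether the device module passed in is torch.cpu (where stream selection and synchronize are no-ops) or torch.cuda (where they do real work):

    import torch

    def run_on(device_module, device_str: str):
        s = device_module.Stream()
        with device_module.stream(s):        # no-op bookkeeping on CPU
            x = torch.ones(3, device=device_str)
            y = x * 2
        device_module.synchronize()          # no-op on CPU, real sync on CUDA
        return y

    print(run_on(torch.cpu, "cpu"))
    # if torch.cuda.is_available():
    #     print(run_on(torch.cuda, "cuda"))
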
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_config.cpython-310.pyc ADDED
Binary file (789 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/const_fold.cpython-310.pyc ADDED
Binary file (6.77 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/graph_gradual_typechecker.cpython-310.pyc ADDED
Binary file (26 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/normalize.cpython-310.pyc ADDED
Binary file (5.15 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/optimization.cpython-310.pyc ADDED
Binary file (14.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc ADDED
Binary file (8.34 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/refinement_types.cpython-310.pyc ADDED
Binary file (929 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc ADDED
Binary file (4.86 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/schema_type_annotation.cpython-310.pyc ADDED
Binary file (4.07 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc ADDED
Binary file (95.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/unify_refinements.cpython-310.pyc ADDED
Binary file (2.92 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/validator.cpython-310.pyc ADDED
Binary file (19.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (208 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc ADDED
Binary file (17.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc ADDED
Binary file (30.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_transformation.cpython-310.pyc ADDED
Binary file (26.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/operation.cpython-310.pyc ADDED
Binary file (475 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/transform_to_z3.cpython-310.pyc ADDED
Binary file (8.11 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/core.py ADDED
@@ -0,0 +1,118 @@
+from collections.abc import Iterator  # type: ignore[import]
+from functools import partial
+
+from .unification_tools import assoc  # type: ignore[import]
+from .utils import transitive_get as walk
+from .variable import isvar
+from .dispatch import dispatch
+
+__all__ = ["reify", "unify"]
+
+###############
+# Reification #
+###############
+
+@dispatch(Iterator, dict)
+def _reify(t, s):
+    return map(partial(reify, s=s), t)
+    # return (reify(arg, s) for arg in t)
+_reify
+
+@dispatch(tuple, dict)  # type: ignore[no-redef]
+def _reify(t, s):
+    return tuple(reify(iter(t), s))
+_reify
+
+@dispatch(list, dict)  # type: ignore[no-redef]
+def _reify(t, s):
+    return list(reify(iter(t), s))
+_reify
+
+@dispatch(dict, dict)  # type: ignore[no-redef]
+def _reify(d, s):
+    return {k: reify(v, s) for k, v in d.items()}
+_reify
+
+@dispatch(object, dict)  # type: ignore[no-redef]
+def _reify(o, s):
+    return o  # catch all, just return the object
+
+def reify(e, s):
+    """ Replace variables of an expression with their substitution
+    >>> # xdoctest: +SKIP
+    >>> x, y = var(), var()
+    >>> e = (1, x, (3, y))
+    >>> s = {x: 2, y: 4}
+    >>> reify(e, s)
+    (1, 2, (3, 4))
+    >>> e = {1: x, 3: (y, 5)}
+    >>> reify(e, s)
+    {1: 2, 3: (4, 5)}
+    """
+    if isvar(e):
+        return reify(s[e], s) if e in s else e
+    return _reify(e, s)
+
+###############
+# Unification #
+###############
+
+seq = tuple, list, Iterator
+
+@dispatch(seq, seq, dict)
+def _unify(u, v, s):
+    if len(u) != len(v):
+        return False
+    for uu, vv in zip(u, v):  # avoiding recursion
+        s = unify(uu, vv, s)
+        if s is False:
+            return False
+    return s
+#
+# @dispatch((set, frozenset), (set, frozenset), dict)
+# def _unify(u, v, s):
+#     i = u & v
+#     u = u - i
+#     v = v - i
+#     return _unify(sorted(u), sorted(v), s)
+#
+#
+# @dispatch(dict, dict, dict)
+# def _unify(u, v, s):
+#     if len(u) != len(v):
+#         return False
+#     for key, uval in iteritems(u):
+#         if key not in v:
+#             return False
+#         s = unify(uval, v[key], s)
+#         if s is False:
+#             return False
+#     return s
+#
+#
+# @dispatch(object, object, dict)
+# def _unify(u, v, s):
+#     return False  # catch all
+
+
+@dispatch(object, object, dict)
+def unify(u, v, s):  # no check at the moment
+    """ Find a substitution so that u == v while satisfying s
+    >>> x = var('x')
+    >>> unify((1, x), (1, 2), {})
+    {~x: 2}
+    """
+    u = walk(u, s)
+    v = walk(v, s)
+    if u == v:
+        return s
+    if isvar(u):
+        return assoc(s, u, v)
+    if isvar(v):
+        return assoc(s, v, u)
+    return _unify(u, v, s)
+unify
+
+@dispatch(object, object)  # type: ignore[no-redef]
+def unify(u, v):
+    return unify(u, v, {})
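
A runnable version of the docstring examples (which are skipped by xdoctest above); this assumes var, unify and reify are re-exported from the package __init__, as they are in this tree:

    from torch.fx.experimental.unification import reify, unify, var

    x, y = var("x"), var("y")

    s = unify((1, x, (3, y)), (1, 2, (3, 4)), {})
    print(s)                           # {~x: 2, ~y: 4}

    print(reify((1, x, (3, y)), s))    # (1, 2, (3, 4))
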
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/dispatch.py ADDED
@@ -0,0 +1,6 @@
+from functools import partial
+from .multipledispatch import dispatch  # type: ignore[import]
+
+namespace = {}  # type: ignore[var-annotated]
+
+dispatch = partial(dispatch, namespace=namespace)
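
Binding every @dispatch in this package to one shared namespace is what lets implementations of the same function name registered in different modules (e.g. _unify in core.py and more.py) merge into a single dispatcher. A standalone sketch of the pattern, using the pip multipledispatch package rather than the vendored copy:

    from functools import partial
    from multipledispatch import dispatch as _raw_dispatch

    namespace = {}
    dispatch = partial(_raw_dispatch, namespace=namespace)

    @dispatch(int)
    def describe(x):
        return "int"

    @dispatch(str)   # extends the same dispatcher, even if defined in another module
    def describe(x):
        return "str"

    print(describe(3), describe("hi"))   # int str
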
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/more.py ADDED
@@ -0,0 +1,117 @@
+ from .core import unify, reify  # type: ignore[attr-defined]
+ from .dispatch import dispatch
+
+
+ def unifiable(cls):
+     """ Register standard unify and reify operations on class
+     This uses the type and __dict__ or __slots__ attributes to define the
+     nature of the term
+     See Also:
+     >>> # xdoctest: +SKIP
+     >>> class A(object):
+     ...     def __init__(self, a, b):
+     ...         self.a = a
+     ...         self.b = b
+     >>> unifiable(A)
+     <class 'unification.more.A'>
+     >>> x = var('x')
+     >>> a = A(1, 2)
+     >>> b = A(1, x)
+     >>> unify(a, b, {})
+     {~x: 2}
+     """
+     _unify.add((cls, cls, dict), unify_object)
+     _reify.add((cls, dict), reify_object)
+
+     return cls
+
+
+ #########
+ # Reify #
+ #########
+
+
+ def reify_object(o, s):
+     """ Reify a Python object with a substitution
+     >>> # xdoctest: +SKIP
+     >>> class Foo(object):
+     ...     def __init__(self, a, b):
+     ...         self.a = a
+     ...         self.b = b
+     ...     def __str__(self):
+     ...         return "Foo(%s, %s)" % (str(self.a), str(self.b))
+     >>> x = var('x')
+     >>> f = Foo(1, x)
+     >>> print(f)
+     Foo(1, ~x)
+     >>> print(reify_object(f, {x: 2}))
+     Foo(1, 2)
+     """
+     if hasattr(o, '__slots__'):
+         return _reify_object_slots(o, s)
+     else:
+         return _reify_object_dict(o, s)
+
+
+ def _reify_object_dict(o, s):
+     obj = object.__new__(type(o))
+     d = reify(o.__dict__, s)
+     if d == o.__dict__:
+         return o
+     obj.__dict__.update(d)
+     return obj
+
+
+ def _reify_object_slots(o, s):
+     attrs = [getattr(o, attr) for attr in o.__slots__]
+     new_attrs = reify(attrs, s)
+     if attrs == new_attrs:
+         return o
+     else:
+         newobj = object.__new__(type(o))
+         for slot, attr in zip(o.__slots__, new_attrs):
+             setattr(newobj, slot, attr)
+         return newobj
+
+
+ @dispatch(slice, dict)
+ def _reify(o, s):
+     """ Reify a Python ``slice`` object """
+     return slice(*reify((o.start, o.stop, o.step), s))
+
+
+ #########
+ # Unify #
+ #########
+
+
+ def unify_object(u, v, s):
+     """ Unify two Python objects
+     Unifies their type and ``__dict__`` attributes
+     >>> # xdoctest: +SKIP
+     >>> class Foo(object):
+     ...     def __init__(self, a, b):
+     ...         self.a = a
+     ...         self.b = b
+     ...     def __str__(self):
+     ...         return "Foo(%s, %s)" % (str(self.a), str(self.b))
+     >>> x = var('x')
+     >>> f = Foo(1, x)
+     >>> g = Foo(1, 2)
+     >>> unify_object(f, g, {})
+     {~x: 2}
+     """
+     if type(u) != type(v):
+         return False
+     if hasattr(u, '__slots__'):
+         return unify([getattr(u, slot) for slot in u.__slots__],
+                      [getattr(v, slot) for slot in v.__slots__],
+                      s)
+     else:
+         return unify(u.__dict__, v.__dict__, s)
+
+
+ @dispatch(slice, slice, dict)
+ def _unify(u, v, s):
+     """ Unify a Python ``slice`` object """
+     return unify((u.start, u.stop, u.step), (v.start, v.stop, v.step), s)
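
The doctests above are skipped under xdoctest, but `unifiable` is easy to exercise directly; a sketch assuming the package-root re-exports of `var` and `unify`:

    from torch.fx.experimental.unification import unify, var
    from torch.fx.experimental.unification.more import unifiable

    @unifiable  # registers unify_object/reify_object for this class
    class Account:
        def __init__(self, owner, balance):
            self.owner = owner
            self.balance = balance

    x = var('x')
    # unify_object compares the two instances' __dict__ attributes.
    print(unify(Account('alice', 10), Account('alice', x), {}))  # {~x: 10}
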
env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/unification_tools.py ADDED
@@ -0,0 +1,395 @@
+ import collections
+ import operator
+ from functools import reduce
+ from collections.abc import Mapping
+
+ __all__ = ('merge', 'merge_with', 'valmap', 'keymap', 'itemmap',
+            'valfilter', 'keyfilter', 'itemfilter',
+            'assoc', 'dissoc', 'assoc_in', 'update_in', 'get_in')
+
+
+ def _get_factory(f, kwargs):
+     factory = kwargs.pop('factory', dict)
+     if kwargs:
+         raise TypeError(f"{f.__name__}() got an unexpected keyword argument '{kwargs.popitem()[0]}'")
+     return factory
+
+
+ def merge(*dicts, **kwargs):
+     """ Merge a collection of dictionaries
+
+     >>> merge({1: 'one'}, {2: 'two'})
+     {1: 'one', 2: 'two'}
+
+     Later dictionaries have precedence
+
+     >>> merge({1: 2, 3: 4}, {3: 3, 4: 4})
+     {1: 2, 3: 3, 4: 4}
+
+     See Also:
+         merge_with
+     """
+     if len(dicts) == 1 and not isinstance(dicts[0], Mapping):
+         dicts = dicts[0]
+     factory = _get_factory(merge, kwargs)
+
+     rv = factory()
+     for d in dicts:
+         rv.update(d)
+     return rv
+
+
+ def merge_with(func, *dicts, **kwargs):
+     """ Merge dictionaries and apply function to combined values
+
+     A key may occur in more than one dict, and all values mapped from the key
+     will be passed to the function as a list, such as func([val1, val2, ...]).
+
+     >>> merge_with(sum, {1: 1, 2: 2}, {1: 10, 2: 20})
+     {1: 11, 2: 22}
+
+     >>> merge_with(first, {1: 1, 2: 2}, {2: 20, 3: 30})  # doctest: +SKIP
+     {1: 1, 2: 2, 3: 30}
+
+     See Also:
+         merge
+     """
+     if len(dicts) == 1 and not isinstance(dicts[0], Mapping):
+         dicts = dicts[0]
+     factory = _get_factory(merge_with, kwargs)
+
+     result = factory()
+     for d in dicts:
+         for k, v in d.items():
+             if k not in result:
+                 result[k] = [v]
+             else:
+                 result[k].append(v)
+     return valmap(func, result, factory)
+
+
+ def valmap(func, d, factory=dict):
+     """ Apply function to values of dictionary
+
+     >>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]}
+     >>> valmap(sum, bills)  # doctest: +SKIP
+     {'Alice': 65, 'Bob': 45}
+
+     See Also:
+         keymap
+         itemmap
+     """
+     rv = factory()
+     rv.update(zip(d.keys(), map(func, d.values())))
+     return rv
+
+
+ def keymap(func, d, factory=dict):
+     """ Apply function to keys of dictionary
+
+     >>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]}
+     >>> keymap(str.lower, bills)  # doctest: +SKIP
+     {'alice': [20, 15, 30], 'bob': [10, 35]}
+
+     See Also:
+         valmap
+         itemmap
+     """
+     rv = factory()
+     rv.update(zip(map(func, d.keys()), d.values()))
+     return rv
+
+
+ def itemmap(func, d, factory=dict):
+     """ Apply function to items of dictionary
+
+     >>> accountids = {"Alice": 10, "Bob": 20}
+     >>> itemmap(reversed, accountids)  # doctest: +SKIP
+     {10: "Alice", 20: "Bob"}
+
+     See Also:
+         keymap
+         valmap
+     """
+     rv = factory()
+     rv.update(map(func, d.items()))
+     return rv
+
+
+ def valfilter(predicate, d, factory=dict):
+     """ Filter items in dictionary by value
+
+     >>> iseven = lambda x: x % 2 == 0
+     >>> d = {1: 2, 2: 3, 3: 4, 4: 5}
+     >>> valfilter(iseven, d)
+     {1: 2, 3: 4}
+
+     See Also:
+         keyfilter
+         itemfilter
+         valmap
+     """
+     rv = factory()
+     for k, v in d.items():
+         if predicate(v):
+             rv[k] = v
+     return rv
+
+
+ def keyfilter(predicate, d, factory=dict):
+     """ Filter items in dictionary by key
+
+     >>> iseven = lambda x: x % 2 == 0
+     >>> d = {1: 2, 2: 3, 3: 4, 4: 5}
+     >>> keyfilter(iseven, d)
+     {2: 3, 4: 5}
+
+     See Also:
+         valfilter
+         itemfilter
+         keymap
+     """
+     rv = factory()
+     for k, v in d.items():
+         if predicate(k):
+             rv[k] = v
+     return rv
+
+
+ def itemfilter(predicate, d, factory=dict):
+     """ Filter items in dictionary by item
+
+     >>> def isvalid(item):
+     ...     k, v = item
+     ...     return k % 2 == 0 and v < 4
+
+     >>> d = {1: 2, 2: 3, 3: 4, 4: 5}
+     >>> itemfilter(isvalid, d)
+     {2: 3}
+
+     See Also:
+         keyfilter
+         valfilter
+         itemmap
+     """
+     rv = factory()
+     for item in d.items():
+         if predicate(item):
+             k, v = item
+             rv[k] = v
+     return rv
+
+
+ def assoc(d, key, value, factory=dict):
+     """ Return a new dict with new key value pair
+
+     New dict has d[key] set to value. Does not modify the initial dictionary.
+
+     >>> assoc({'x': 1}, 'x', 2)
+     {'x': 2}
+     >>> assoc({'x': 1}, 'y', 3)  # doctest: +SKIP
+     {'x': 1, 'y': 3}
+     """
+     d2 = factory()
+     d2.update(d)
+     d2[key] = value
+     return d2
+
+
+ def dissoc(d, *keys, **kwargs):
+     """ Return a new dict with the given key(s) removed.
+
+     New dict has d[key] deleted for each supplied key.
+     Does not modify the initial dictionary.
+
+     >>> dissoc({'x': 1, 'y': 2}, 'y')
+     {'x': 1}
+     >>> dissoc({'x': 1, 'y': 2}, 'y', 'x')
+     {}
+     >>> dissoc({'x': 1}, 'y')  # Ignores missing keys
+     {'x': 1}
+     """
+     factory = _get_factory(dissoc, kwargs)
+     d2 = factory()
+
+     if len(keys) < len(d) * .6:
+         d2.update(d)
+         for key in keys:
+             if key in d2:
+                 del d2[key]
+     else:
+         remaining = set(d)
+         remaining.difference_update(keys)
+         for k in remaining:
+             d2[k] = d[k]
+     return d2
+
+
+ def assoc_in(d, keys, value, factory=dict):
+     """ Return a new dict with new, potentially nested, key value pair
+
+     >>> purchase = {'name': 'Alice',
+     ...             'order': {'items': ['Apple', 'Orange'],
+     ...                       'costs': [0.50, 1.25]},
+     ...             'credit card': '5555-1234-1234-1234'}
+     >>> assoc_in(purchase, ['order', 'costs'], [0.25, 1.00])  # doctest: +SKIP
+     {'credit card': '5555-1234-1234-1234',
+      'name': 'Alice',
+      'order': {'costs': [0.25, 1.00], 'items': ['Apple', 'Orange']}}
+     """
+     return update_in(d, keys, lambda x: value, value, factory)
+
+
+ def update_in(d, keys, func, default=None, factory=dict):
+     """ Update value in a (potentially) nested dictionary
+
+     inputs:
+     d - dictionary on which to operate
+     keys - list or tuple giving the location of the value to be changed in d
+     func - function to operate on that value
+
+     If keys == [k0,..,kX] and d[k0]..[kX] == v, update_in returns a copy of the
+     original dictionary with v replaced by func(v), but does not mutate the
+     original dictionary.
+
+     If k0 is not a key in d, update_in creates nested dictionaries to the depth
+     specified by the keys, with the innermost value set to func(default).
+
+     >>> inc = lambda x: x + 1
+     >>> update_in({'a': 0}, ['a'], inc)
+     {'a': 1}
+
+     >>> transaction = {'name': 'Alice',
+     ...                'purchase': {'items': ['Apple', 'Orange'],
+     ...                             'costs': [0.50, 1.25]},
+     ...                'credit card': '5555-1234-1234-1234'}
+     >>> update_in(transaction, ['purchase', 'costs'], sum)  # doctest: +SKIP
+     {'credit card': '5555-1234-1234-1234',
+      'name': 'Alice',
+      'purchase': {'costs': 1.75, 'items': ['Apple', 'Orange']}}
+
+     >>> # updating a value when k0 is not in d
+     >>> update_in({}, [1, 2, 3], str, default="bar")
+     {1: {2: {3: 'bar'}}}
+     >>> update_in({1: 'foo'}, [2, 3, 4], inc, 0)
+     {1: 'foo', 2: {3: {4: 1}}}
+     """
+     ks = iter(keys)
+     k = next(ks)
+
+     rv = inner = factory()
+     rv.update(d)
+
+     for key in ks:
+         if k in d:
+             d = d[k]
+             dtemp = factory()
+             dtemp.update(d)
+         else:
+             d = dtemp = factory()
+
+         inner[k] = inner = dtemp
+         k = key
+
+     if k in d:
+         inner[k] = func(d[k])
+     else:
+         inner[k] = func(default)
+     return rv
+
+
+ def get_in(keys, coll, default=None, no_default=False):
+     """ Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys.
+
+     If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless
+     ``no_default`` is specified, then it raises KeyError or IndexError.
+
+     ``get_in`` is a generalization of ``operator.getitem`` for nested data
+     structures such as dictionaries and lists.
+
+     >>> transaction = {'name': 'Alice',
+     ...                'purchase': {'items': ['Apple', 'Orange'],
+     ...                             'costs': [0.50, 1.25]},
+     ...                'credit card': '5555-1234-1234-1234'}
+     >>> get_in(['purchase', 'items', 0], transaction)
+     'Apple'
+     >>> get_in(['name'], transaction)
+     'Alice'
+     >>> get_in(['purchase', 'total'], transaction)
+     >>> get_in(['purchase', 'items', 'apple'], transaction)
+     >>> get_in(['purchase', 'items', 10], transaction)
+     >>> get_in(['purchase', 'total'], transaction, 0)
+     0
+     >>> get_in(['y'], {}, no_default=True)
+     Traceback (most recent call last):
+         ...
+     KeyError: 'y'
+
+     See Also:
+         itertoolz.get
+         operator.getitem
+     """
+     try:
+         return reduce(operator.getitem, keys, coll)
+     except (KeyError, IndexError, TypeError):
+         if no_default:
+             raise
+         return default
+
+
+ def getter(index):
+     if isinstance(index, list):
+         if len(index) == 1:
+             index = index[0]
+             return lambda x: (x[index],)
+         elif index:
+             return operator.itemgetter(*index)
+         else:
+             return lambda x: ()
+     else:
+         return operator.itemgetter(index)
+
+
+ def groupby(key, seq):
+     """ Group a collection by a key function
+
+     >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
+     >>> groupby(len, names)  # doctest: +SKIP
+     {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
+
+     >>> iseven = lambda x: x % 2 == 0
+     >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8])  # doctest: +SKIP
+     {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
+
+     Non-callable keys imply grouping on a member.
+
+     >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
+     ...                    {'name': 'Bob', 'gender': 'M'},
+     ...                    {'name': 'Charlie', 'gender': 'M'}])  # doctest:+SKIP
+     {'F': [{'gender': 'F', 'name': 'Alice'}],
+      'M': [{'gender': 'M', 'name': 'Bob'},
+            {'gender': 'M', 'name': 'Charlie'}]}
+
+     Not to be confused with ``itertools.groupby``
+
+     See Also:
+         countby
+     """
+     if not callable(key):
+         key = getter(key)
+     d = collections.defaultdict(lambda: [].append)  # type: ignore[var-annotated]
+     for item in seq:
+         d[key(item)](item)
+     rv = {}
+     for k, v in d.items():
+         rv[k] = v.__self__  # type: ignore[var-annotated, attr-defined]
+     return rv
+
+
+ def first(seq):
+     """ The first element in a sequence
+
+     >>> first('ABC')
+     'A'
+     """
+     return next(iter(seq))
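
These helpers are a vendored subset of `toolz.dicttoolz`; all of them return new mappings rather than mutating their inputs. A short sketch, assuming the module path below:

    from torch.fx.experimental.unification.unification_tools import (
        assoc, get_in, groupby, merge, update_in)

    d = {'a': 1}
    print(merge(d, {'b': 2}))                  # {'a': 1, 'b': 2}
    print(assoc(d, 'a', 10), d)                # {'a': 10} {'a': 1} -- d untouched
    print(update_in({'k': {'n': 1}}, ['k', 'n'], lambda v: v + 1))  # {'k': {'n': 2}}
    print(get_in(['k', 'missing'], {'k': {}}, default=0))           # 0
    print(groupby(len, ['ab', 'c', 'de']))     # {2: ['ab', 'de'], 1: ['c']}
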
env-llmeval/lib/python3.10/site-packages/torch/masked/__init__.py ADDED
@@ -0,0 +1,37 @@
+ from .maskedtensor.core import is_masked_tensor, MaskedTensor
+ from .maskedtensor.creation import as_masked_tensor, masked_tensor
+ from ._ops import (
+     _canonical_dim,
+     _generate_docstring,
+     _reduction_identity,
+     _where,
+     _input_mask,
+     _output_mask,
+     _combine_input_and_mask,
+     sum,
+     prod,
+     cumsum,
+     cumprod,
+     amax,
+     amin,
+     argmax,
+     argmin,
+     mean,
+     median,
+     logsumexp,
+     logaddexp,
+     norm,
+     var,
+     std,
+     softmax,
+     log_softmax,
+     softmin,
+     normalize,
+ )
+
+ __all__ = [
+     "as_masked_tensor",
+     "is_masked_tensor",
+     "masked_tensor",
+     "MaskedTensor",
+ ]
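
The module pairs the `MaskedTensor` prototype with functional reductions that take the mask explicitly. A sketch of both entry points, mirroring the examples in the generated `_docs.py` below:

    import torch
    from torch.masked import masked_tensor

    data = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
    mask = torch.tensor([[True, False, True], [False, False, False]])

    mt = masked_tensor(data, mask)  # pairs values with a validity mask
    print(mt)

    # The functional form accepts the mask directly, as in _docs.py:
    print(torch.masked._ops.amax(data, 1, mask=mask))
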
env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (905 Bytes).
env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/_docs.cpython-310.pyc ADDED
Binary file (49.4 kB).
env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/_ops.cpython-310.pyc ADDED
Binary file (41.3 kB).
env-llmeval/lib/python3.10/site-packages/torch/masked/_docs.py ADDED
@@ -0,0 +1,1177 @@
+ # This file is generated, do not modify it!
+ #
+ # To update this file, run the update masked docs script as follows:
+ #
+ #   python tools/update_masked_docs.py
+ #
+ # The script must be called from an environment where the development
+ # version of torch package can be imported and is functional.
+ #
+
+ amax_docstring = """amax(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
+
+ Returns maximum of all the elements in the :attr:`input`
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
+ elements are masked out according to the boolean tensor
+ :attr:`mask`.
+
+ The identity value of maximum operation, which is used to start the
+ reduction, depends on input dtype. For instance, for float32, uint8,
+ and int32 dtypes, the identity values are ``-inf``, ``0``, and ``-2147483648``, respectively.
+
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
+ size 1. Otherwise, :attr:`dim` is squeezed (see
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
+ ``len(dim)``) fewer dimension(s).
+
+ The boolean tensor :attr:`mask` defines the "validity" of
+ :attr:`input` tensor elements: if :attr:`mask` element is True
+ then the corresponding element in :attr:`input` tensor will be
+ included in maximum computation, otherwise the element is
+ ignored.
+
+ When all elements of :attr:`input` along the given dimension
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
+ of the output tensor will have undefined value: it may or may not
+ correspond to the identity value of maximum operation; the
+ choice may correspond to the value that leads to the most efficient
+ storage of :attr:`output` tensor.
+
+ The mask of the output tensor can be computed as
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
+ dtype=torch.bool)``.
+
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
+ don't need to match, but they must be :ref:`broadcastable
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
+ tensor must not be greater than of the :attr:`input` tensor.
+
+ Args:
+     input (Tensor): the input tensor
+     dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
+       Default: None that is equivalent to ``tuple(range(input.ndim))``.
+
+ Keyword args:
+     keepdim (bool, optional): whether the output tensor has
+       :attr:`dim` retained or not. Default: False.
+     dtype (:class:`torch.dtype`, optional): the desired data type
+       of returned tensor. If specified, the input tensor is
+       casted to :attr:`dtype` before the operation is
+       performed. Default: None.
+     mask (:class:`torch.Tensor`, optional): the boolean tensor
+       containing the binary mask of validity of input tensor
+       elements.
+       Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
+
+ Example::
+
+     >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
+     >>> input
+     tensor([[-3, -2, -1],
+             [ 0,  1,  2]])
+     >>> mask = tensor([[ True, False, True], [False, False, False]])
+     >>> mask
+     tensor([[ True, False,  True],
+             [False, False, False]])
+     >>> torch.masked._ops.amax(input, 1, mask=mask)
+     tensor([ -1, -9223372036854775808])
+ """
+
+ amin_docstring = """amin(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
+
+ Returns minimum of all the elements in the :attr:`input`
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
+ elements are masked out according to the boolean tensor
+ :attr:`mask`.
+
+ The identity value of minimum operation, which is used to start the
+ reduction, depends on input dtype. For instance, for float32, uint8,
+ and int32 dtypes, the identity values are ``inf``, ``255``, and ``2147483647``, respectively.
+
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
+ size 1. Otherwise, :attr:`dim` is squeezed (see
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
+ ``len(dim)``) fewer dimension(s).
+
+ The boolean tensor :attr:`mask` defines the "validity" of
+ :attr:`input` tensor elements: if :attr:`mask` element is True
+ then the corresponding element in :attr:`input` tensor will be
+ included in minimum computation, otherwise the element is
+ ignored.
+
+ When all elements of :attr:`input` along the given dimension
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
+ of the output tensor will have undefined value: it may or may not
+ correspond to the identity value of minimum operation; the
+ choice may correspond to the value that leads to the most efficient
+ storage of :attr:`output` tensor.
+
+ The mask of the output tensor can be computed as
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
+ dtype=torch.bool)``.
+
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
+ don't need to match, but they must be :ref:`broadcastable
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
+ tensor must not be greater than of the :attr:`input` tensor.
+
+ Args:
+     input (Tensor): the input tensor
+     dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
+       Default: None that is equivalent to ``tuple(range(input.ndim))``.
+
+ Keyword args:
+     keepdim (bool, optional): whether the output tensor has
+       :attr:`dim` retained or not. Default: False.
+     dtype (:class:`torch.dtype`, optional): the desired data type
+       of returned tensor. If specified, the input tensor is
+       casted to :attr:`dtype` before the operation is
+       performed. Default: None.
+     mask (:class:`torch.Tensor`, optional): the boolean tensor
+       containing the binary mask of validity of input tensor
+       elements.
+       Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
+
+ Example::
+
+     >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
+     >>> input
+     tensor([[-3, -2, -1],
+             [ 0,  1,  2]])
+     >>> mask = tensor([[ True, False, True], [False, False, False]])
+     >>> mask
+     tensor([[ True, False,  True],
+             [False, False, False]])
+     >>> torch.masked._ops.amin(input, 1, mask=mask)
+     tensor([ -3, 9223372036854775807])
+ """
+
+ argmax_docstring = """argmax(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
+ Returns argmax of all the elements in the :attr:`input`
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
+ elements are masked out according to the boolean tensor
+ :attr:`mask`.
+ The identity value of argmax operation, which is used to start the
+ reduction, depends on input dtype. For instance, for float32, uint8,
+ and int32 dtypes, the identity values are ``-inf``, ``0``, and ``-2147483648``, respectively.
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
+ size 1. Otherwise, :attr:`dim` is squeezed (see
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
+ ``len(dim)``) fewer dimension(s).
+
+ The boolean tensor :attr:`mask` defines the "validity" of
+ :attr:`input` tensor elements: if :attr:`mask` element is True
+ then the corresponding element in :attr:`input` tensor will be
+ included in argmax computation, otherwise the element is
+ ignored.
+
+ When all elements of :attr:`input` along the given dimension
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
+ of the output tensor will have undefined value: it may or may not
+ correspond to the identity value of argmax operation; the
+ choice may correspond to the value that leads to the most efficient
+ storage of :attr:`output` tensor.
+
+ The mask of the output tensor can be computed as
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
+ dtype=torch.bool)``.
+
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
+ don't need to match, but they must be :ref:`broadcastable
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
+ tensor must not be greater than of the :attr:`input` tensor.
+
+ Args:
+     input (Tensor): the input tensor
+     dim (int): the dimension along which argmax is computed.
+
+ Keyword args:
+     keepdim (bool, optional): whether the output tensor has
+       :attr:`dim` retained or not. Default: False.
+     dtype (:class:`torch.dtype`, optional): the desired data type
+       of returned tensor. If specified, the input tensor is
+       casted to :attr:`dtype` before the operation is
+       performed. Default: None.
+     mask (:class:`torch.Tensor`, optional): the boolean tensor
+       containing the binary mask of validity of input tensor
+       elements.
+       Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
+ Example::
+
+     >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
+     >>> input
+     tensor([[-3, -2, -1],
+             [ 0,  1,  2]])
+     >>> mask = tensor([[ True, False, True], [False, False, False]])
+     >>> mask
+     tensor([[ True, False,  True],
+             [False, False, False]])
+     >>> torch.masked._ops.argmax(input, 1, mask=mask)
+     tensor([2, 0])
+ """
+
+ argmin_docstring = """argmin(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
+ Returns argmin of all the elements in the :attr:`input`
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
+ elements are masked out according to the boolean tensor
+ :attr:`mask`.
+ The identity value of argmin operation, which is used to start the
+ reduction, depends on input dtype. For instance, for float32, uint8,
+ and int32 dtypes, the identity values are ``inf``, ``255``, and ``2147483647``, respectively.
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
+ size 1. Otherwise, :attr:`dim` is squeezed (see
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
+ ``len(dim)``) fewer dimension(s).
+
+ The boolean tensor :attr:`mask` defines the "validity" of
+ :attr:`input` tensor elements: if :attr:`mask` element is True
+ then the corresponding element in :attr:`input` tensor will be
+ included in argmin computation, otherwise the element is
+ ignored.
+
+ When all elements of :attr:`input` along the given dimension
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
+ of the output tensor will have undefined value: it may or may not
+ correspond to the identity value of argmin operation; the
+ choice may correspond to the value that leads to the most efficient
+ storage of :attr:`output` tensor.
+
+ The mask of the output tensor can be computed as
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
+ dtype=torch.bool)``.
+
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
+ don't need to match, but they must be :ref:`broadcastable
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
+ tensor must not be greater than of the :attr:`input` tensor.
+
+ Args:
+     input (Tensor): the input tensor
+     dim (int): the dimension along which argmin is computed.
+
+ Keyword args:
+     keepdim (bool, optional): whether the output tensor has
+       :attr:`dim` retained or not. Default: False.
+     dtype (:class:`torch.dtype`, optional): the desired data type
+       of returned tensor. If specified, the input tensor is
+       casted to :attr:`dtype` before the operation is
+       performed. Default: None.
+     mask (:class:`torch.Tensor`, optional): the boolean tensor
+       containing the binary mask of validity of input tensor
+       elements.
+       Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
+ Example::
+
+     >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
+     >>> input
+     tensor([[-3, -2, -1],
+             [ 0,  1,  2]])
+     >>> mask = tensor([[ True, False, True], [False, False, False]])
+     >>> mask
+     tensor([[ True, False,  True],
+             [False, False, False]])
+     >>> torch.masked._ops.argmin(input, 1, mask=mask)
+     tensor([0, 0])
+ """
+
+ cumprod_docstring = """cumprod(input, dim, *, dtype=None, mask=None) -> Tensor
+
+ Returns cumulative_prod of all the slices in the :attr:`input` tensor
+ along :attr:`dim` while the :attr:`input` elements are masked out
+ according to the boolean tensor :attr:`mask`.
+
+ Let ``x`` be a sequence of unmasked elements of one-dimensional slice
+ of the :attr:`input` tensor. Cumprod of i-th element in ``x`` is
+ defined as ``prod(x[:i])``.
+
+ The boolean tensor :attr:`mask` defines the "validity" of
+ :attr:`input` tensor elements: if :attr:`mask` element is True then
+ the corresponding element in :attr:`input` tensor will be included in
+ cumulative_prod computation, otherwise the element is ignored.
+
+ The values of masked-out elements of the output tensor have undefined
+ value: it may or may not be set to zero or nan; the choice may correspond to
+ the value that leads to the most efficient storage of :attr:`output`
+ tensor.
+
+ The mask of the cumulative_prod output tensor can be computed as
+ ``torch.broadcast_to(mask, input.shape)``.
+
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
+ don't need to match, but they must be :ref:`broadcastable
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
+ tensor must not be greater than of the :attr:`input` tensor.
+
+ Args:
+     input (Tensor): the input tensor
+     dim (int): the dimension along which cumulative_prod is computed.
+
+ Keyword args:
+     dtype (:class:`torch.dtype`, optional): the desired data type
+       of returned tensor. If specified, the input tensor is
+       casted to :attr:`dtype` before the operation is
+       performed. Default: None.
+     mask (:class:`torch.Tensor`, optional): the boolean tensor
+       containing the binary mask of validity of input tensor
+       elements.
+       Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
+
+ Example::
+
+     >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
+     >>> input
+     tensor([[-3., -2., -1.],
+             [ 0.,  1.,  2.]])
+     >>> mask = tensor([[ True, False, True], [False, False, False]])
+     >>> mask
+     tensor([[ True, False,  True],
+             [False, False, False]])
+     >>> torch.masked._ops.cumprod(input, 1, mask=mask)
+     tensor([[-3., -3.,  3.],
+             [ 1.,  1.,  1.]])
+ """
+
+ cumsum_docstring = """cumsum(input, dim, *, dtype=None, mask=None) -> Tensor
339
+
340
+ Returns cumulative_sum of all the slices in the :attr:`input` tensor
341
+ along :attr:`dim` while the :attr:`input` elements are masked out
342
+ according to the boolean tensor :attr:`mask`.
343
+
344
+ Let ``x`` be a sequence of unmasked elements of one-dimensional slice
345
+ of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is
346
+ defined as ``sum(x[:i])``.
347
+
348
+ The boolean tensor :attr:`mask` defines the "validity" of
349
+ :attr:`input` tensor elements: if :attr:`mask` element is True then
350
+ the corresponding element in :attr:`input` tensor will be included in
351
+ cumulative_sum computation, otherwise the element is ignored.
352
+
353
+ The values of masked-out elements of the output tensor have undefined
354
+ value: it may or may not be set to zero or nan; the choice may correspond to
355
+ the value that leads to the most efficient storage of :attr:`output`
356
+ tensor.
357
+
358
+ The mask of the cumulative_sum output tensor can be computed as
359
+ ``torch.broadcast_to(mask, input.shape)``.
360
+
361
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
362
+ don't need to match, but they must be :ref:`broadcastable
363
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
364
+ tensor must not be greater than of the :attr:`input` tensor.
365
+
366
+ Args:
367
+ input (Tensor): the input tensor
368
+ dim (int): the dimension along which cumulative_sum is computed.
369
+
370
+ Keyword args:
371
+ dtype (:class:`torch.dtype`, optional): the desired data type
372
+ of returned tensor. If specified, the input tensor is
373
+ casted to :attr:`dtype` before the operation is
374
+ performed. Default: None.
375
+ mask (:class:`torch.Tensor`, optional): the boolean tensor
376
+ containing the binary mask of validity of input tensor
377
+ elements.
378
+ Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
379
+
380
+ Example::
381
+
382
+ >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
383
+ >>> input
384
+ tensor([[-3., -2., -1.],
385
+ [ 0., 1., 2.]])
386
+ >>> mask = tensor([[ True, False, True], [False, False, False]])
387
+ >>> mask
388
+ tensor([[ True, False, True],
389
+ [False, False, False]])
390
+ >>> torch.masked._ops.cumsum(input, 1, mask=mask)
391
+ tensor([[-3., -3., -4.],
392
+ [ 0., 0., 0.]])
393
+ """
394
+
395
+ log_softmax_docstring = """log_softmax(input, dim, *, dtype=None, mask=None) -> Tensor
396
+
397
+ Returns log_softmax of all the slices in the :attr:`input` tensor
398
+ along :attr:`dim` while the :attr:`input` elements are masked out
399
+ according to the boolean tensor :attr:`mask`.
400
+
401
+ Let ``x`` be a sequence of unmasked elements of one-dimensional slice
402
+ of the :attr:`input` tensor. LogSoftmax of i-th element in ``x`` is
403
+ defined as ``log(exp(x[i])/sum(exp(x)))``.
404
+
405
+ The boolean tensor :attr:`mask` defines the "validity" of
406
+ :attr:`input` tensor elements: if :attr:`mask` element is True then
407
+ the corresponding element in :attr:`input` tensor will be included in
408
+ log_softmax computation, otherwise the element is ignored.
409
+
410
+ The values of masked-out elements of the output tensor have undefined
411
+ value: it may or may not be set to zero or nan; the choice may correspond to
412
+ the value that leads to the most efficient storage of :attr:`output`
413
+ tensor.
414
+
415
+ The mask of the log_softmax output tensor can be computed as
416
+ ``torch.broadcast_to(mask, input.shape)``.
417
+
418
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
419
+ don't need to match, but they must be :ref:`broadcastable
420
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
421
+ tensor must not be greater than of the :attr:`input` tensor.
422
+
423
+ Args:
424
+ input (Tensor): the input tensor
425
+ dim (int): the dimension along which log_softmax is computed.
426
+
427
+ Keyword args:
428
+ dtype (:class:`torch.dtype`, optional): the desired data type
429
+ of returned tensor. If specified, the input tensor is
430
+ casted to :attr:`dtype` before the operation is
431
+ performed. Default: None.
432
+ mask (:class:`torch.Tensor`, optional): the boolean tensor
433
+ containing the binary mask of validity of input tensor
434
+ elements.
435
+ Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
436
+
437
+ Example::
438
+
439
+ >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
440
+ >>> input
441
+ tensor([[-3., -2., -1.],
442
+ [ 0., 1., 2.]])
443
+ >>> mask = tensor([[ True, False, True], [False, False, False]])
444
+ >>> mask
445
+ tensor([[ True, False, True],
446
+ [False, False, False]])
447
+ >>> torch.masked._ops.log_softmax(input, 1, mask=mask)
448
+ tensor([[-2.1269, -inf, -0.1269],
449
+ [ nan, nan, nan]])
450
+ """
451
+
452
+ logsumexp_docstring = """logsumexp(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
453
+
454
+ Returns logsumexp of all the elements in the :attr:`input`
455
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
456
+ elements are masked out according to the boolean tensor
457
+ :attr:`mask`.
458
+
459
+ The identity value of logsumexp operation, which is used to start the reduction, is ``-2147483648``.
460
+
461
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
462
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
463
+ size 1. Otherwise, :attr:`dim` is squeezed (see
464
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
465
+ ``len(dim)``) fewer dimension(s).
466
+
467
+ The boolean tensor :attr:`mask` defines the "validity" of
468
+ :attr:`input` tensor elements: if :attr:`mask` element is True
469
+ then the corresponding element in :attr:`input` tensor will be
470
+ included in logsumexp computation, otherwise the element is
471
+ ignored.
472
+
473
+ When all elements of :attr:`input` along the given dimension
474
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
475
+ of the output tensor will have undefined value: it may or may not
476
+ correspond to the identity value of logsumexp operation; the
477
+ choice may correspond to the value that leads to the most efficient
478
+ storage of :attr:`output` tensor.
479
+
480
+ The mask of the output tensor can be computed as
481
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
482
+ dtype=torch.bool)``.
483
+
484
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
485
+ don't need to match, but they must be :ref:`broadcastable
486
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
487
+ tensor must not be greater than of the :attr:`input` tensor.
488
+
489
+ Args:
490
+ input (Tensor): the input tensor
491
+ dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
492
+ Default: None that is equivalent to ``tuple(range(input.ndim))``.
493
+
494
+ Keyword args:
495
+ keepdim (bool, optional): whether the output tensor has
496
+ :attr:`dim` retained or not. Default: False.
497
+ dtype (:class:`torch.dtype`, optional): the desired data type
498
+ of returned tensor. If specified, the input tensor is
499
+ casted to :attr:`dtype` before the operation is
500
+ performed. Default: None.
501
+ mask (:class:`torch.Tensor`, optional): the boolean tensor
502
+ containing the binary mask of validity of input tensor
503
+ elements.
504
+ Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
505
+
506
+ Example::
507
+
508
+ >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
509
+ >>> input
510
+ tensor([[-3, -2, -1],
511
+ [ 0, 1, 2]])
512
+ >>> mask = tensor([[ True, False, True], [False, False, False]])
513
+ >>> mask
514
+ tensor([[ True, False, True],
515
+ [False, False, False]])
516
+ >>> torch.masked._ops.logsumexp(input, 1, mask=mask)
517
+ tensor([ 0, -9223372036854775808])
518
+ """
519
+
520
+ mean_docstring = """mean(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
521
+
522
+ Returns mean of all the elements in the :attr:`input`
523
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
524
+ elements are masked out according to the boolean tensor
525
+ :attr:`mask`.
526
+
527
+ By definition, the identity value of a mean operation is the mean
528
+ value of the tensor. If all elements of the input tensor along given
529
+ dimension(s) :attr:`dim` are masked-out, the identity value of the
530
+ mean is undefined. Due to this ambiguity, the elements of output
531
+ tensor with strided layout, that correspond to fully masked-out
532
+ elements, have ``nan`` values.
533
+
534
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
535
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
536
+ size 1. Otherwise, :attr:`dim` is squeezed (see
537
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
538
+ ``len(dim)``) fewer dimension(s).
539
+
540
+ The boolean tensor :attr:`mask` defines the "validity" of
541
+ :attr:`input` tensor elements: if :attr:`mask` element is True
542
+ then the corresponding element in :attr:`input` tensor will be
543
+ included in mean computation, otherwise the element is
544
+ ignored.
545
+
546
+ When all elements of :attr:`input` along the given dimension
547
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
548
+ of the output tensor will have undefined value: it may or may not
549
+ correspond to the identity value of mean operation; the
550
+ choice may correspond to the value that leads to the most efficient
551
+ storage of :attr:`output` tensor.
552
+
553
+ The mask of the output tensor can be computed as
554
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
555
+ dtype=torch.bool)``.
556
+
557
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
558
+ don't need to match, but they must be :ref:`broadcastable
559
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
560
+ tensor must not be greater than of the :attr:`input` tensor.
561
+
562
+ Args:
563
+ input (Tensor): the input tensor
564
+ dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
565
+ Default: None that is equivalent to ``tuple(range(input.ndim))``.
566
+
567
+ Keyword args:
568
+ keepdim (bool, optional): whether the output tensor has
569
+ :attr:`dim` retained or not. Default: False.
570
+ dtype (:class:`torch.dtype`, optional): the desired data type
571
+ of returned tensor. If specified, the input tensor is
572
+ casted to :attr:`dtype` before the operation is
573
+ performed. Default: None.
574
+ mask (:class:`torch.Tensor`, optional): the boolean tensor
575
+ containing the binary mask of validity of input tensor
576
+ elements.
577
+ Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
578
+
579
+ Example::
580
+
581
+ >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
582
+ >>> input
583
+ tensor([[-3, -2, -1],
584
+ [ 0, 1, 2]])
585
+ >>> mask = tensor([[ True, False, True], [False, False, False]])
586
+ >>> mask
587
+ tensor([[ True, False, True],
588
+ [False, False, False]])
589
+ >>> torch.masked._ops.mean(input, 1, mask=mask)
590
+ tensor([-2., nan])
591
+ """
592
+
593
+ median_docstring = """median(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
594
+ Returns median of all the elements in the :attr:`input`
595
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
596
+ elements are masked out according to the boolean tensor
597
+ :attr:`mask`.
598
+ By definition, the identity value of a median operation is the median
599
+ value of the tensor. If all elements of the input tensor along given
600
+ dimension(s) :attr:`dim` are masked-out, the identity value of the
601
+ median is undefined. Due to this ambiguity, the elements of output
602
+ tensor with strided layout, that correspond to fully masked-out
603
+ elements, have ``nan`` values.
604
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
605
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
606
+ size 1. Otherwise, :attr:`dim` is squeezed (see
607
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
608
+ ``len(dim)``) fewer dimension(s).
609
+
610
+ The boolean tensor :attr:`mask` defines the "validity" of
611
+ :attr:`input` tensor elements: if :attr:`mask` element is True
612
+ then the corresponding element in :attr:`input` tensor will be
613
+ included in median computation, otherwise the element is
614
+ ignored.
615
+
616
+ When all elements of :attr:`input` along the given dimension
617
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
618
+ of the output tensor will have undefined value: it may or may not
619
+ correspond to the identity value of median operation; the
620
+ choice may correspond to the value that leads to the most efficient
621
+ storage of :attr:`output` tensor.
622
+
623
+ The mask of the output tensor can be computed as
624
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
625
+ dtype=torch.bool)``.
626
+
627
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
628
+ don't need to match, but they must be :ref:`broadcastable
629
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
630
+ tensor must not be greater than of the :attr:`input` tensor.
631
+
632
+ Args:
633
+ input (Tensor): the input tensor
634
+ dim (int): the dimension along which median is computed.
635
+
636
+ Keyword args:
637
+ keepdim (bool, optional): whether the output tensor has
638
+ :attr:`dim` retained or not. Default: False.
639
+ dtype (:class:`torch.dtype`, optional): the desired data type
640
+ of returned tensor. If specified, the input tensor is
641
+ casted to :attr:`dtype` before the operation is
642
+ performed. Default: None.
643
+ mask (:class:`torch.Tensor`, optional): the boolean tensor
644
+ containing the binary mask of validity of input tensor
645
+ elements.
646
+ Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
647
+ Example::
648
+
649
+ >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
650
+ >>> input
651
+ tensor([[-3., -2., -1.],
652
+ [ 0., 1., 2.]])
653
+ >>> mask = tensor([[ True, False, True], [False, False, False]])
654
+ >>> mask
655
+ tensor([[ True, False, True],
656
+ [False, False, False]])
657
+ >>> torch.masked._ops.median(input, 1, mask=mask)
658
+ tensor([-3., nan])
659
+ """
660
+
661
+ norm_docstring = """norm(input, ord, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
662
+
663
+ Returns norm of all the elements in the :attr:`input`
664
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
665
+ elements are masked out according to the boolean tensor
666
+ :attr:`mask`.
667
+
668
+ The identity value of norm operation, which is used to start the
669
+ reduction, is ``0.0``, except for ``ord=-inf`` it is
670
+ ``inf``.
671
+
672
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
673
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
674
+ size 1. Otherwise, :attr:`dim` is squeezed (see
675
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
676
+ ``len(dim)``) fewer dimension(s).
677
+
678
+ The boolean tensor :attr:`mask` defines the "validity" of
679
+ :attr:`input` tensor elements: if :attr:`mask` element is True
680
+ then the corresponding element in :attr:`input` tensor will be
681
+ included in norm computation, otherwise the element is
682
+ ignored.
683
+
684
+ When all elements of :attr:`input` along the given dimension
685
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
686
+ of the output tensor will have undefined value: it may or may not
687
+ correspond to the identity value of norm operation; the
688
+ choice may correspond to the value that leads to the most efficient
689
+ storage of :attr:`output` tensor.
690
+
691
+ The mask of the output tensor can be computed as
692
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
693
+ dtype=torch.bool)``.
694
+
695
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
696
+ don't need to match, but they must be :ref:`broadcastable
697
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
698
+ tensor must not be greater than of the :attr:`input` tensor.
699
+
700
+ Args:
701
+ input (Tensor): the input tensor
702
+ ord (int, float, optional): the order of vector norm. Default: 2.
703
+ See :func:`torch.linalg.vector_norm` for a list of supported norms.
704
+ dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
705
+ Default: None that is equivalent to ``tuple(range(input.ndim))``.
706
+
707
+ Keyword args:
708
+ keepdim (bool, optional): whether the output tensor has
709
+ :attr:`dim` retained or not. Default: False.
710
+ dtype (:class:`torch.dtype`, optional): the desired data type
711
+ of returned tensor. If specified, the input tensor is
712
+ casted to :attr:`dtype` before the operation is
713
+ performed. Default: None.
714
+ mask (:class:`torch.Tensor`, optional): the boolean tensor
715
+ containing the binary mask of validity of input tensor
716
+ elements.
717
+ Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
718
+
719
+ Example::
720
+
721
+ >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
722
+ >>> input
723
+ tensor([[-3., -2., -1.],
724
+ [ 0., 1., 2.]])
725
+ >>> mask = tensor([[ True, False, True], [False, False, False]])
726
+ >>> mask
727
+ tensor([[ True, False, True],
728
+ [False, False, False]])
729
+ >>> torch.masked._ops.norm(input, 2.0, 1, mask=mask)
730
+ tensor([3.1623, 0.0000])
731
+ """
732
+
733
+ normalize_docstring = """normalize(input, ord, dim, *, eps=1e-12, dtype=None, mask=None) -> Tensor
734
+
735
+ Returns normalize of all the slices in the :attr:`input` tensor
736
+ along :attr:`dim` while the :attr:`input` elements are masked out
737
+ according to the boolean tensor :attr:`mask`.
738
+
739
+ Let ``x`` be a sequence of unmasked elements of one-dimensional slice
740
+ of the :attr:`input` tensor. Normalize of i-th element in ``x`` is
741
+ defined as ``x[i]/max(norm(x, p), eps)``.
742
+
743
+ The boolean tensor :attr:`mask` defines the "validity" of
744
+ :attr:`input` tensor elements: if :attr:`mask` element is True then
745
+ the corresponding element in :attr:`input` tensor will be included in
746
+ normalize computation, otherwise the element is ignored.
747
+
748
+ The values of masked-out elements of the output tensor have undefined
749
+ value: it may or may not be set to zero or nan; the choice may correspond to
750
+ the value that leads to the most efficient storage of :attr:`output`
751
+ tensor.
752
+
753
+ The mask of the normalize output tensor can be computed as
754
+ ``torch.broadcast_to(mask, input.shape)``.
755
+
756
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
757
+ don't need to match, but they must be :ref:`broadcastable
758
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
759
+ tensor must not be greater than of the :attr:`input` tensor.
760
+
761
+ Args:
762
+ input (Tensor): the input tensor
763
+ ord (int, float): the order of vector norm. Default: 2.
764
+ See :func:`torch.linalg.vector_norm` for a list of supported norms.
765
+ dim (int): the dimension along which normalize is computed.
766
+
767
+ Keyword args:
768
+ eps (float, optional): small value to avoid division by zero. Default: 1e-12.
769
+ dtype (:class:`torch.dtype`, optional): the desired data type
770
+ of returned tensor. If specified, the input tensor is
771
+ casted to :attr:`dtype` before the operation is
772
+ performed. Default: None.
773
+ mask (:class:`torch.Tensor`, optional): the boolean tensor
774
+ containing the binary mask of validity of input tensor
775
+ elements.
776
+ Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
777
+
778
+ Example::
779
+
780
+ >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
781
+ >>> input
782
+ tensor([[-3., -2., -1.],
783
+ [ 0., 1., 2.]])
784
+ >>> mask = tensor([[ True, False, True], [False, False, False]])
785
+ >>> mask
786
+ tensor([[ True, False, True],
787
+ [False, False, False]])
788
+ >>> torch.masked._ops.normalize(input, 2.0, 1, mask=mask)
789
+ tensor([[-0.9487, 0.0000, -0.3162],
790
+ [ 0.0000, 0.0000, 0.0000]])
791
+ """
792
+
793
+ prod_docstring = """prod(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
794
+
795
+ Returns product of all the elements in the :attr:`input`
796
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
797
+ elements are masked out according to the boolean tensor
798
+ :attr:`mask`.
799
+
800
+ The identity value of product operation, which is used to start the reduction, is ``1``.
801
+
802
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
803
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
804
+ size 1. Otherwise, :attr:`dim` is squeezed (see
805
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
806
+ ``len(dim)``) fewer dimension(s).
807
+
808
+ The boolean tensor :attr:`mask` defines the "validity" of
809
+ :attr:`input` tensor elements: if :attr:`mask` element is True
810
+ then the corresponding element in :attr:`input` tensor will be
811
+ included in product computation, otherwise the element is
812
+ ignored.
813
+
814
+ When all elements of :attr:`input` along the given dimension
815
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
816
+ of the output tensor will have undefined value: it may or may not
817
+ correspond to the identity value of product operation; the
818
+ choice may correspond to the value that leads to the most efficient
819
+ storage of :attr:`output` tensor.
820
+
821
+ The mask of the output tensor can be computed as
822
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
823
+ dtype=torch.bool)``.
824
+
825
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
826
+ don't need to match, but they must be :ref:`broadcastable
827
+ <broadcasting-semantics>` and the dimensionality of the :attr:`mask`
828
+ tensor must not be greater than of the :attr:`input` tensor.
829
+
830
+ Args:
831
+ input (Tensor): the input tensor
832
+ dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
833
+ Default: None that is equivalent to ``tuple(range(input.ndim))``.
834
+
835
+ Keyword args:
836
+ keepdim (bool, optional): whether the output tensor has
837
+ :attr:`dim` retained or not. Default: False.
838
+ dtype (:class:`torch.dtype`, optional): the desired data type
839
+ of returned tensor. If specified, the input tensor is
840
+ casted to :attr:`dtype` before the operation is
841
+ performed. Default: None.
842
+ mask (:class:`torch.Tensor`, optional): the boolean tensor
843
+ containing the binary mask of validity of input tensor
844
+ elements.
845
+ Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
846
+
847
+ Example::
848
+
849
+ >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
850
+ >>> input
851
+ tensor([[-3, -2, -1],
852
+ [ 0, 1, 2]])
853
+ >>> mask = tensor([[ True, False, True], [False, False, False]])
854
+ >>> mask
855
+ tensor([[ True, False, True],
856
+ [False, False, False]])
857
+ >>> torch.masked._ops.prod(input, 1, mask=mask)
858
+ tensor([3, 1])
859
+ """
860
+
861
+ softmax_docstring = """softmax(input, dim, *, dtype=None, mask=None) -> Tensor
+
+ Returns the softmax of all slices in the :attr:`input` tensor
+ along :attr:`dim` while the :attr:`input` elements are masked out
+ according to the boolean tensor :attr:`mask`.
+
+ Let ``x`` be a sequence of unmasked elements of a one-dimensional slice
+ of the :attr:`input` tensor. The softmax of the i-th element of ``x`` is
+ defined as ``exp(x[i])/sum(exp(x))``.
+
+ The boolean tensor :attr:`mask` defines the "validity" of
+ :attr:`input` tensor elements: if a :attr:`mask` element is True, then
+ the corresponding element in the :attr:`input` tensor will be included in
+ the softmax computation, otherwise the element is ignored.
+
+ The values of masked-out elements of the output tensor are undefined:
+ they may or may not be set to zero or nan; the choice may correspond to
+ the value that leads to the most efficient storage of the :attr:`output`
+ tensor.
+
+ The mask of the softmax output tensor can be computed as
+ ``torch.broadcast_to(mask, input.shape)``.
+
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
+ don't need to match, but they must be :ref:`broadcastable
+ <broadcasting-semantics>`, and the dimensionality of the :attr:`mask`
+ tensor must not be greater than that of the :attr:`input` tensor.
+
+ Args:
+     input (Tensor): the input tensor
+     dim (int): the dimension along which softmax is computed.
+
+ Keyword args:
+     dtype (:class:`torch.dtype`, optional): the desired data type
+         of the returned tensor. If specified, the input tensor is
+         cast to :attr:`dtype` before the operation is
+         performed. Default: None.
+     mask (:class:`torch.Tensor`, optional): the boolean tensor
+         containing the binary mask of validity of input tensor
+         elements.
+         Default: None, which is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
+
+ Example::
+
+     >>> input = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
+     >>> input
+     tensor([[-3., -2., -1.],
+             [ 0.,  1.,  2.]])
+     >>> mask = torch.tensor([[True, False, True], [False, False, False]])
+     >>> mask
+     tensor([[ True, False,  True],
+             [False, False, False]])
+     >>> torch.masked._ops.softmax(input, 1, mask=mask)
+     tensor([[0.1192, 0.0000, 0.8808],
+             [   nan,    nan,    nan]])
+ """
+
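+ # A minimal equivalent sketch: the masked softmax above matches a plain
+ # softmax after filling masked-out positions with -inf; fully masked rows
+ # then come out as nan, consistent with the undefined values noted above:
+ #
+ #     >>> import torch
+ #     >>> input = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
+ #     >>> mask = torch.tensor([[True, False, True], [False, False, False]])
+ #     >>> torch.softmax(input.masked_fill(~mask, float("-inf")), dim=1)
+ #     tensor([[0.1192, 0.0000, 0.8808],
+ #             [   nan,    nan,    nan]])
+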
+ softmin_docstring = """softmin(input, dim, *, dtype=None, mask=None) -> Tensor
+
+ Returns the softmin of all slices in the :attr:`input` tensor
+ along :attr:`dim` while the :attr:`input` elements are masked out
+ according to the boolean tensor :attr:`mask`.
+
+ Let ``x`` be a sequence of unmasked elements of a one-dimensional slice
+ of the :attr:`input` tensor. The softmin of the i-th element of ``x`` is
+ defined as ``exp(-x[i])/sum(exp(-x))``.
+
+ The boolean tensor :attr:`mask` defines the "validity" of
+ :attr:`input` tensor elements: if a :attr:`mask` element is True, then
+ the corresponding element in the :attr:`input` tensor will be included in
+ the softmin computation, otherwise the element is ignored.
+
+ The values of masked-out elements of the output tensor are undefined:
+ they may or may not be set to zero or nan; the choice may correspond to
+ the value that leads to the most efficient storage of the :attr:`output`
+ tensor.
+
+ The mask of the softmin output tensor can be computed as
+ ``torch.broadcast_to(mask, input.shape)``.
+
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
+ don't need to match, but they must be :ref:`broadcastable
+ <broadcasting-semantics>`, and the dimensionality of the :attr:`mask`
+ tensor must not be greater than that of the :attr:`input` tensor.
+
+ Args:
+     input (Tensor): the input tensor
+     dim (int): the dimension along which softmin is computed.
+
+ Keyword args:
+     dtype (:class:`torch.dtype`, optional): the desired data type
+         of the returned tensor. If specified, the input tensor is
+         cast to :attr:`dtype` before the operation is
+         performed. Default: None.
+     mask (:class:`torch.Tensor`, optional): the boolean tensor
+         containing the binary mask of validity of input tensor
+         elements.
+         Default: None, which is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
+
+ Example::
+
+     >>> input = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
+     >>> input
+     tensor([[-3., -2., -1.],
+             [ 0.,  1.,  2.]])
+     >>> mask = torch.tensor([[True, False, True], [False, False, False]])
+     >>> mask
+     tensor([[ True, False,  True],
+             [False, False, False]])
+     >>> torch.masked._ops.softmin(input, 1, mask=mask)
+     tensor([[0.8808, 0.0000, 0.1192],
+             [   nan,    nan,    nan]])
+ """
+
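+ # A minimal equivalent sketch: softmin is the softmax of the negated input,
+ # so the masked variant can be reproduced by negating before applying the
+ # masked-softmax recipe sketched above:
+ #
+ #     >>> import torch
+ #     >>> input = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
+ #     >>> mask = torch.tensor([[True, False, True], [False, False, False]])
+ #     >>> torch.softmax((-input).masked_fill(~mask, float("-inf")), dim=1)
+ #     tensor([[0.8808, 0.0000, 0.1192],
+ #             [   nan,    nan,    nan]])
+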
+ std_docstring = """std(input, dim, unbiased, *, keepdim=False, dtype=None, mask=None) -> Tensor
+
+ Returns the standard deviation of all the elements in the :attr:`input`
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
+ elements are masked out according to the boolean tensor
+ :attr:`mask`.
+
+ The identity value of the sample standard deviation operation is undefined.
+ The elements of the output tensor with strided layout that correspond to
+ fully masked-out elements have ``nan`` values.
+
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
+ size 1. Otherwise, :attr:`dim` is squeezed (see
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
+ ``len(dim)``) fewer dimension(s).
+
+ The boolean tensor :attr:`mask` defines the "validity" of
+ :attr:`input` tensor elements: if a :attr:`mask` element is True,
+ then the corresponding element in the :attr:`input` tensor will be
+ included in the standard deviation computation, otherwise the element is
+ ignored.
+
+ When all elements of :attr:`input` along the given dimension
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
+ of the output tensor will have an undefined value: it may or may not
+ correspond to the identity value of the standard deviation operation; the
+ choice may correspond to the value that leads to the most efficient
+ storage of the :attr:`output` tensor.
+
+ The mask of the output tensor can be computed as
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
+ dtype=torch.bool)``.
+
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
+ don't need to match, but they must be :ref:`broadcastable
+ <broadcasting-semantics>`, and the dimensionality of the :attr:`mask`
+ tensor must not be greater than that of the :attr:`input` tensor.
+
+ Args:
+     input (Tensor): the input tensor
+     dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
+         Default: None, which is equivalent to ``tuple(range(input.ndim))``.
+     unbiased (bool): when True, use Bessel’s correction, otherwise, compute
+         the uncorrected sample standard deviation.
+
+ Keyword args:
+     keepdim (bool, optional): whether the output tensor has
+         :attr:`dim` retained or not. Default: False.
+     dtype (:class:`torch.dtype`, optional): the desired data type
+         of the returned tensor. If specified, the input tensor is
+         cast to :attr:`dtype` before the operation is
+         performed. Default: None.
+     mask (:class:`torch.Tensor`, optional): the boolean tensor
+         containing the binary mask of validity of input tensor
+         elements.
+         Default: None, which is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
+
+ Example::
+
+     >>> input = torch.tensor([[-3, -2, -1], [0, 1, 2]])
+     >>> input
+     tensor([[-3, -2, -1],
+             [ 0,  1,  2]])
+     >>> mask = torch.tensor([[True, False, True], [False, False, False]])
+     >>> mask
+     tensor([[ True, False,  True],
+             [False, False, False]])
+     >>> torch.masked._ops.std(input, 1, False, mask=mask)
+     tensor([1., nan])
+ """
+
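+ # A minimal equivalent sketch of the uncorrected case (unbiased=False):
+ # take the masked mean, then reduce the masked squared deviations. Fully
+ # masked rows divide by a zero count and come out as nan.
+ #
+ #     >>> import torch
+ #     >>> input = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
+ #     >>> mask = torch.tensor([[True, False, True], [False, False, False]])
+ #     >>> count = mask.sum(dim=1)
+ #     >>> mean = (input * mask).sum(dim=1) / count
+ #     >>> var = ((input - mean.unsqueeze(1)) ** 2 * mask).sum(dim=1) / count
+ #     >>> var.sqrt()
+ #     tensor([1., nan])
+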
+ sum_docstring = """sum(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
+
+ Returns the sum of all the elements in the :attr:`input`
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
+ elements are masked out according to the boolean tensor
+ :attr:`mask`.
+
+ The identity value of the sum operation, which is used to start the reduction, is ``0``.
+
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
+ size 1. Otherwise, :attr:`dim` is squeezed (see
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
+ ``len(dim)``) fewer dimension(s).
+
+ The boolean tensor :attr:`mask` defines the "validity" of
+ :attr:`input` tensor elements: if a :attr:`mask` element is True,
+ then the corresponding element in the :attr:`input` tensor will be
+ included in the sum computation, otherwise the element is
+ ignored.
+
+ When all elements of :attr:`input` along the given dimension
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
+ of the output tensor will have an undefined value: it may or may not
+ correspond to the identity value of the sum operation; the
+ choice may correspond to the value that leads to the most efficient
+ storage of the :attr:`output` tensor.
+
+ The mask of the output tensor can be computed as
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
+ dtype=torch.bool)``.
+
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
+ don't need to match, but they must be :ref:`broadcastable
+ <broadcasting-semantics>`, and the dimensionality of the :attr:`mask`
+ tensor must not be greater than that of the :attr:`input` tensor.
+
+ Args:
+     input (Tensor): the input tensor
+     dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
+         Default: None, which is equivalent to ``tuple(range(input.ndim))``.
+
+ Keyword args:
+     keepdim (bool, optional): whether the output tensor has
+         :attr:`dim` retained or not. Default: False.
+     dtype (:class:`torch.dtype`, optional): the desired data type
+         of the returned tensor. If specified, the input tensor is
+         cast to :attr:`dtype` before the operation is
+         performed. Default: None.
+     mask (:class:`torch.Tensor`, optional): the boolean tensor
+         containing the binary mask of validity of input tensor
+         elements.
+         Default: None, which is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
+
+ Example::
+
+     >>> input = torch.tensor([[-3, -2, -1], [0, 1, 2]])
+     >>> input
+     tensor([[-3, -2, -1],
+             [ 0,  1,  2]])
+     >>> mask = torch.tensor([[True, False, True], [False, False, False]])
+     >>> mask
+     tensor([[ True, False,  True],
+             [False, False, False]])
+     >>> torch.masked._ops.sum(input, 1, mask=mask)
+     tensor([-4, 0])
+ """
+
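+ # A minimal equivalent sketch: the masked sum above matches a plain sum
+ # after substituting the identity value 0 for masked-out elements:
+ #
+ #     >>> import torch
+ #     >>> input = torch.tensor([[-3, -2, -1], [0, 1, 2]])
+ #     >>> mask = torch.tensor([[True, False, True], [False, False, False]])
+ #     >>> torch.where(mask, input, torch.zeros_like(input)).sum(dim=1)
+ #     tensor([-4, 0])
+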
+ var_docstring = """var(input, dim, unbiased, *, keepdim=False, dtype=None, mask=None) -> Tensor
+
+ Returns the variance of all the elements in the :attr:`input`
+ tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
+ elements are masked out according to the boolean tensor
+ :attr:`mask`.
+
+ The identity value of the sample variance operation is undefined. The
+ elements of the output tensor with strided layout that correspond to
+ fully masked-out elements have ``nan`` values.
+
+ If :attr:`keepdim` is ``True``, the output tensor is of the same size
+ as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
+ size 1. Otherwise, :attr:`dim` is squeezed (see
+ :func:`torch.squeeze`), resulting in the output tensor having 1 (or
+ ``len(dim)``) fewer dimension(s).
+
+ The boolean tensor :attr:`mask` defines the "validity" of
+ :attr:`input` tensor elements: if a :attr:`mask` element is True,
+ then the corresponding element in the :attr:`input` tensor will be
+ included in the variance computation, otherwise the element is
+ ignored.
+
+ When all elements of :attr:`input` along the given dimension
+ :attr:`dim` are ignored (fully masked-out), the corresponding element
+ of the output tensor will have an undefined value: it may or may not
+ correspond to the identity value of the variance operation; the
+ choice may correspond to the value that leads to the most efficient
+ storage of the :attr:`output` tensor.
+
+ The mask of the output tensor can be computed as
+ ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
+ dtype=torch.bool)``.
+
+ The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
+ don't need to match, but they must be :ref:`broadcastable
+ <broadcasting-semantics>`, and the dimensionality of the :attr:`mask`
+ tensor must not be greater than that of the :attr:`input` tensor.
+
+ Args:
+     input (Tensor): the input tensor
+     dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
+         Default: None, which is equivalent to ``tuple(range(input.ndim))``.
+     unbiased (bool): when True, use Bessel’s correction, otherwise, compute
+         the uncorrected sample variance.
+
+ Keyword args:
+     keepdim (bool, optional): whether the output tensor has
+         :attr:`dim` retained or not. Default: False.
+     dtype (:class:`torch.dtype`, optional): the desired data type
+         of the returned tensor. If specified, the input tensor is
+         cast to :attr:`dtype` before the operation is
+         performed. Default: None.
+     mask (:class:`torch.Tensor`, optional): the boolean tensor
+         containing the binary mask of validity of input tensor
+         elements.
+         Default: None, which is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
+
+ Example::
+
+     >>> input = torch.tensor([[-3, -2, -1], [0, 1, 2]])
+     >>> input
+     tensor([[-3, -2, -1],
+             [ 0,  1,  2]])
+     >>> mask = torch.tensor([[True, False, True], [False, False, False]])
+     >>> mask
+     tensor([[ True, False,  True],
+             [False, False, False]])
+     >>> torch.masked._ops.var(input, 1, False, mask=mask)
+     tensor([1., nan])
+ """