Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/__init__.py +179 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/delayed_mul_tensor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/dim.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/magic_trace.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/op_properties.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/reference.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/tree_map.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/wrap_type.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/batch_tensor.py +25 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/dim.py +110 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/magic_trace.py +42 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/op_properties.py +311 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/reference.py +645 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/tree_map.py +14 -0
- env-llmeval/lib/python3.10/site-packages/functorch/dim/wrap_type.py +71 -0
- env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so +3 -0
- env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so +3 -0
- env-llmeval/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 +3 -0
- env-llmeval/lib/python3.10/site-packages/pytz/lazy.py +172 -0
- env-llmeval/lib/python3.10/site-packages/pytz/reference.py +140 -0
- env-llmeval/lib/python3.10/site-packages/pytz/tzinfo.py +580 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/CET +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/CST6CDT +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Cuba +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/EET +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/EST +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/EST5EDT +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Egypt +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+12 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+2 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+5 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+7 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+8 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-11 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-3 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-7 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT0 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/UCT +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/Universal +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Factory +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/GB-Eire +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/GMT +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/GMT+0 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/GMT-0 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/GMT0 +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/HST +0 -0
- env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Hongkong +0 -0
.gitattributes
CHANGED
@@ -150,3 +150,6 @@ env-llmeval/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolver.so.11 f
|
|
150 |
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text
|
151 |
env-llmeval/lib/python3.10/site-packages/nvidia/cusparse/lib/libcusparse.so.12 filter=lfs diff=lfs merge=lfs -text
|
152 |
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
150 |
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text
|
151 |
env-llmeval/lib/python3.10/site-packages/nvidia/cusparse/lib/libcusparse.so.12 filter=lfs diff=lfs merge=lfs -text
|
152 |
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 filter=lfs diff=lfs merge=lfs -text
|
153 |
+
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 filter=lfs diff=lfs merge=lfs -text
|
154 |
+
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
|
155 |
+
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so filter=lfs diff=lfs merge=lfs -text
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/__init__.py
ADDED
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import dis
|
2 |
+
import inspect
|
3 |
+
from typing import Sequence, Union
|
4 |
+
|
5 |
+
import torch
|
6 |
+
|
7 |
+
import functorch._C
|
8 |
+
from functorch._C import dim as _C
|
9 |
+
from .tree_map import tree_flatten, tree_map
|
10 |
+
from .wrap_type import wrap_type
|
11 |
+
|
12 |
+
_C._patch_tensor_class()
|
13 |
+
dims, DimList, dimlists = _C.dims, _C.DimList, _C.dimlists
|
14 |
+
|
15 |
+
|
16 |
+
class DimensionMismatchError(Exception):
|
17 |
+
pass
|
18 |
+
|
19 |
+
|
20 |
+
class DimensionBindError(Exception):
|
21 |
+
pass
|
22 |
+
|
23 |
+
|
24 |
+
from . import op_properties
|
25 |
+
|
26 |
+
# use dict to avoid writing C++ bindings for set
|
27 |
+
pointwise = {t: True for t in op_properties.pointwise}
|
28 |
+
|
29 |
+
use_c = True
|
30 |
+
if not use_c:
|
31 |
+
from . import reference
|
32 |
+
|
33 |
+
|
34 |
+
class _Tensor:
|
35 |
+
# fast path around slow wrapping/unwrapping logic for simply queries used
|
36 |
+
# by the implementation...
|
37 |
+
|
38 |
+
@property
|
39 |
+
def dims(self):
|
40 |
+
return tuple(d for d in self._levels if isinstance(d, Dim))
|
41 |
+
|
42 |
+
def dim(self):
|
43 |
+
return self.ndim
|
44 |
+
|
45 |
+
if use_c:
|
46 |
+
__torch_function__ = classmethod(_C.__torch_function__)
|
47 |
+
expand = _C._instancemethod(_C.expand)
|
48 |
+
else:
|
49 |
+
__torch_function__ = reference.__torch_function__
|
50 |
+
expand = reference.expand
|
51 |
+
|
52 |
+
index = _C._instancemethod(_C.index)
|
53 |
+
|
54 |
+
def __repr__(self):
|
55 |
+
tensor, levels, ndim = self._tensor, self._levels, self.ndim
|
56 |
+
return f"{tensor}\nwith dims={tuple(l + ndim if isinstance(l, int) else l for l in levels)} sizes={tuple(tensor.size())}"
|
57 |
+
|
58 |
+
|
59 |
+
TensorLike = (_Tensor, torch.Tensor)
|
60 |
+
|
61 |
+
|
62 |
+
class Dim(_C.Dim, _Tensor):
|
63 |
+
# note that _C.Dim comes before tensor because we want the Dim API for things like size to take precendence.
|
64 |
+
# Tensor defines format, but we want to print Dims with special formatting
|
65 |
+
__format__ = object.__format__
|
66 |
+
|
67 |
+
|
68 |
+
class Tensor(_Tensor, _C.Tensor):
|
69 |
+
if not use_c:
|
70 |
+
from_batched = staticmethod(_C.Tensor_from_batched)
|
71 |
+
from_positional = staticmethod(_C.Tensor_from_positional)
|
72 |
+
sum = _C._instancemethod(_C.Tensor_sum)
|
73 |
+
|
74 |
+
|
75 |
+
def cat(tensors, dim, new_dim):
|
76 |
+
n = dims()
|
77 |
+
return stack(tensors, n, dim).index([n, dim], new_dim)
|
78 |
+
|
79 |
+
|
80 |
+
if use_c:
|
81 |
+
_wrap = _C._wrap
|
82 |
+
|
83 |
+
def _def(name, *args, **kwargs):
|
84 |
+
orig = getattr(torch.Tensor, name)
|
85 |
+
setattr(_Tensor, name, _C._instancemethod(_wrap(orig, *args, **kwargs)))
|
86 |
+
|
87 |
+
t__getitem__ = _C._instancemethod(_C.__getitem__)
|
88 |
+
stack = _C.stack
|
89 |
+
split = _C._instancemethod(_C.split)
|
90 |
+
else:
|
91 |
+
_wrap, _def = reference._wrap, reference._def
|
92 |
+
t__getitem__ = reference.t__getitem__
|
93 |
+
stack = reference.stack
|
94 |
+
split = reference.split
|
95 |
+
|
96 |
+
# note: there is no python reference
|
97 |
+
t__setitem__ = _C._instancemethod(_C.__setitem__)
|
98 |
+
# this is patched in the C API because otherwise torch.Tensor will
|
99 |
+
# no longer be considered a sequence and things will break
|
100 |
+
# torch.Tensor.__getitem__ = t__getitem__
|
101 |
+
|
102 |
+
_Tensor.__getitem__ = t__getitem__
|
103 |
+
# torch.Tensor.__setitem__ = t__setitem__
|
104 |
+
_Tensor.__setitem__ = t__setitem__
|
105 |
+
|
106 |
+
torch.Tensor.split = split
|
107 |
+
_Tensor.split = split
|
108 |
+
torch.Tensor.expand = _C._instancemethod(_C.expand)
|
109 |
+
torch.Tensor.index = _C._instancemethod(_C.index)
|
110 |
+
wrap_type(use_c, _Tensor, torch.Tensor, _Tensor.__torch_function__)
|
111 |
+
del _Tensor.ndim
|
112 |
+
|
113 |
+
if use_c:
|
114 |
+
_Tensor.order = _C._instancemethod(_C.order)
|
115 |
+
else:
|
116 |
+
_Tensor.order = reference.positional
|
117 |
+
|
118 |
+
_def("mean")
|
119 |
+
_def("sum")
|
120 |
+
_def("all")
|
121 |
+
_def("amax")
|
122 |
+
_def("amin")
|
123 |
+
_def("aminmax")
|
124 |
+
_def("any")
|
125 |
+
_def("count_nonzero")
|
126 |
+
_def("logsumexp")
|
127 |
+
_def("nanmean")
|
128 |
+
_def("nansum")
|
129 |
+
_def("prod")
|
130 |
+
_def("std", keepdim_offset=2)
|
131 |
+
_def("var", keepdim_offset=2)
|
132 |
+
_def("max", single_dim=True)
|
133 |
+
_def("min", single_dim=True)
|
134 |
+
_def("argmax", single_dim=True)
|
135 |
+
_def("argmin", single_dim=True)
|
136 |
+
_def("kthvalue", single_dim=True)
|
137 |
+
_def("median", single_dim=True)
|
138 |
+
_def("nanmedian", single_dim=True)
|
139 |
+
_def("mode", single_dim=True)
|
140 |
+
_def("sort", reduce=False)
|
141 |
+
_def("argsort", reduce=False)
|
142 |
+
_def("unbind", single_dim=True)
|
143 |
+
_def("chunk", dim_offset=1, reduce=False)
|
144 |
+
_def("cummax", single_dim=True, reduce=False)
|
145 |
+
_def("cummin", single_dim=True, reduce=False)
|
146 |
+
_def("cumprod", single_dim=True, reduce=False)
|
147 |
+
_def("cumprod_", single_dim=True, reduce=False)
|
148 |
+
_def("cumsum", single_dim=True, reduce=False)
|
149 |
+
_def("cumsum_", single_dim=True, reduce=False)
|
150 |
+
_def("logcumsumexp", single_dim=True, reduce=False)
|
151 |
+
_def("renorm", dim_offset=1, single_dim=True, reduce=False)
|
152 |
+
_def("softmax", single_dim=True, reduce=False)
|
153 |
+
softmax = _wrap(torch.nn.functional.softmax, single_dim=True, reduce=False)
|
154 |
+
|
155 |
+
# stuff to handle in the future, because they require special
|
156 |
+
# binding logic for dims
|
157 |
+
# cross
|
158 |
+
# diag_embed
|
159 |
+
# diagonal
|
160 |
+
# diagonal_scatter
|
161 |
+
# diff
|
162 |
+
# nanquantile
|
163 |
+
# quantile
|
164 |
+
# roll
|
165 |
+
# rot90
|
166 |
+
# topk (new dimes on output)
|
167 |
+
# should these all be subsumed by inplace indexing?
|
168 |
+
# index_add_
|
169 |
+
# index_add
|
170 |
+
# index_copy
|
171 |
+
# index_copy_
|
172 |
+
# index_fill
|
173 |
+
# index_fill_
|
174 |
+
# index_select
|
175 |
+
# scatter
|
176 |
+
# scatter_
|
177 |
+
# scatter_add
|
178 |
+
# scatter_add_
|
179 |
+
# scatter_reduce
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.28 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/batch_tensor.cpython-310.pyc
ADDED
Binary file (786 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/delayed_mul_tensor.cpython-310.pyc
ADDED
Binary file (3.19 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/dim.cpython-310.pyc
ADDED
Binary file (3.95 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/magic_trace.cpython-310.pyc
ADDED
Binary file (1.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/op_properties.cpython-310.pyc
ADDED
Binary file (5.64 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/reference.cpython-310.pyc
ADDED
Binary file (16.1 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/tree_map.cpython-310.pyc
ADDED
Binary file (540 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/__pycache__/wrap_type.cpython-310.pyc
ADDED
Binary file (1.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/batch_tensor.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
#
|
4 |
+
# This source code is licensed under the BSD-style license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
from contextlib import contextmanager
|
7 |
+
|
8 |
+
from torch._C._functorch import _vmap_add_layers, _vmap_remove_layers
|
9 |
+
|
10 |
+
_enabled = False
|
11 |
+
|
12 |
+
|
13 |
+
@contextmanager
|
14 |
+
def _enable_layers(dims):
|
15 |
+
global _enabled
|
16 |
+
assert not _enabled
|
17 |
+
input = sorted((d._level, d.size) for d in dims if not isinstance(d, int))
|
18 |
+
n = len(input)
|
19 |
+
try:
|
20 |
+
_vmap_add_layers(input)
|
21 |
+
_enabled = True
|
22 |
+
yield
|
23 |
+
finally:
|
24 |
+
_enabled = False
|
25 |
+
_vmap_remove_layers(n)
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/dim.py
ADDED
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
#
|
4 |
+
# This source code is licensed under the BSD-style license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
_vmap_levels = []
|
7 |
+
|
8 |
+
|
9 |
+
@dataclass
|
10 |
+
class LevelInfo:
|
11 |
+
level: int
|
12 |
+
alive: bool = True
|
13 |
+
|
14 |
+
|
15 |
+
class Dim:
|
16 |
+
def __init__(self, name: str, size: Union[None, int] = None):
|
17 |
+
self.name = name
|
18 |
+
self._size = None
|
19 |
+
self._vmap_level = None
|
20 |
+
if size is not None:
|
21 |
+
self.size = size
|
22 |
+
|
23 |
+
def __del__(self):
|
24 |
+
if self._vmap_level is not None:
|
25 |
+
_vmap_active_levels[self._vmap_stack].alive = False
|
26 |
+
while (
|
27 |
+
not _vmap_levels[-1].alive and current_level() == _vmap_levels[-1].level
|
28 |
+
):
|
29 |
+
_vmap_decrement_nesting()
|
30 |
+
_vmap_levels.pop()
|
31 |
+
|
32 |
+
@property
|
33 |
+
def size(self):
|
34 |
+
assert self.is_bound
|
35 |
+
return self._size
|
36 |
+
|
37 |
+
@size.setter
|
38 |
+
def size(self, size: int):
|
39 |
+
if self._size is None:
|
40 |
+
self._size = size
|
41 |
+
self._vmap_level = _vmap_increment_nesting(size, "same")
|
42 |
+
self._vmap_stack = len(_vmap_levels)
|
43 |
+
_vmap_levels.append(LevelInfo(self._vmap_level))
|
44 |
+
|
45 |
+
elif self._size != size:
|
46 |
+
raise DimensionBindError(
|
47 |
+
f"Dim '{self}' previously bound to a dimension of size {self._size} cannot bind to a dimension of size {size}"
|
48 |
+
)
|
49 |
+
|
50 |
+
@property
|
51 |
+
def is_bound(self):
|
52 |
+
return self._size is not None
|
53 |
+
|
54 |
+
def __repr__(self):
|
55 |
+
return self.name
|
56 |
+
|
57 |
+
|
58 |
+
def extract_name(inst):
|
59 |
+
assert inst.opname == "STORE_FAST" or inst.opname == "STORE_NAME"
|
60 |
+
return inst.argval
|
61 |
+
|
62 |
+
|
63 |
+
_cache = {}
|
64 |
+
|
65 |
+
|
66 |
+
def dims(lists=0):
|
67 |
+
frame = inspect.currentframe()
|
68 |
+
assert frame is not None
|
69 |
+
calling_frame = frame.f_back
|
70 |
+
assert calling_frame is not None
|
71 |
+
code, lasti = calling_frame.f_code, calling_frame.f_lasti
|
72 |
+
key = (code, lasti)
|
73 |
+
if key not in _cache:
|
74 |
+
first = lasti // 2 + 1
|
75 |
+
instructions = list(dis.get_instructions(calling_frame.f_code))
|
76 |
+
unpack = instructions[first]
|
77 |
+
|
78 |
+
if unpack.opname == "STORE_FAST" or unpack.opname == "STORE_NAME":
|
79 |
+
# just a single dim, not a list
|
80 |
+
name = unpack.argval
|
81 |
+
ctor = Dim if lists == 0 else DimList
|
82 |
+
_cache[key] = lambda: ctor(name=name)
|
83 |
+
else:
|
84 |
+
assert unpack.opname == "UNPACK_SEQUENCE"
|
85 |
+
ndims = unpack.argval
|
86 |
+
names = tuple(
|
87 |
+
extract_name(instructions[first + 1 + i]) for i in range(ndims)
|
88 |
+
)
|
89 |
+
first_list = len(names) - lists
|
90 |
+
_cache[key] = lambda: tuple(
|
91 |
+
Dim(n) if i < first_list else DimList(name=n)
|
92 |
+
for i, n in enumerate(names)
|
93 |
+
)
|
94 |
+
return _cache[key]()
|
95 |
+
|
96 |
+
|
97 |
+
def _dim_set(positional, arg):
|
98 |
+
def convert(a):
|
99 |
+
if isinstance(a, Dim):
|
100 |
+
return a
|
101 |
+
else:
|
102 |
+
assert isinstance(a, int)
|
103 |
+
return positional[a]
|
104 |
+
|
105 |
+
if arg is None:
|
106 |
+
return positional
|
107 |
+
elif not isinstance(arg, (Dim, int)):
|
108 |
+
return tuple(convert(a) for a in arg)
|
109 |
+
else:
|
110 |
+
return (convert(arg),)
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/magic_trace.py
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
#
|
4 |
+
# This source code is licensed under the BSD-style license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
import os
|
7 |
+
import signal
|
8 |
+
import subprocess
|
9 |
+
from contextlib import contextmanager
|
10 |
+
|
11 |
+
|
12 |
+
@contextmanager
|
13 |
+
def magic_trace(output="trace.fxt", magic_trace_cache="/tmp/magic-trace"):
|
14 |
+
pid = os.getpid()
|
15 |
+
if not os.path.exists(magic_trace_cache):
|
16 |
+
print(f"Downloading magic_trace to: {magic_trace_cache}")
|
17 |
+
subprocess.run(
|
18 |
+
[
|
19 |
+
"wget",
|
20 |
+
"-O",
|
21 |
+
magic_trace_cache,
|
22 |
+
"-q",
|
23 |
+
"https://github.com/janestreet/magic-trace/releases/download/v1.0.2/magic-trace",
|
24 |
+
]
|
25 |
+
)
|
26 |
+
subprocess.run(["chmod", "+x", magic_trace_cache])
|
27 |
+
args = [magic_trace_cache, "attach", "-pid", str(pid), "-o", output]
|
28 |
+
p = subprocess.Popen(args, stderr=subprocess.PIPE, encoding="utf-8")
|
29 |
+
while True:
|
30 |
+
x = p.stderr.readline()
|
31 |
+
print(x)
|
32 |
+
if "Attached" in x:
|
33 |
+
break
|
34 |
+
try:
|
35 |
+
yield
|
36 |
+
finally:
|
37 |
+
p.send_signal(signal.SIGINT)
|
38 |
+
r = p.wait()
|
39 |
+
print(p.stderr.read())
|
40 |
+
p.stderr.close()
|
41 |
+
if r != 0:
|
42 |
+
raise ValueError(f"magic_trace exited abnormally: {r}")
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/op_properties.py
ADDED
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
#
|
4 |
+
# This source code is licensed under the BSD-style license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
import torch
|
7 |
+
|
8 |
+
# pointwise operators can go through a faster pathway
|
9 |
+
|
10 |
+
tensor_magic_methods = ["add", ""]
|
11 |
+
pointwise_magic_methods_with_reverse = (
|
12 |
+
"add",
|
13 |
+
"sub",
|
14 |
+
"mul",
|
15 |
+
"floordiv",
|
16 |
+
"div",
|
17 |
+
"truediv",
|
18 |
+
"mod",
|
19 |
+
"pow",
|
20 |
+
"lshift",
|
21 |
+
"rshift",
|
22 |
+
"and",
|
23 |
+
"or",
|
24 |
+
"xor",
|
25 |
+
)
|
26 |
+
pointwise_magic_methods = (
|
27 |
+
*(x for m in pointwise_magic_methods_with_reverse for x in (m, "r" + m)),
|
28 |
+
"eq",
|
29 |
+
"gt",
|
30 |
+
"le",
|
31 |
+
"lt",
|
32 |
+
"ge",
|
33 |
+
"gt",
|
34 |
+
"ne",
|
35 |
+
"neg",
|
36 |
+
"pos",
|
37 |
+
"abs",
|
38 |
+
"invert",
|
39 |
+
"iadd",
|
40 |
+
"isub",
|
41 |
+
"imul",
|
42 |
+
"ifloordiv",
|
43 |
+
"idiv",
|
44 |
+
"itruediv",
|
45 |
+
"imod",
|
46 |
+
"ipow",
|
47 |
+
"ilshift",
|
48 |
+
"irshift",
|
49 |
+
"iand",
|
50 |
+
"ior",
|
51 |
+
"ixor",
|
52 |
+
"int",
|
53 |
+
"long",
|
54 |
+
"float",
|
55 |
+
"complex",
|
56 |
+
)
|
57 |
+
|
58 |
+
pointwise_methods = (*(f"__{m}__" for m in pointwise_magic_methods),)
|
59 |
+
|
60 |
+
pointwise = (
|
61 |
+
*(getattr(torch.Tensor, m) for m in pointwise_methods),
|
62 |
+
torch.nn.functional.dropout,
|
63 |
+
torch.where,
|
64 |
+
torch.Tensor.abs,
|
65 |
+
torch.abs,
|
66 |
+
torch.Tensor.acos,
|
67 |
+
torch.acos,
|
68 |
+
torch.Tensor.acosh,
|
69 |
+
torch.acosh,
|
70 |
+
torch.Tensor.add,
|
71 |
+
torch.add,
|
72 |
+
torch.Tensor.addcdiv,
|
73 |
+
torch.addcdiv,
|
74 |
+
torch.Tensor.addcmul,
|
75 |
+
torch.addcmul,
|
76 |
+
torch.Tensor.addr,
|
77 |
+
torch.addr,
|
78 |
+
torch.Tensor.angle,
|
79 |
+
torch.angle,
|
80 |
+
torch.Tensor.asin,
|
81 |
+
torch.asin,
|
82 |
+
torch.Tensor.asinh,
|
83 |
+
torch.asinh,
|
84 |
+
torch.Tensor.atan,
|
85 |
+
torch.atan,
|
86 |
+
torch.Tensor.atan2,
|
87 |
+
torch.atan2,
|
88 |
+
torch.Tensor.atanh,
|
89 |
+
torch.atanh,
|
90 |
+
torch.Tensor.bitwise_and,
|
91 |
+
torch.bitwise_and,
|
92 |
+
torch.Tensor.bitwise_left_shift,
|
93 |
+
torch.bitwise_left_shift,
|
94 |
+
torch.Tensor.bitwise_not,
|
95 |
+
torch.bitwise_not,
|
96 |
+
torch.Tensor.bitwise_or,
|
97 |
+
torch.bitwise_or,
|
98 |
+
torch.Tensor.bitwise_right_shift,
|
99 |
+
torch.bitwise_right_shift,
|
100 |
+
torch.Tensor.bitwise_xor,
|
101 |
+
torch.bitwise_xor,
|
102 |
+
torch.Tensor.ceil,
|
103 |
+
torch.ceil,
|
104 |
+
torch.celu,
|
105 |
+
torch.nn.functional.celu,
|
106 |
+
torch.Tensor.clamp,
|
107 |
+
torch.clamp,
|
108 |
+
torch.Tensor.clamp_max,
|
109 |
+
torch.clamp_max,
|
110 |
+
torch.Tensor.clamp_min,
|
111 |
+
torch.clamp_min,
|
112 |
+
torch.Tensor.copysign,
|
113 |
+
torch.copysign,
|
114 |
+
torch.Tensor.cos,
|
115 |
+
torch.cos,
|
116 |
+
torch.Tensor.cosh,
|
117 |
+
torch.cosh,
|
118 |
+
torch.Tensor.deg2rad,
|
119 |
+
torch.deg2rad,
|
120 |
+
torch.Tensor.digamma,
|
121 |
+
torch.digamma,
|
122 |
+
torch.Tensor.div,
|
123 |
+
torch.div,
|
124 |
+
torch.dropout,
|
125 |
+
torch.nn.functional.dropout,
|
126 |
+
torch.nn.functional.elu,
|
127 |
+
torch.Tensor.eq,
|
128 |
+
torch.eq,
|
129 |
+
torch.Tensor.erf,
|
130 |
+
torch.erf,
|
131 |
+
torch.Tensor.erfc,
|
132 |
+
torch.erfc,
|
133 |
+
torch.Tensor.erfinv,
|
134 |
+
torch.erfinv,
|
135 |
+
torch.Tensor.exp,
|
136 |
+
torch.exp,
|
137 |
+
torch.Tensor.exp2,
|
138 |
+
torch.exp2,
|
139 |
+
torch.Tensor.expm1,
|
140 |
+
torch.expm1,
|
141 |
+
torch.feature_dropout,
|
142 |
+
torch.Tensor.float_power,
|
143 |
+
torch.float_power,
|
144 |
+
torch.Tensor.floor,
|
145 |
+
torch.floor,
|
146 |
+
torch.Tensor.floor_divide,
|
147 |
+
torch.floor_divide,
|
148 |
+
torch.Tensor.fmod,
|
149 |
+
torch.fmod,
|
150 |
+
torch.Tensor.frac,
|
151 |
+
torch.frac,
|
152 |
+
torch.Tensor.frexp,
|
153 |
+
torch.frexp,
|
154 |
+
torch.Tensor.gcd,
|
155 |
+
torch.gcd,
|
156 |
+
torch.Tensor.ge,
|
157 |
+
torch.ge,
|
158 |
+
torch.nn.functional.gelu,
|
159 |
+
torch.nn.functional.glu,
|
160 |
+
torch.Tensor.gt,
|
161 |
+
torch.gt,
|
162 |
+
torch.Tensor.hardshrink,
|
163 |
+
torch.hardshrink,
|
164 |
+
torch.nn.functional.hardshrink,
|
165 |
+
torch.nn.functional.hardsigmoid,
|
166 |
+
torch.nn.functional.hardswish,
|
167 |
+
torch.nn.functional.hardtanh,
|
168 |
+
torch.Tensor.heaviside,
|
169 |
+
torch.heaviside,
|
170 |
+
torch.Tensor.hypot,
|
171 |
+
torch.hypot,
|
172 |
+
torch.Tensor.i0,
|
173 |
+
torch.i0,
|
174 |
+
torch.Tensor.igamma,
|
175 |
+
torch.igamma,
|
176 |
+
torch.Tensor.igammac,
|
177 |
+
torch.igammac,
|
178 |
+
torch.Tensor.isclose,
|
179 |
+
torch.isclose,
|
180 |
+
torch.Tensor.isfinite,
|
181 |
+
torch.isfinite,
|
182 |
+
torch.Tensor.isinf,
|
183 |
+
torch.isinf,
|
184 |
+
torch.Tensor.isnan,
|
185 |
+
torch.isnan,
|
186 |
+
torch.Tensor.isneginf,
|
187 |
+
torch.isneginf,
|
188 |
+
torch.Tensor.isposinf,
|
189 |
+
torch.isposinf,
|
190 |
+
torch.Tensor.isreal,
|
191 |
+
torch.isreal,
|
192 |
+
torch.Tensor.kron,
|
193 |
+
torch.kron,
|
194 |
+
torch.Tensor.lcm,
|
195 |
+
torch.lcm,
|
196 |
+
torch.Tensor.ldexp,
|
197 |
+
torch.ldexp,
|
198 |
+
torch.Tensor.le,
|
199 |
+
torch.le,
|
200 |
+
torch.nn.functional.leaky_relu,
|
201 |
+
torch.Tensor.lerp,
|
202 |
+
torch.lerp,
|
203 |
+
torch.Tensor.lgamma,
|
204 |
+
torch.lgamma,
|
205 |
+
torch.Tensor.log,
|
206 |
+
torch.log,
|
207 |
+
torch.Tensor.log10,
|
208 |
+
torch.log10,
|
209 |
+
torch.Tensor.log1p,
|
210 |
+
torch.log1p,
|
211 |
+
torch.Tensor.log2,
|
212 |
+
torch.log2,
|
213 |
+
torch.nn.functional.logsigmoid,
|
214 |
+
torch.Tensor.logical_and,
|
215 |
+
torch.logical_and,
|
216 |
+
torch.Tensor.logical_not,
|
217 |
+
torch.logical_not,
|
218 |
+
torch.Tensor.logical_or,
|
219 |
+
torch.logical_or,
|
220 |
+
torch.Tensor.logical_xor,
|
221 |
+
torch.logical_xor,
|
222 |
+
torch.Tensor.logit,
|
223 |
+
torch.logit,
|
224 |
+
torch.Tensor.lt,
|
225 |
+
torch.lt,
|
226 |
+
torch.Tensor.maximum,
|
227 |
+
torch.maximum,
|
228 |
+
torch.Tensor.minimum,
|
229 |
+
torch.minimum,
|
230 |
+
torch.nn.functional.mish,
|
231 |
+
torch.Tensor.mvlgamma,
|
232 |
+
torch.mvlgamma,
|
233 |
+
torch.Tensor.nan_to_num,
|
234 |
+
torch.nan_to_num,
|
235 |
+
torch.Tensor.ne,
|
236 |
+
torch.ne,
|
237 |
+
torch.Tensor.neg,
|
238 |
+
torch.neg,
|
239 |
+
torch.Tensor.nextafter,
|
240 |
+
torch.nextafter,
|
241 |
+
torch.Tensor.outer,
|
242 |
+
torch.outer,
|
243 |
+
torch.polar,
|
244 |
+
torch.Tensor.polygamma,
|
245 |
+
torch.polygamma,
|
246 |
+
torch.Tensor.positive,
|
247 |
+
torch.positive,
|
248 |
+
torch.Tensor.pow,
|
249 |
+
torch.pow,
|
250 |
+
torch.Tensor.prelu,
|
251 |
+
torch.prelu,
|
252 |
+
torch.nn.functional.prelu,
|
253 |
+
torch.Tensor.rad2deg,
|
254 |
+
torch.rad2deg,
|
255 |
+
torch.Tensor.reciprocal,
|
256 |
+
torch.reciprocal,
|
257 |
+
torch.Tensor.relu,
|
258 |
+
torch.relu,
|
259 |
+
torch.nn.functional.relu,
|
260 |
+
torch.nn.functional.relu6,
|
261 |
+
torch.Tensor.remainder,
|
262 |
+
torch.remainder,
|
263 |
+
torch.Tensor.round,
|
264 |
+
torch.round,
|
265 |
+
torch.rrelu,
|
266 |
+
torch.nn.functional.rrelu,
|
267 |
+
torch.Tensor.rsqrt,
|
268 |
+
torch.rsqrt,
|
269 |
+
torch.rsub,
|
270 |
+
torch.selu,
|
271 |
+
torch.nn.functional.selu,
|
272 |
+
torch.Tensor.sgn,
|
273 |
+
torch.sgn,
|
274 |
+
torch.Tensor.sigmoid,
|
275 |
+
torch.sigmoid,
|
276 |
+
torch.nn.functional.sigmoid,
|
277 |
+
torch.Tensor.sign,
|
278 |
+
torch.sign,
|
279 |
+
torch.Tensor.signbit,
|
280 |
+
torch.signbit,
|
281 |
+
torch.nn.functional.silu,
|
282 |
+
torch.Tensor.sin,
|
283 |
+
torch.sin,
|
284 |
+
torch.Tensor.sinc,
|
285 |
+
torch.sinc,
|
286 |
+
torch.Tensor.sinh,
|
287 |
+
torch.sinh,
|
288 |
+
torch.nn.functional.softplus,
|
289 |
+
torch.nn.functional.softshrink,
|
290 |
+
torch.Tensor.sqrt,
|
291 |
+
torch.sqrt,
|
292 |
+
torch.Tensor.square,
|
293 |
+
torch.square,
|
294 |
+
torch.Tensor.sub,
|
295 |
+
torch.sub,
|
296 |
+
torch.Tensor.tan,
|
297 |
+
torch.tan,
|
298 |
+
torch.Tensor.tanh,
|
299 |
+
torch.tanh,
|
300 |
+
torch.nn.functional.tanh,
|
301 |
+
torch.threshold,
|
302 |
+
torch.nn.functional.threshold,
|
303 |
+
torch.trapz,
|
304 |
+
torch.Tensor.true_divide,
|
305 |
+
torch.true_divide,
|
306 |
+
torch.Tensor.trunc,
|
307 |
+
torch.trunc,
|
308 |
+
torch.Tensor.xlogy,
|
309 |
+
torch.xlogy,
|
310 |
+
torch.rand_like,
|
311 |
+
)
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/reference.py
ADDED
@@ -0,0 +1,645 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
#
|
4 |
+
# This source code is licensed under the BSD-style license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
# reference python implementations for C ops
|
8 |
+
import torch
|
9 |
+
|
10 |
+
from functorch._C import dim as _C
|
11 |
+
from . import op_properties
|
12 |
+
from .batch_tensor import _enable_layers
|
13 |
+
from .tree_map import tree_flatten, tree_map
|
14 |
+
|
15 |
+
DimList = _C.DimList
|
16 |
+
import operator
|
17 |
+
from functools import reduce
|
18 |
+
|
19 |
+
|
20 |
+
# use dict to avoid writing C++ bindings for set
|
21 |
+
pointwise = set(op_properties.pointwise)
|
22 |
+
|
23 |
+
|
24 |
+
def prod(x):
    """Return the product of the elements of *x* (1 for an empty iterable)."""
    total = 1
    for factor in x:
        total = total * factor
    return total
|
26 |
+
|
27 |
+
|
28 |
+
def _wrap_dim(d, N, keepdim):
    """Normalize a single dimension selector.

    A first-class ``Dim`` passes through unchanged (it cannot be combined
    with ``keepdim=True``); a non-negative int index is converted to the
    equivalent negative index for an ``N``-dimensional tensor; a negative
    index is returned as-is.
    """
    from . import Dim

    if isinstance(d, Dim):
        assert not keepdim, "cannot preserve first-class dimensions with keepdim=True"
        return d
    return d - N if d >= 0 else d
|
38 |
+
|
39 |
+
|
40 |
+
def _dims(d, N, keepdim, single_dim):
    """Normalize *d* — one selector or a sequence of selectors — into an
    ``ltuple`` of values wrapped by ``_wrap_dim``."""
    from . import Dim

    if isinstance(d, (Dim, int)):
        wrapped = _wrap_dim(d, N, keepdim)
        return ltuple((wrapped,))
    assert not single_dim, f"expected a single dimension or int but found: {d}"
    return ltuple(_wrap_dim(item, N, keepdim) for item in d)
|
47 |
+
|
48 |
+
|
49 |
+
def _bind_dims_to_size(lhs_size, rhs, lhs_debug):
    """Bind the sizes of the first-class dims in *rhs* so their product equals *lhs_size*.

    At most one dim in *rhs* may be unbound; its size is inferred as the
    quotient of *lhs_size* by the product of the bound sizes.  *lhs_debug* is
    only used in error messages.  Raises DimensionMismatchError when the
    sizes cannot be made to agree.
    """
    from . import DimensionMismatchError

    not_bound = tuple((i, r) for i, r in enumerate(rhs) if not r.is_bound)
    if len(not_bound) == 1:
        # exactly one unknown: infer it from lhs_size / (product of bound sizes)
        idx, d = not_bound[0]
        rhs_so_far = prod(r.size for r in rhs if r.is_bound)
        if lhs_size % rhs_so_far != 0:
            rhs_s = tuple("?" if not r.is_bound else str(r.size) for r in rhs)
            raise DimensionMismatchError(
                f"inferred dimension does not evenly fit into larger dimension: {lhs_size} vs {rhs_s}"
            )
        new_size = lhs_size // rhs_so_far
        d.size = new_size
    elif len(not_bound) > 1:
        rhs_s = tuple("?" if not r.is_bound else str(r.size) for r in rhs)
        raise DimensionMismatchError(
            f"cannot infer the size of two dimensions at once: {rhs} with sizes {rhs_s}"
        )
    else:
        # everything already bound: products must match exactly
        rhs_size = prod(r.size for r in rhs)
        if lhs_size != rhs_size:
            raise DimensionMismatchError(
                f"Dimension sizes to do not match ({lhs_size} != {rhs_size}) when matching {lhs_debug} to {rhs}"
            )
|
74 |
+
|
75 |
+
|
76 |
+
def _tensor_levels(inp):
    """Return ``(plain_tensor, levels, has_device)`` for *inp*.

    A first-class-dim ``_Tensor`` contributes its stored levels (copied into
    an ``llist``); an ordinary tensor maps each positional dimension to the
    integer levels ``-ndim .. -1`` and is assumed to hold a device.
    """
    from . import _Tensor

    if isinstance(inp, _Tensor):
        return inp._tensor, llist(inp._levels), inp._has_device
    else:
        return inp, llist(range(-inp.ndim, 0)), True
|
83 |
+
|
84 |
+
|
85 |
+
def _match_levels(v, from_levels, to_levels):
|
86 |
+
view = []
|
87 |
+
permute = []
|
88 |
+
requires_view = False
|
89 |
+
size = v.size()
|
90 |
+
for t in to_levels:
|
91 |
+
try:
|
92 |
+
idx = from_levels.index(t)
|
93 |
+
permute.append(idx)
|
94 |
+
view.append(size[idx])
|
95 |
+
except ValueError:
|
96 |
+
view.append(1)
|
97 |
+
requires_view = True
|
98 |
+
if permute != list(range(len(permute))):
|
99 |
+
v = v.permute(*permute)
|
100 |
+
if requires_view:
|
101 |
+
v = v.view(*view)
|
102 |
+
return v
|
103 |
+
|
104 |
+
|
105 |
+
# make a single dimension positional but do not permute it,
|
106 |
+
# used to do multi-tensor operators where the dim being acted on
|
107 |
+
# should not physically move if possible
|
108 |
+
def _positional_no_permute(self, dim, expand_dim=False):
    """Turn first-class dim *dim* of *self* into a positional dimension
    without physically moving it.

    Returns ``(new_tensor, idx_batched)`` where ``idx_batched`` is the
    positional index the dim ended up at.  With ``expand_dim=True`` a
    missing *dim* is materialized by expanding a new leading dimension of
    size ``dim.size``.
    """
    from . import Tensor

    ptensor, levels = self._tensor, llist(self._levels)
    try:
        idx = levels.index(dim)
    except ValueError:
        if not expand_dim:
            raise
        # dim not present: create it as a new leading, expanded dimension
        idx = 0
        ptensor = ptensor.expand(dim.size, *ptensor.size())
        levels.insert(0, 0)
    idx_batched = 0
    # shift the positional (int) levels in front of idx down by one while
    # counting how many positional dims precede the target
    for i in range(idx):
        if isinstance(levels[i], int):
            levels[i] -= 1
            idx_batched += 1
    levels[idx] = -idx_batched - 1
    return Tensor.from_positional(ptensor, levels, self._has_device), idx_batched
|
127 |
+
|
128 |
+
|
129 |
+
def seq(a, b):
    """Equality for level entries: Dims compare by identity, anything else by ==."""
    from . import Dim

    a_is_dim = isinstance(a, Dim)
    if a_is_dim != isinstance(b, Dim):
        return False
    return a is b if a_is_dim else a == b
|
138 |
+
|
139 |
+
|
140 |
+
class isin:
    """Mixin giving list/tuple subclasses Dim-aware membership and index
    lookup, using ``seq`` (identity for Dims, ``==`` otherwise)."""

    def __contains__(self, item):
        return any(seq(item, member) for member in self)

    def index(self, item):
        for position, member in enumerate(self):
            if seq(item, member):
                return position
        raise ValueError
|
152 |
+
|
153 |
+
|
154 |
+
class llist(isin, list):
    # list whose `in` / index() compare Dims by identity (via isin/seq)
    pass
|
156 |
+
|
157 |
+
|
158 |
+
class ltuple(isin, tuple):
    # tuple whose `in` / index() compare Dims by identity (via isin/seq)
    pass
|
160 |
+
|
161 |
+
|
162 |
+
empty_dict = {}
|
163 |
+
|
164 |
+
|
165 |
+
@classmethod
def __torch_function__(self, orig, cls, args, kwargs=empty_dict):
    """Generic torch-function handler for first-class-dim tensors.

    Pointwise ops: strip the dims off every tensor argument, broadcast all
    operands to a shared level order via _match_levels, run the plain op,
    and re-attach the levels to the result.  Any other op falls back to
    running on the underlying batched tensors under _enable_layers.
    Scalar*scalar __mul__ is deferred via DelayedMulTensor.
    """
    from . import _Tensor, Tensor, TensorLike
    from .delayed_mul_tensor import DelayedMulTensor

    if orig is torch.Tensor.__mul__:
        lhs, rhs = args
        if (
            isinstance(lhs, _Tensor)
            and isinstance(rhs, _Tensor)
            and lhs.ndim == 0
            and rhs.ndim == 0
        ):
            return DelayedMulTensor(lhs, rhs)
    all_dims = llist()
    flat_args, unflatten = tree_flatten((args, kwargs))
    device_holding_tensor = None
    for f in flat_args:
        if isinstance(f, _Tensor):
            if f._has_device:
                device_holding_tensor = f._batchtensor
            for d in f.dims:
                if d not in all_dims:
                    all_dims.append(d)

    def unwrap(t):
        # swap a _Tensor for its underlying batched tensor, moving
        # device-less ones onto the device-holding tensor's device
        if isinstance(t, _Tensor):
            r = t._batchtensor
            if device_holding_tensor is not None and not t._has_device:
                r = r.to(device=device_holding_tensor.device)
            return r
        return t

    if orig in pointwise:
        result_levels = llist()
        arg_levels = llist()
        to_expand = []
        for i, f in enumerate(flat_args):
            if isinstance(f, TensorLike):
                ptensor, levels, _ = _tensor_levels(f)
                if (
                    isinstance(f, _Tensor)
                    and not f._has_device
                    and device_holding_tensor is not None
                ):
                    ptensor = ptensor.to(device=device_holding_tensor.device)
                flat_args[i] = ptensor
                for l in levels:
                    if l not in result_levels:
                        result_levels.append(l)
                to_expand.append((i, levels))

        # broadcast every tensor argument to the union level order
        for i, levels in to_expand:
            flat_args[i] = _match_levels(flat_args[i], levels, result_levels)
        args, kwargs = unflatten(flat_args)
        result = orig(*args, **kwargs)

        def wrap(t):
            if isinstance(t, TensorLike):
                return Tensor.from_positional(
                    t, result_levels, device_holding_tensor is not None
                )
            return t

        return tree_map(wrap, result)
    else:

        def wrap(t):
            if isinstance(t, TensorLike):
                return Tensor.from_batched(t, device_holding_tensor is not None)
            return t

        with _enable_layers(all_dims):
            print(f"batch_tensor for {orig}")
            args, kwargs = unflatten(unwrap(f) for f in flat_args)
            result = orig(*args, **kwargs)
        # print("END", orig)
        return tree_map(wrap, result)
|
243 |
+
|
244 |
+
|
245 |
+
def positional(self, *dims):
    """Bind the given first-class dims (or int indices / dim-packs) of
    *self* to the leading positional dimensions, in the order given.

    Raises DimensionBindError if a requested dim is not present on the
    tensor.
    """
    # BUG FIX: DimensionBindError is raised below but was never imported
    # here, so the error path raised NameError instead of the intended
    # exception; import it alongside Dim and Tensor.
    from . import Dim, DimensionBindError, Tensor

    ptensor, levels = self._tensor, llist(self._levels)
    flat_dims = llist()
    view = []
    needs_view = False
    ndim = self.ndim
    for d in dims:
        if isinstance(d, DimList):
            flat_dims.extend(d)
            view.extend(e.size for e in d)
        elif isinstance(d, Dim):
            flat_dims.append(d)
            view.append(d.size)
        elif isinstance(d, int):
            d = _wrap_dim(d, ndim, False)
            flat_dims.append(d)
            view.append(ptensor.size(d))
        else:
            # a tuple/list of dims is a dim-pack: its members are flattened
            # here and merged into one positional dim by the final reshape
            flat_dims.extend(d)
            view.append(prod(e.size for e in d))
            needs_view = True

    permute = list(range(len(levels)))
    for i, d in enumerate(flat_dims):
        try:
            idx = levels.index(d)
        except ValueError as e:
            raise DimensionBindError(
                f"tensor of dimensions {self.dims} does not contain dim {d}"
            ) from e
        p = permute[idx]
        del levels[idx]
        del permute[idx]
        levels.insert(i, 0)
        permute.insert(i, p)
    ptensor = ptensor.permute(*permute)
    # renumber the remaining positional levels from the right: -1, -2, ...
    seen = 0
    for i in range(len(levels) - 1, -1, -1):
        if isinstance(levels[i], int):
            seen += 1
            levels[i] = -seen
    result = Tensor.from_positional(ptensor, levels, self._has_device)
    if needs_view:
        result = result.reshape(*view, *result.size()[len(flat_dims) :])
    return result
|
293 |
+
|
294 |
+
|
295 |
+
def _contains_dim(input):
    """Return True if any element of *input* is a first-class Dim, else False."""
    from . import Dim

    for i in input:
        if isinstance(i, Dim):
            return True
    # Previously fell off the end returning None; an explicit False keeps the
    # same truthiness for callers while making the contract clear.
    return False
|
301 |
+
|
302 |
+
|
303 |
+
def expand(self, *sizes):
    """Expand *self*.

    With ordinary int sizes this defers to torch.Tensor.expand via the
    __torch_function__ machinery; when first-class Dims are passed, they are
    added as new leading dimensions (expanded to their sizes) and then bound
    by indexing with the dims.
    """
    if not _contains_dim(sizes):
        return self.__torch_function__(torch.Tensor.expand, None, (self, *sizes))
    dims = sizes
    # -1 keeps each existing positional dimension at its current size
    sizes = [d.size for d in dims] + [-1] * self.ndim
    self = self.expand(*sizes)
    return self[dims]
|
310 |
+
|
311 |
+
|
312 |
+
_not_present = object()
|
313 |
+
|
314 |
+
|
315 |
+
def _getarg(name, offset, args, kwargs, default):
|
316 |
+
if len(args) > offset:
|
317 |
+
return args[offset]
|
318 |
+
return kwargs.get(name, default)
|
319 |
+
|
320 |
+
|
321 |
+
def _patcharg(name, offset, args, kwargs, value):
|
322 |
+
if len(args) > offset:
|
323 |
+
args[offset] = value
|
324 |
+
else:
|
325 |
+
kwargs[name] = value
|
326 |
+
|
327 |
+
|
328 |
+
def _wrap(
    orig, dim_offset=0, keepdim_offset=1, dim_name="dim", single_dim=False, reduce=True
):
    """Build a dim-aware wrapper for torch.Tensor method *orig*.

    dim_offset/keepdim_offset give the positional index of the ``dim`` /
    ``keepdim`` arguments of *orig*; dim_name the keyword name.  With
    single_dim=True only a single Dim is accepted; with reduce=False the op
    is treated as non-reducing (keepdim is ignored).  NOTE: the ``reduce``
    parameter shadows functools.reduce inside this function.
    """
    from . import Dim, Tensor, TensorLike

    def fn(self, *args, **kwargs):
        dim = _getarg(dim_name, dim_offset, args, kwargs, _not_present)
        if dim is _not_present or (single_dim and not isinstance(dim, Dim)):
            # no (usable) first-class dim argument: run the original op on
            # the underlying batched tensor
            with _enable_layers(self.dims):
                print(f"dim fallback batch_tensor for {orig}")
                return Tensor.from_batched(
                    orig(self._batchtensor, *args, **kwargs), self._has_device
                )
        keepdim = (
            _getarg("keepdim", keepdim_offset, args, kwargs, False) if reduce else False
        )
        t, levels = self._tensor, llist(self._levels)
        dims = _dims(dim, self._batchtensor.ndim, keepdim, single_dim)
        dim_indices = tuple(levels.index(d) for d in dims)
        if reduce and not keepdim:
            # reducing without keepdim drops the reduced levels from the result
            new_levels = [l for i, l in enumerate(levels) if i not in dim_indices]
        else:
            new_levels = levels

        if len(dim_indices) == 1:
            dim_indices = dim_indices[
                0
            ]  # so that dims that really only take a single argument work...
        args = list(args)
        _patcharg(dim_name, dim_offset, args, kwargs, dim_indices)

        def wrap(t):
            if isinstance(t, TensorLike):
                return Tensor.from_positional(t, new_levels, self._has_device)
            return t

        with _enable_layers(new_levels):
            print(f"dim used batch_tensor for {orig}")
            r = orig(t, *args, **kwargs)
            return tree_map(wrap, r)

    return fn
|
370 |
+
|
371 |
+
|
372 |
+
def _def(name, *args, **kwargs):
    """Install a dim-aware replacement for torch.Tensor method *name* on
    _Tensor; extra arguments are forwarded to _wrap."""
    from . import _Tensor

    orig = getattr(torch.Tensor, name)
    setattr(_Tensor, name, _wrap(orig, *args, **kwargs))
|
377 |
+
|
378 |
+
|
379 |
+
no_slice = slice(None)
|
380 |
+
|
381 |
+
_orig_getitem = torch.Tensor.__getitem__
|
382 |
+
|
383 |
+
|
384 |
+
class dim_tracker:
    """Tracks how many times each first-class Dim is seen during indexing."""

    def __init__(self):
        self.dims = llist()  # distinct dims, in first-seen order
        self.count = []      # parallel per-dim use counts

    def record(self, d):
        """Note one more sighting of dim *d*."""
        if d not in self.dims:
            self.dims.append(d)
            self.count.append(1)
        else:
            # BUG FIX: repeat sightings were previously dropped, so every
            # count stayed at 1 and "used exactly once" queries
            # (dims_seen[d] == 1 in t__getitem__) were vacuously true.
            self.count[self.dims.index(d)] += 1

    def __getitem__(self, d):
        """Return the number of recorded sightings of *d*."""
        return self.count[self.dims.index(d)]
|
396 |
+
|
397 |
+
|
398 |
+
def t__getitem__(self, input):
    """__getitem__ replacement that understands first-class dims, dim-packs,
    unbound DimLists, `...`, and `None` in the index expression.

    Returns a Tensor whose levels combine surviving input dims, dims bound
    by the index, and the remaining positional dimensions.
    """
    from . import _Tensor, Dim, DimensionBindError, DimList, Tensor, TensorLike

    # * bail to original example if we have a single non-Dim tensor, or a non-tensor
    # * locate ... or an unbound tensor list, and determine its size, bind dim list
    #   (remember that None does not count to the total dim count)
    # * bind simple dims and dim-packs to their sizes, count the number of uses of each dim,
    #   produce the re-view if needed
    # * for each single-use dim index, replace with no_slice and mark that it will be added
    #   (keep track of whether we have to call super)
    # * call super if needed
    # * if we have dims to bind, bind them (it will help if we eliminated ... and None before)

    # this handles bool indexing handling, as well as some other simple cases.

    is_simple = (
        not isinstance(input, Dim)
        and not isinstance(input, (tuple, list))
        and
        # WAR for functorch bug where zero time tensors in getitem are not handled correctly.
        not (isinstance(input, TensorLike) and input.ndim == 0)
    )

    if is_simple:
        if isinstance(self, _Tensor):
            return _Tensor.__torch_function__(_orig_getitem, None, (self, input))
        else:
            return _orig_getitem(self, input)

    # can further optimize this case
    if not isinstance(input, tuple):
        input = [input]
    else:
        input = list(input)

    dims_indexed = 0
    expanding_object = None
    dimlists = []
    for i, s in enumerate(input):
        if s is ... or isinstance(s, DimList) and not s.is_bound:
            if expanding_object is not None:
                msg = (
                    "at most one ... or unbound dimension list can exist in indexing list but"
                    f" found 2 at offsets {i} and {expanding_object}"
                )
                raise DimensionBindError(msg)
            expanding_object = i

        if isinstance(s, DimList):
            dims_indexed += len(s) if s.is_bound else 0
            dimlists.append(i)
        elif s is not None and s is not ...:
            dims_indexed += 1

    ndim = self.ndim
    if dims_indexed > ndim:
        raise IndexError(
            f"at least {dims_indexed} indices were supplied but the tensor only has {ndim} dimensions."
        )
    if expanding_object is not None:
        # `...` / an unbound DimList absorbs whatever dims are not
        # explicitly indexed
        expanding_ndims = ndim - dims_indexed
        obj = input[expanding_object]
        if obj is ...:
            input[expanding_object : expanding_object + 1] = [
                no_slice
            ] * expanding_ndims
        else:
            obj.bind_len(expanding_ndims)
    # flatten the dimslists into the indexing
    for i in reversed(dimlists):
        input[i : i + 1] = input[i]
    dims_indexed = 0
    requires_view = False
    size = self.size()
    view_sizes = []
    dims_seen = dim_tracker()

    def add_dims(t):
        if not isinstance(t, _Tensor):
            return
        for d in t.dims:
            dims_seen.record(d)

    add_dims(self)
    dim_packs = []
    for i, idx in enumerate(input):
        if idx is None:
            input[i] = no_slice
            view_sizes.append(1)
            requires_view = True
        else:
            sz = size[dims_indexed]
            if isinstance(idx, Dim):
                idx.size = sz
                dims_seen.record(idx)
                view_sizes.append(sz)
            elif isinstance(idx, (tuple, list)) and idx and isinstance(idx[0], Dim):
                for d in idx:
                    # BUG FIX: record each member dim `d` — previously this
                    # recorded the containing list (`idx`) and left `d`
                    # unused, so pack members were never counted.
                    dims_seen.record(d)
                _bind_dims_to_size(sz, idx, f"offset {i}")
                view_sizes.extend(d.size for d in idx)
                requires_view = True
                dim_packs.append(i)
            else:
                add_dims(idx)
                view_sizes.append(sz)
            dims_indexed += 1
    if requires_view:
        self = self.view(*view_sizes)
    for i in reversed(dim_packs):
        input[i : i + 1] = input[i]

    # currenty:
    # input is flat, containing either Dim, or Tensor, or something valid for standard indexing
    # self may have first-class dims as well.

    # to index:
    # drop the first class dims from self, they just become direct indices of their positions

    # figure out the dimensions of the indexing tensors: union of all the dims in the tensors in the index.
    # these dimensions will appear and need to be bound at the first place tensor occures

    if isinstance(self, _Tensor):
        ptensor_self, levels = self._tensor, list(self._levels)
        # indices to ptensor rather than self which has first-class dimensions
        input_it = iter(input)
        flat_inputs = [next(input_it) if isinstance(l, int) else l for l in levels]
        has_device = self._has_device
        to_pad = 0
    else:
        ptensor_self, flat_inputs = self, input
        to_pad = ptensor_self.ndim - len(flat_inputs)
        has_device = True

    result_levels = []
    index_levels = []
    tensor_insert_point = None
    to_expand = {}
    requires_getindex = False
    for i, inp in enumerate(flat_inputs):
        if isinstance(inp, Dim) and dims_seen[inp] == 1:
            # a dim used exactly once just names its position: no real indexing
            flat_inputs[i] = no_slice
            result_levels.append(inp)
        elif isinstance(inp, TensorLike):
            requires_getindex = True
            if tensor_insert_point is None:
                tensor_insert_point = len(result_levels)
            ptensor, levels, _ = _tensor_levels(inp)
            to_expand[i] = levels
            flat_inputs[i] = ptensor
            for l in levels:
                if l not in index_levels:
                    index_levels.append(l)
        else:
            requires_getindex = True
            result_levels.append(0)

    if tensor_insert_point is not None:
        result_levels[tensor_insert_point:tensor_insert_point] = index_levels

    for i, levels in to_expand.items():
        flat_inputs[i] = _match_levels(flat_inputs[i], levels, index_levels)

    if requires_getindex:
        result = _orig_getitem(ptensor_self, flat_inputs)
    else:
        result = ptensor_self

    # renumber the positional placeholders from the right: -1, -2, ...
    next_positional = -1
    if to_pad > 0:
        result_levels.extend([0] * to_pad)
    for i, r in enumerate(reversed(result_levels)):
        if isinstance(r, int):
            result_levels[-1 - i] = next_positional
            next_positional -= 1

    return Tensor.from_positional(result, result_levels, has_device)
|
575 |
+
|
576 |
+
|
577 |
+
# XXX - dim is optional and can be the outer-most dimension...
|
578 |
+
def stack(tensors, new_dim, dim=0, out=None):
    """torch.stack for first-class dims.

    Stacks *tensors* at position *dim* (an int or a first-class Dim) and
    binds the newly created axis to *new_dim*.  When *dim* is a Dim, every
    operand is first made positional at a common index without permuting.
    """
    if isinstance(dim, int):
        return torch.stack(tensors, dim, out).index(dim, new_dim)
    index = None
    if out is not None:
        out, index = _positional_no_permute(out, dim, expand_dim=True)
    ptensors = []
    for t in tensors:
        pt, pi = _positional_no_permute(t, dim, expand_dim=True)
        if index is not None and pi != index:
            # keep every operand's target dim at the same positional index
            pt = pt.move_dim(pi, index)
        else:
            index = pi
        ptensors.append(pt)
    pr = torch.stack(ptensors, index, out=out)
    # rebind the two adjacent positional dims (stack axis, original axis)
    return pr.index((index, index + 1), (new_dim, dim))
|
594 |
+
|
595 |
+
|
596 |
+
_orig_split = torch.Tensor.split
|
597 |
+
|
598 |
+
|
599 |
+
def split(self, split_size_or_sections, dim=0):
    """torch.Tensor.split that also accepts first-class Dims as split sizes
    and/or as *dim*.

    With int sizes this defers to the original split.  With Dim sizes, bound
    dims fix their chunk size and any unbound dims have sizes inferred by
    spreading the remaining length evenly (ceiling division); each returned
    chunk has its split axis bound to the corresponding dim.
    """
    from . import _Tensor, Dim

    if isinstance(split_size_or_sections, int) or any(
        isinstance(t, int) for t in split_size_or_sections
    ):
        if isinstance(dim, Dim):
            raise ValueError(
                "when dim is specified as a Dim object, split sizes must also be dimensions."
            )
        return _orig_split(self, split_size_or_sections, dim=dim)

    if isinstance(dim, Dim):
        assert isinstance(self, _Tensor), f"Tensor does not have dimension {dim}"
        self, dim = _positional_no_permute(self, dim)

    size = self.size(dim)
    total_bound_size = 0
    unbound = []
    sizes = []
    for i, d in enumerate(split_size_or_sections):
        if d.is_bound:
            sizes.append(d.size)
            total_bound_size += d.size
        else:
            sizes.append(0)
            unbound.append(i)

    if unbound:
        assert (
            total_bound_size <= size
        ), f"result dimensions are larger than original: {total_bound_size} vs {size} ({split_size_or_sections})"
        remaining_size = size - total_bound_size
        # ceiling division: spread the leftover evenly over the unbound dims
        chunk_size = -(-remaining_size // len(unbound))
        for u in unbound:
            sz = min(chunk_size, remaining_size)
            split_size_or_sections[u].size = sz
            sizes[u] = sz
            remaining_size -= sz
    else:
        assert (
            total_bound_size == size
        ), f"result dimensions do not match original: {total_bound_size} vs {size} ({split_size_or_sections})"
    return tuple(
        t.index(dim, d)
        for d, t in zip(split_size_or_sections, _orig_split(self, sizes, dim=dim))
    )
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/tree_map.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
#
|
4 |
+
# This source code is licensed under the BSD-style license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
from functorch._C import dim
|
8 |
+
|
9 |
+
tree_flatten = dim.tree_flatten
|
10 |
+
|
11 |
+
|
12 |
+
def tree_map(fn, tree):
    """Apply *fn* to every leaf of *tree*, rebuilding the same structure."""
    leaves, unflatten = tree_flatten(tree)
    return unflatten(fn(leaf) for leaf in leaves)
|
env-llmeval/lib/python3.10/site-packages/functorch/dim/wrap_type.py
ADDED
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
+
# All rights reserved.
|
3 |
+
#
|
4 |
+
# This source code is licensed under the BSD-style license found in the
|
5 |
+
# LICENSE file in the root directory of this source tree.
|
6 |
+
|
7 |
+
from types import (
|
8 |
+
BuiltinMethodType,
|
9 |
+
FunctionType,
|
10 |
+
GetSetDescriptorType,
|
11 |
+
MethodDescriptorType,
|
12 |
+
WrapperDescriptorType,
|
13 |
+
)
|
14 |
+
|
15 |
+
from functorch._C import dim as _C
|
16 |
+
|
17 |
+
_wrap_method = _C._wrap_method
|
18 |
+
|
19 |
+
FUNC_TYPES = (
|
20 |
+
FunctionType,
|
21 |
+
MethodDescriptorType,
|
22 |
+
BuiltinMethodType,
|
23 |
+
WrapperDescriptorType,
|
24 |
+
)
|
25 |
+
PROPERTY_TYPES = (GetSetDescriptorType, property)
|
26 |
+
|
27 |
+
|
28 |
+
def _py_wrap_method(orig, __torch_function__):
|
29 |
+
def impl(*args, **kwargs):
|
30 |
+
return __torch_function__(orig, None, args, kwargs)
|
31 |
+
|
32 |
+
return impl
|
33 |
+
|
34 |
+
|
35 |
+
def wrap_type(use_c, to_patch, pattern, __torch_function__):
    """Copy every method and property from *pattern*'s MRO onto *to_patch*,
    wrapped so calls dispatch through *__torch_function__*.

    *use_c* selects the C wrapper (_C._wrap_method) or the pure-Python one.
    Names already overloaded on *to_patch* (other than those inherited from
    object) are left alone.
    """
    if use_c:
        wrap_method = _wrap_method
    else:
        wrap_method = _py_wrap_method

    all = {}
    for t in reversed(pattern.mro()[:-1]):  # skip object
        all.update(t.__dict__)

    def wrap_attr(orig):
        # properties are wrapped by routing their getter through the handler
        return property(wrap_method(orig.__get__, __torch_function__))

    for name, obj in all.items():
        # structural/identity slots that must never be redirected
        if name in (
            "__dict__",
            "__new__",
            "__init__",
            "__repr__",
            "__weakref__",
            "__doc__",
            "__module__",
            "__dir__",
        ):
            continue

        # skip things that have been overloaded
        # things that come from object like `__eq__` still need to be patched, however.
        if hasattr(to_patch, name) and getattr(to_patch, name) is not getattr(
            object, name, None
        ):
            continue

        if isinstance(obj, FUNC_TYPES):
            setattr(to_patch, name, wrap_method(obj, __torch_function__))
        elif isinstance(obj, PROPERTY_TYPES):
            setattr(to_patch, name, wrap_attr(obj))
|
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:1626ff119582bca46605bc6d49769ab75314b9993dd647bd64a90dec747bc843
|
3 |
+
size 1534104
|
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fa2587c8d211fbc85e8b88cca0bcebe78c8cc40c81b0c3763ce57ac9e63f0669
|
3 |
+
size 5895416
|
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:83ec9ad7775e89f6280286ba11eb9d28cafe49c2f777a3e051bcc881de7449fc
|
3 |
+
size 56875328
|
env-llmeval/lib/python3.10/site-packages/pytz/lazy.py
ADDED
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from threading import RLock
|
2 |
+
try:
|
3 |
+
from collections.abc import Mapping as DictMixin
|
4 |
+
except ImportError: # Python < 3.3
|
5 |
+
try:
|
6 |
+
from UserDict import DictMixin # Python 2
|
7 |
+
except ImportError: # Python 3.0-3.3
|
8 |
+
from collections import Mapping as DictMixin
|
9 |
+
|
10 |
+
|
11 |
+
# With lazy loading, we might end up with multiple threads triggering
|
12 |
+
# it at the same time. We need a lock.
|
13 |
+
_fill_lock = RLock()
|
14 |
+
|
15 |
+
|
16 |
+
class LazyDict(DictMixin):
    """Dictionary populated on first use.

    Subclasses provide _fill(), which must assign self.data.  Every accessor
    previously repeated the same locked lazy-fill preamble five times; it is
    factored into _ensure_filled (behavior unchanged).
    """

    # None until _fill() runs; the sentinel for "not yet populated"
    data = None

    def _ensure_filled(self):
        # Check-lock-recheck so _fill() runs at most once even when several
        # threads hit the first access concurrently.
        if self.data is None:
            _fill_lock.acquire()
            try:
                if self.data is None:
                    self._fill()
            finally:
                _fill_lock.release()

    def __getitem__(self, key):
        self._ensure_filled()
        # lookup is performed on the upper-cased key
        return self.data[key.upper()]

    def __contains__(self, key):
        self._ensure_filled()
        return key in self.data

    def __iter__(self):
        self._ensure_filled()
        return iter(self.data)

    def __len__(self):
        self._ensure_filled()
        return len(self.data)

    def keys(self):
        self._ensure_filled()
        return self.data.keys()
|
69 |
+
|
70 |
+
|
71 |
+
class LazyList(list):
    """List populated on first use."""

    # names intercepted until the list has been filled from fill_iter
    _props = [
        '__str__', '__repr__', '__unicode__',
        '__hash__', '__sizeof__', '__cmp__',
        '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
        'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove',
        'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__',
        '__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__',
        '__getitem__', '__setitem__', '__delitem__', '__iter__',
        '__reversed__', '__getslice__', '__setslice__', '__delslice__']

    def __new__(cls, fill_iter=None):
        # NOTE: with no fill_iter this intentionally returns a plain list,
        # not a LazyList instance.
        if fill_iter is None:
            return list()

        # We need a new class as we will be dynamically messing with its
        # methods.
        class LazyList(list):
            pass

        # one-element list popped exactly once by the first intercepted call
        fill_iter = [fill_iter]

        def lazy(name):
            def _lazy(self, *args, **kw):
                _fill_lock.acquire()
                try:
                    if len(fill_iter) > 0:
                        # first use: materialize the contents, then strip the
                        # interceptors so later calls go straight to list
                        list.extend(self, fill_iter.pop())
                        for method_name in cls._props:
                            delattr(LazyList, method_name)
                finally:
                    _fill_lock.release()
                return getattr(list, name)(self, *args, **kw)
            return _lazy

        for name in cls._props:
            setattr(LazyList, name, lazy(name))

        new_list = LazyList()
        return new_list

# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)]
|
119 |
+
|
120 |
+
|
121 |
+
class LazySet(set):
    """Set populated on first use.

    Construction takes an iterable but does not consume it; the first
    time any set operation is invoked, the iterable is drained into the
    set under a lock, after which the instance behaves as a plain set.
    """

    # Every set method/operator that must trigger the lazy fill the
    # first time it is called on an instance.
    _props = (
        '__str__', '__repr__', '__unicode__',
        '__hash__', '__sizeof__', '__cmp__',
        '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
        '__contains__', '__len__', '__nonzero__',
        '__getitem__', '__setitem__', '__delitem__', '__iter__',
        '__sub__', '__and__', '__xor__', '__or__',
        '__rsub__', '__rand__', '__rxor__', '__ror__',
        '__isub__', '__iand__', '__ixor__', '__ior__',
        'add', 'clear', 'copy', 'difference', 'difference_update',
        'discard', 'intersection', 'intersection_update', 'isdisjoint',
        'issubset', 'issuperset', 'pop', 'remove',
        'symmetric_difference', 'symmetric_difference_update',
        'union', 'update')

    def __new__(cls, fill_iter=None):

        # With no fill iterable there is nothing to defer; return a
        # plain (empty) set instead.
        if fill_iter is None:
            return set()

        # A fresh subclass per call, so that the delattr() below strips
        # the lazy wrappers from this one instance's class only.
        class LazySet(set):
            pass

        # Boxed in a one-element list so the closure can pop() it: the
        # pop both hands over the iterable and marks the fill as done.
        fill_iter = [fill_iter]

        def lazy(name):
            # Build a wrapper for the set method `name`: on first use,
            # fill the set, drop all wrappers, then delegate.
            def _lazy(self, *args, **kw):
                _fill_lock.acquire()
                try:
                    if len(fill_iter) > 0:
                        for i in fill_iter.pop():
                            set.add(self, i)
                        # Fill is complete - remove the lazy wrappers so
                        # later calls go straight to the set methods.
                        for method_name in cls._props:
                            delattr(LazySet, method_name)
                finally:
                    _fill_lock.release()
                return getattr(set, name)(self, *args, **kw)
            return _lazy

        # Install a lazy wrapper for every wrapped set operation.
        for name in cls._props:
            setattr(LazySet, name, lazy(name))

        new_set = LazySet()
        return new_set
|
168 |
+
|
169 |
+
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
# NOTE: mutates the class attribute in place at import time, so every
# LazySet created afterwards only wraps methods `set` really has.
LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
|
env-llmeval/lib/python3.10/site-packages/pytz/reference.py
ADDED
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''
|
2 |
+
Reference tzinfo implementations from the Python docs.
|
3 |
+
Used for testing against as they are only correct for the years
|
4 |
+
1987 to 2006. Do not use these for real code.
|
5 |
+
'''
|
6 |
+
|
7 |
+
from datetime import tzinfo, timedelta, datetime
|
8 |
+
from pytz import HOUR, ZERO, UTC
|
9 |
+
|
10 |
+
__all__ = [
|
11 |
+
'FixedOffset',
|
12 |
+
'LocalTimezone',
|
13 |
+
'USTimeZone',
|
14 |
+
'Eastern',
|
15 |
+
'Central',
|
16 |
+
'Mountain',
|
17 |
+
'Pacific',
|
18 |
+
'UTC'
|
19 |
+
]
|
20 |
+
|
21 |
+
|
22 |
+
# A class building tzinfo objects for fixed-offset time zones.
|
23 |
+
# Note that FixedOffset(0, "UTC") is a different way to build a
|
24 |
+
# UTC tzinfo object.
|
25 |
+
class FixedOffset(tzinfo):
    """A tzinfo with a fixed offset, given in minutes east of UTC.

    Note that FixedOffset(0, "UTC") is another way of building a UTC
    tzinfo object; DST is always reported as zero.
    """

    def __init__(self, offset, name):
        # Name-mangled privates keep the instance state read-only from
        # the outside.
        self.__name = name
        self.__delta = timedelta(minutes=offset)

    def utcoffset(self, dt):
        """Return the constant offset, regardless of *dt*."""
        return self.__delta

    def tzname(self, dt):
        """Return the name given at construction time."""
        return self.__name

    def dst(self, dt):
        """Fixed-offset zones never observe DST."""
        return ZERO
|
40 |
+
|
41 |
+
|
42 |
+
import time as _time

# The platform's standard (non-DST) offset from UTC.
STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
    # The platform defines a DST offset; use it.
    DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
    # No DST on this platform - DST offset equals the standard offset.
    DSTOFFSET = STDOFFSET

# Size of the DST adjustment (usually one hour; zero without DST).
DSTDIFF = DSTOFFSET - STDOFFSET
|
51 |
+
|
52 |
+
|
53 |
+
# A class capturing the platform's idea of local time.
|
54 |
+
class LocalTimezone(tzinfo):
    """A tzinfo capturing the platform's idea of local time.

    Offsets come from the module-level STDOFFSET/DSTOFFSET/DSTDIFF
    constants; DST state is resolved per-datetime via the C library.
    """

    def utcoffset(self, dt):
        return DSTOFFSET if self._isdst(dt) else STDOFFSET

    def dst(self, dt):
        return DSTDIFF if self._isdst(dt) else ZERO

    def tzname(self, dt):
        # _time.tzname is a (standard, dst) pair; index by DST flag.
        return _time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        """Ask the platform whether DST is in effect at *dt* (naive local)."""
        time_tuple = (dt.year, dt.month, dt.day,
                      dt.hour, dt.minute, dt.second,
                      dt.weekday(), 0, -1)
        # Round-trip through mktime/localtime so the C library fills in
        # the tm_isdst flag for us.
        local = _time.localtime(_time.mktime(time_tuple))
        return local.tm_isdst > 0


Local = LocalTimezone()
|
80 |
+
|
81 |
+
|
82 |
+
def first_sunday_on_or_after(dt):
    """Return the first Sunday on or after *dt* (dt itself if a Sunday)."""
    # weekday(): Monday == 0 ... Sunday == 6, so (6 - weekday) is the
    # number of days forward to the next Sunday, already in 0..6; the
    # modulo is a no-op kept for clarity.
    return dt + timedelta(days=(6 - dt.weekday()) % 7)
|
87 |
+
|
88 |
+
|
89 |
+
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
# (Year 1 is a placeholder; the real year is substituted via .replace()
# before first_sunday_on_or_after() is applied.)
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct.
# which is the first Sunday on or after Oct 25.
DSTEND = datetime(1, 10, 25, 1)
|
94 |
+
|
95 |
+
|
96 |
+
# A complete implementation of current DST rules for major US time zones.
|
97 |
+
class USTimeZone(tzinfo):
    """US timezone implementing the 1987-2006 DST rules.

    DST runs from the first Sunday in April through the last Sunday in
    October; offsets are computed from *hours* (standard offset from
    UTC) plus the DST adjustment when applicable.
    """

    def __init__(self, hours, reprname, stdname, dstname):
        self.stdoffset = timedelta(hours=hours)
        self.reprname = reprname
        self.stdname = stdname
        self.dstname = dstname

    def __repr__(self):
        return self.reprname

    def tzname(self, dt):
        # DST abbreviation when the DST adjustment is non-zero.
        return self.dstname if self.dst(dt) else self.stdname

    def utcoffset(self, dt):
        return self.stdoffset + self.dst(dt)

    def dst(self, dt):
        if dt is None or dt.tzinfo is None:
            # An exception may be sensible here, in one or both cases.
            # It depends on how you want to treat them. The default
            # fromutc() implementation (called by the default
            # astimezone() implementation) passes a datetime with
            # dt.tzinfo is self.
            return ZERO
        assert dt.tzinfo is self

        # DST window for dt's year: first Sunday in April through the
        # first Sunday on or after Oct 25 (the last Sunday of October).
        dst_start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
        dst_end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))

        # Naive and aware datetimes cannot be compared, so strip the
        # timezone from dt before testing membership in the window.
        in_dst = dst_start <= dt.replace(tzinfo=None) < dst_end
        return HOUR if in_dst else ZERO
|
136 |
+
|
137 |
+
# Singleton instances for the four major US time zones (valid only for
# the 1987-2006 DST rules, per the module docstring).
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
|
env-llmeval/lib/python3.10/site-packages/pytz/tzinfo.py
ADDED
@@ -0,0 +1,580 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''Base classes and helpers for building zone specific tzinfo classes'''
|
2 |
+
|
3 |
+
from datetime import datetime, timedelta, tzinfo
|
4 |
+
from bisect import bisect_right
|
5 |
+
try:
|
6 |
+
set
|
7 |
+
except NameError:
|
8 |
+
from sets import Set as set
|
9 |
+
|
10 |
+
import pytz
|
11 |
+
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
|
12 |
+
|
13 |
+
__all__ = []
|
14 |
+
|
15 |
+
# Interned timedelta instances, keyed by their offset in seconds.
_timedelta_cache = {}


def memorized_timedelta(seconds):
    '''Create only one instance of each distinct timedelta.

    Returns the cached timedelta for *seconds*, creating and caching it
    on first request, so equal offsets share a single object.
    '''
    cached = _timedelta_cache.get(seconds)
    if cached is None:
        cached = timedelta(seconds=seconds)
        _timedelta_cache[seconds] = cached
    return cached
|
26 |
+
|
27 |
+
|
28 |
+
_epoch = datetime(1970, 1, 1, 0, 0)  # datetime.utcfromtimestamp(0)
# Interned naive-UTC datetime instances, keyed by seconds since epoch.
_datetime_cache = {0: _epoch}


def memorized_datetime(seconds):
    '''Create only one instance of each distinct datetime.

    Returns the cached naive UTC datetime for *seconds* past the epoch,
    creating and caching it on first request.
    '''
    if seconds not in _datetime_cache:
        # NB. Computed via timedelta arithmetic rather than
        # datetime.fromtimestamp(), which fails with negative values
        # under Windows (Bug #90096).
        _datetime_cache[seconds] = _epoch + timedelta(seconds=seconds)
    return _datetime_cache[seconds]
|
42 |
+
|
43 |
+
|
44 |
+
# Interned (utcoffset, dstoffset, tzname) triples, keyed by raw args.
_ttinfo_cache = {}


def memorized_ttinfo(*args):
    '''Create only one instance of each distinct tuple.

    args is (utcoffset_seconds, dstoffset_seconds, tzname); the two
    offsets are interned through memorized_timedelta as well.
    '''
    if args not in _ttinfo_cache:
        utc_seconds, dst_seconds, name = args[0], args[1], args[2]
        _ttinfo_cache[args] = (
            memorized_timedelta(utc_seconds),
            memorized_timedelta(dst_seconds),
            name,
        )
    return _ttinfo_cache[args]
|
59 |
+
|
60 |
+
|
61 |
+
# Canonical zero offset, shared wherever "no offset" is needed.
_notime = memorized_timedelta(0)
|
62 |
+
|
63 |
+
|
64 |
+
def _to_seconds(td):
|
65 |
+
'''Convert a timedelta to seconds'''
|
66 |
+
return td.seconds + td.days * 24 * 60 * 60
|
67 |
+
|
68 |
+
|
69 |
+
class BaseTzInfo(tzinfo):
    """Common base for pytz tzinfo classes; holds the zone identity."""

    # All three are placeholders overridden by concrete subclasses.
    zone = None        # Olson zone name, e.g. 'US/Eastern'
    _utcoffset = None  # offset from UTC
    _tzname = None     # abbreviation, e.g. 'EST'

    def __str__(self):
        """The zone name is the string form of the timezone."""
        return self.zone
|
77 |
+
|
78 |
+
|
79 |
+
class StaticTzInfo(BaseTzInfo):
    '''A timezone whose offset from UTC never changes.

    Such zones are rare, as most locations have altered their UTC
    offset at some point in their history.
    '''

    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        if not (dt.tzinfo is None or dt.tzinfo is self):
            raise ValueError('fromutc: dt.tzinfo is not self')
        return (dt + self._utcoffset).replace(tzinfo=self)

    def utcoffset(self, dt, is_dst=None):
        '''See datetime.tzinfo.utcoffset

        is_dst is accepted (and ignored) purely for signature
        compatibility with DstTzInfo.
        '''
        return self._utcoffset

    def dst(self, dt, is_dst=None):
        '''See datetime.tzinfo.dst

        Always the zero offset; is_dst is accepted (and ignored) purely
        for signature compatibility with DstTzInfo.
        '''
        return _notime

    def tzname(self, dt, is_dst=None):
        '''See datetime.tzinfo.tzname

        is_dst is accepted (and ignored) purely for signature
        compatibility with DstTzInfo.
        '''
        return self._tzname

    def localize(self, dt, is_dst=False):
        '''Attach this timezone to a naive datetime.'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime.

        A no-op when *dt* already carries this zone - StaticTzInfo
        never has ambiguous cases to correct. Datetimes in another zone
        are converted via datetime.astimezone(), which is also the
        supported way of converting between timezones.
        '''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)

    def __repr__(self):
        return '<StaticTzInfo %r>' % (self.zone,)

    def __reduce__(self):
        # Pickle by zone name only, so the zone stays a singleton on
        # unpickling and survives timezone database changes.
        return pytz._p, (self.zone,)
|
157 |
+
|
158 |
+
|
159 |
+
class DstTzInfo(BaseTzInfo):
    '''A timezone that has a variable offset from UTC

    The offset might change if daylight saving time comes into effect,
    or at a point in history when the region decides to change their
    timezone definition.
    '''
    # Overridden in subclass

    # Sorted list of DST transition times, UTC
    _utc_transition_times = None

    # [(utcoffset, dstoffset, tzname)] corresponding to
    # _utc_transition_times entries
    _transition_info = None

    zone = None

    # Set in __init__

    _tzinfos = None
    _dst = None  # DST offset

    def __init__(self, _inf=None, _tzinfos=None):
        # A zone is a family of sibling instances, one per distinct
        # (utcoffset, dst, tzname) triple, all sharing one _tzinfos map.
        if _inf:
            # Internal constructor: sibling instance for one triple,
            # joining an existing family.
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = _inf
        else:
            # Public constructor: build the whole family; this instance
            # represents the first transition's triple.
            _tzinfos = {}
            self._tzinfos = _tzinfos
            self._utcoffset, self._dst, self._tzname = (
                self._transition_info[0])
            _tzinfos[self._transition_info[0]] = self
            for inf in self._transition_info[1:]:
                if inf not in _tzinfos:
                    _tzinfos[inf] = self.__class__(inf, _tzinfos)

    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        # Any sibling instance of this zone is acceptable input, hence
        # the comparison on the shared _tzinfos map rather than on self.
        if (dt.tzinfo is not None and
                getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos):
            raise ValueError('fromutc: dt.tzinfo is not self')
        dt = dt.replace(tzinfo=None)
        # Index of the last transition at or before dt, clamped to 0
        # for datetimes preceding the first known transition.
        idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
        inf = self._transition_info[idx]
        return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])

    def normalize(self, dt):
        '''Correct the timezone information on the given datetime

        If date arithmetic crosses DST boundaries, the tzinfo
        is not magically adjusted. This method normalizes the
        tzinfo to the correct one.

        To test, first we need to do some setup

        >>> from pytz import timezone
        >>> utc = timezone('UTC')
        >>> eastern = timezone('US/Eastern')
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'

        We next create a datetime right on an end-of-DST transition point,
        the instant when the wallclocks are wound back one hour.

        >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
        >>> loc_dt = utc_dt.astimezone(eastern)
        >>> loc_dt.strftime(fmt)
        '2002-10-27 01:00:00 EST (-0500)'

        Now, if we subtract a few minutes from it, note that the timezone
        information has not changed.

        >>> before = loc_dt - timedelta(minutes=10)
        >>> before.strftime(fmt)
        '2002-10-27 00:50:00 EST (-0500)'

        But we can fix that by calling the normalize method

        >>> before = eastern.normalize(before)
        >>> before.strftime(fmt)
        '2002-10-27 01:50:00 EDT (-0400)'

        The supported method of converting between timezones is to use
        datetime.astimezone(). Currently, normalize() also works:

        >>> th = timezone('Asia/Bangkok')
        >>> am = timezone('Europe/Amsterdam')
        >>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3))
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> am.normalize(dt).strftime(fmt)
        '2011-05-06 20:02:03 CEST (+0200)'
        '''
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')

        # Convert dt in localtime to UTC
        offset = dt.tzinfo._utcoffset
        dt = dt.replace(tzinfo=None)
        dt = dt - offset
        # convert it back, and return it
        return self.fromutc(dt)

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time.

        This method should be used to construct localtimes, rather
        than passing a tzinfo argument to a datetime constructor.

        is_dst is used to determine the correct timezone in the ambigous
        period at the end of daylight saving time.

        >>> from pytz import timezone
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> amdam = timezone('Europe/Amsterdam')
        >>> dt = datetime(2004, 10, 31, 2, 0, 0)
        >>> loc_dt1 = amdam.localize(dt, is_dst=True)
        >>> loc_dt2 = amdam.localize(dt, is_dst=False)
        >>> loc_dt1.strftime(fmt)
        '2004-10-31 02:00:00 CEST (+0200)'
        >>> loc_dt2.strftime(fmt)
        '2004-10-31 02:00:00 CET (+0100)'
        >>> str(loc_dt2 - loc_dt1)
        '1:00:00'

        Use is_dst=None to raise an AmbiguousTimeError for ambiguous
        times at the end of daylight saving time

        >>> try:
        ...     loc_dt1 = amdam.localize(dt, is_dst=None)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous

        is_dst defaults to False

        >>> amdam.localize(dt) == amdam.localize(dt, False)
        True

        is_dst is also used to determine the correct timezone in the
        wallclock times jumped over at the start of daylight saving time.

        >>> pacific = timezone('US/Pacific')
        >>> dt = datetime(2008, 3, 9, 2, 0, 0)
        >>> ploc_dt1 = pacific.localize(dt, is_dst=True)
        >>> ploc_dt2 = pacific.localize(dt, is_dst=False)
        >>> ploc_dt1.strftime(fmt)
        '2008-03-09 02:00:00 PDT (-0700)'
        >>> ploc_dt2.strftime(fmt)
        '2008-03-09 02:00:00 PST (-0800)'
        >>> str(ploc_dt2 - ploc_dt1)
        '1:00:00'

        Use is_dst=None to raise a NonExistentTimeError for these skipped
        times.

        >>> try:
        ...     loc_dt1 = pacific.localize(dt, is_dst=None)
        ... except NonExistentTimeError:
        ...     print('Non-existent')
        Non-existent
        '''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')

        # Find the two best possibilities.
        # Probe from a day before and a day after, so both sides of any
        # nearby transition are tried; a candidate is kept only if it
        # round-trips back to the requested wall-clock time.
        possible_loc_dt = set()
        for delta in [timedelta(days=-1), timedelta(days=1)]:
            loc_dt = dt + delta
            idx = max(0, bisect_right(
                self._utc_transition_times, loc_dt) - 1)
            inf = self._transition_info[idx]
            tzinfo = self._tzinfos[inf]
            loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
            if loc_dt.replace(tzinfo=None) == dt:
                possible_loc_dt.add(loc_dt)

        if len(possible_loc_dt) == 1:
            return possible_loc_dt.pop()

        # If there are no possibly correct timezones, we are attempting
        # to convert a time that never happened - the time period jumped
        # during the start-of-DST transition period.
        if len(possible_loc_dt) == 0:
            # If we refuse to guess, raise an exception.
            if is_dst is None:
                raise NonExistentTimeError(dt)

            # If we are forcing the pre-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock forward a few
            # hours.
            elif is_dst:
                return self.localize(
                    dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)

            # If we are forcing the post-DST side of the DST transition, we
            # obtain the correct timezone by winding the clock back.
            else:
                return self.localize(
                    dt - timedelta(hours=6),
                    is_dst=False) + timedelta(hours=6)

        # If we get this far, we have multiple possible timezones - this
        # is an ambiguous case occurring during the end-of-DST transition.

        # If told to be strict, raise an exception since we have an
        # ambiguous case
        if is_dst is None:
            raise AmbiguousTimeError(dt)

        # Filter out the possiblilities that don't match the requested
        # is_dst
        filtered_possible_loc_dt = [
            p for p in possible_loc_dt if bool(p.tzinfo._dst) == is_dst
        ]

        # Hopefully we only have one possibility left. Return it.
        if len(filtered_possible_loc_dt) == 1:
            return filtered_possible_loc_dt[0]

        if len(filtered_possible_loc_dt) == 0:
            filtered_possible_loc_dt = list(possible_loc_dt)

        # If we get this far, we have in a wierd timezone transition
        # where the clocks have been wound back but is_dst is the same
        # in both (eg. Europe/Warsaw 1915 when they switched to CET).
        # At this point, we just have to guess unless we allow more
        # hints to be passed in (such as the UTC offset or abbreviation),
        # but that is just getting silly.
        #
        # Choose the earliest (by UTC) applicable timezone if is_dst=True
        # Choose the latest (by UTC) applicable timezone if is_dst=False
        # i.e., behave like end-of-DST transition
        dates = {}  # utc -> local
        for local_dt in filtered_possible_loc_dt:
            utc_time = (
                local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset)
            assert utc_time not in dates
            dates[utc_time] = local_dt
        # [min, max][not is_dst] selects min when is_dst is truthy,
        # max otherwise.
        return dates[[min, max][not is_dst](dates)]

    def utcoffset(self, dt, is_dst=None):
        '''See datetime.tzinfo.utcoffset

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')
        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> str(tz.utcoffset(ambiguous, is_dst=False))
        '-1 day, 20:30:00'

        >>> str(tz.utcoffset(ambiguous, is_dst=True))
        '-1 day, 21:30:00'

        >>> try:
        ...     tz.utcoffset(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous

        '''
        if dt is None:
            return None
        elif dt.tzinfo is not self:
            # Re-localize to pick the sibling instance whose offset
            # actually applies at dt.
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._utcoffset
        else:
            return self._utcoffset

    def dst(self, dt, is_dst=None):
        '''See datetime.tzinfo.dst

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')

        >>> normal = datetime(2009, 9, 1)

        >>> str(tz.dst(normal))
        '1:00:00'
        >>> str(tz.dst(normal, is_dst=False))
        '1:00:00'
        >>> str(tz.dst(normal, is_dst=True))
        '1:00:00'

        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> str(tz.dst(ambiguous, is_dst=False))
        '0:00:00'
        >>> str(tz.dst(ambiguous, is_dst=True))
        '1:00:00'
        >>> try:
        ...     tz.dst(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous

        '''
        if dt is None:
            return None
        elif dt.tzinfo is not self:
            # Re-localize to pick the sibling instance whose DST offset
            # actually applies at dt.
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._dst
        else:
            return self._dst

    def tzname(self, dt, is_dst=None):
        '''See datetime.tzinfo.tzname

        The is_dst parameter may be used to remove ambiguity during DST
        transitions.

        >>> from pytz import timezone
        >>> tz = timezone('America/St_Johns')

        >>> normal = datetime(2009, 9, 1)

        >>> tz.tzname(normal)
        'NDT'
        >>> tz.tzname(normal, is_dst=False)
        'NDT'
        >>> tz.tzname(normal, is_dst=True)
        'NDT'

        >>> ambiguous = datetime(2009, 10, 31, 23, 30)

        >>> tz.tzname(ambiguous, is_dst=False)
        'NST'
        >>> tz.tzname(ambiguous, is_dst=True)
        'NDT'
        >>> try:
        ...     tz.tzname(ambiguous)
        ... except AmbiguousTimeError:
        ...     print('Ambiguous')
        Ambiguous
        '''
        if dt is None:
            return self.zone
        elif dt.tzinfo is not self:
            # Re-localize to pick the sibling instance whose
            # abbreviation actually applies at dt.
            dt = self.localize(dt, is_dst)
            return dt.tzinfo._tzname
        else:
            return self._tzname

    def __repr__(self):
        if self._dst:
            dst = 'DST'
        else:
            dst = 'STD'
        # Positive offsets need an explicit '+'; negative timedeltas
        # already render their own sign.
        if self._utcoffset > _notime:
            return '<DstTzInfo %r %s+%s %s>' % (
                self.zone, self._tzname, self._utcoffset, dst
            )
        else:
            return '<DstTzInfo %r %s%s %s>' % (
                self.zone, self._tzname, self._utcoffset, dst
            )

    def __reduce__(self):
        # Special pickle to zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (
            self.zone,
            _to_seconds(self._utcoffset),
            _to_seconds(self._dst),
            self._tzname
        )
|
530 |
+
|
531 |
+
|
532 |
+
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    """Factory function for unpickling pytz tzinfo instances.

    This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zones implementation to switch between
    these two base classes and we can't break pickles on a pytz version
    upgrade.
    """
    # A KeyError here would mean the zone vanished from the database,
    # which should never happen and would be a bug.
    tz = pytz.timezone(zone)

    # StaticTzInfo pickles carry no offset state -- the zone object
    # itself is all there is, so just return it.
    if utcoffset is None:
        return tz

    # DstTzInfo: select the transition-specific tzinfo instance
    # matching the pickled state, so restored datetime instances
    # behave exactly as they did before pickling.
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    key = (utcoffset, dstoffset, tzname)
    localized = tz._tzinfos.get(key)
    if localized is not None:
        return localized
    # The exact state is gone: either a corrupt pickle, or the
    # timezone database was corrected violently enough that this
    # (utcoffset, dstoffset, tzname) combination no longer exists.

    # See if an entry differs only by abbreviation. Abbreviations get
    # changed from the initial guess by the database maintainers to
    # match reality when that information is discovered.
    for candidate in tz._tzinfos.values():
        if (candidate._utcoffset == utcoffset and
                candidate._dst == dstoffset):
            return candidate

    # This (utcoffset, dstoffset) pair has been removed from the zone
    # entirely. Recreate it so datetime instances pickled with the old
    # (possibly incorrect) information keep behaving exactly as they
    # did -- a purely paranoid safety net that should never be needed
    # in real life.
    tz._tzinfos[key] = tz.__class__(key, tz._tzinfos)
    return tz._tzinfos[key]
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/CET
ADDED
Binary file (2.09 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/CST6CDT
ADDED
Binary file (2.31 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Cuba
ADDED
Binary file (2.42 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/EET
ADDED
Binary file (1.91 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/EST
ADDED
Binary file (114 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/EST5EDT
ADDED
Binary file (2.31 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Egypt
ADDED
Binary file (2.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+12
ADDED
Binary file (117 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+2
ADDED
Binary file (116 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+5
ADDED
Binary file (116 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+7
ADDED
Binary file (116 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+8
ADDED
Binary file (116 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-11
ADDED
Binary file (118 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-3
ADDED
Binary file (117 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-7
ADDED
Binary file (117 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT0
ADDED
Binary file (114 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/UCT
ADDED
Binary file (114 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Etc/Universal
ADDED
Binary file (114 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Factory
ADDED
Binary file (116 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/GB-Eire
ADDED
Binary file (3.66 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/GMT
ADDED
Binary file (114 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/GMT+0
ADDED
Binary file (114 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/GMT-0
ADDED
Binary file (114 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/GMT0
ADDED
Binary file (114 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/HST
ADDED
Binary file (115 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pytz/zoneinfo/Hongkong
ADDED
Binary file (1.23 kB). View file
|
|