applied-ai-018 committed on
Commit 5cc660e · verified · 1 Parent(s): 13336c2

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete set.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__init__.py +0 -0
  2. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/engines.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expr.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expressions.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/scope.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/align.py +213 -0
  7. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/api.py +2 -0
  8. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/engines.py +143 -0
  9. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/eval.py +415 -0
  10. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/expr.py +840 -0
  11. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/expressions.py +286 -0
  12. env-llmeval/lib/python3.10/site-packages/pandas/core/computation/pytables.py +666 -0
  13. env-llmeval/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__init__.py +0 -0
  15. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/accessors.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/base.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/category.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimelike.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimes.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/extension.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/frozen.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/interval.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/period.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/range.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/accessors.py +643 -0
  30. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/api.py +388 -0
  31. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/base.py +0 -0
  32. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/category.py +513 -0
  33. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/datetimelike.py +843 -0
  34. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/datetimes.py +1127 -0
  35. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/extension.py +172 -0
  36. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/frozen.py +120 -0
  37. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/interval.py +1136 -0
  38. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/multi.py +0 -0
  39. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/period.py +614 -0
  40. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/range.py +1187 -0
  41. env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/timedeltas.py +356 -0
  42. env-llmeval/lib/python3.10/site-packages/pandas/core/methods/__init__.py +0 -0
  43. env-llmeval/lib/python3.10/site-packages/pandas/core/methods/__pycache__/__init__.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/pandas/core/methods/__pycache__/describe.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/pandas/core/methods/__pycache__/selectn.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/pandas/core/methods/__pycache__/to_dict.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/pandas/core/methods/describe.py +416 -0
  48. env-llmeval/lib/python3.10/site-packages/pandas/core/methods/selectn.py +269 -0
  49. env-llmeval/lib/python3.10/site-packages/pandas/core/methods/to_dict.py +272 -0
  50. env-llmeval/lib/python3.10/site-packages/pandas/core/reshape/__init__.py +0 -0
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/engines.cpython-310.pyc ADDED
Binary file (4.38 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expr.cpython-310.pyc ADDED
Binary file (23.2 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expressions.cpython-310.pyc ADDED
Binary file (6.1 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/__pycache__/scope.cpython-310.pyc ADDED
Binary file (8.86 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/align.py ADDED
@@ -0,0 +1,213 @@
"""
Core eval alignment algorithms.
"""
from __future__ import annotations

from functools import (
    partial,
    wraps,
)
from typing import (
    TYPE_CHECKING,
    Callable,
)
import warnings

import numpy as np

from pandas.errors import PerformanceWarning
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCSeries,
)

from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.computation.common import result_type_many

if TYPE_CHECKING:
    from collections.abc import Sequence

    from pandas._typing import F

    from pandas.core.generic import NDFrame
    from pandas.core.indexes.api import Index


def _align_core_single_unary_op(
    term,
) -> tuple[partial | type[NDFrame], dict[str, Index] | None]:
    typ: partial | type[NDFrame]
    axes: dict[str, Index] | None = None

    if isinstance(term.value, np.ndarray):
        typ = partial(np.asanyarray, dtype=term.value.dtype)
    else:
        typ = type(term.value)
        if hasattr(term.value, "axes"):
            axes = _zip_axes_from_type(typ, term.value.axes)

    return typ, axes


def _zip_axes_from_type(
    typ: type[NDFrame], new_axes: Sequence[Index]
) -> dict[str, Index]:
    return {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)}


def _any_pandas_objects(terms) -> bool:
    """
    Check a sequence of terms for instances of PandasObject.
    """
    return any(isinstance(term.value, PandasObject) for term in terms)


def _filter_special_cases(f) -> Callable[[F], F]:
    @wraps(f)
    def wrapper(terms):
        # single unary operand
        if len(terms) == 1:
            return _align_core_single_unary_op(terms[0])

        term_values = (term.value for term in terms)

        # we don't have any pandas objects
        if not _any_pandas_objects(terms):
            return result_type_many(*term_values), None

        return f(terms)

    return wrapper


@_filter_special_cases
def _align_core(terms):
    term_index = [i for i, term in enumerate(terms) if hasattr(term.value, "axes")]
    term_dims = [terms[i].value.ndim for i in term_index]

    from pandas import Series

    ndims = Series(dict(zip(term_index, term_dims)))

    # initial axes are the axes of the largest-axis'd term
    biggest = terms[ndims.idxmax()].value
    typ = biggest._constructor
    axes = biggest.axes
    naxes = len(axes)
    gt_than_one_axis = naxes > 1

    for value in (terms[i].value for i in term_index):
        is_series = isinstance(value, ABCSeries)
        is_series_and_gt_one_axis = is_series and gt_than_one_axis

        for axis, items in enumerate(value.axes):
            if is_series_and_gt_one_axis:
                ax, itm = naxes - 1, value.index
            else:
                ax, itm = axis, items

            if not axes[ax].is_(itm):
                axes[ax] = axes[ax].union(itm)

    for i, ndim in ndims.items():
        for axis, items in zip(range(ndim), axes):
            ti = terms[i].value

            if hasattr(ti, "reindex"):
                transpose = isinstance(ti, ABCSeries) and naxes > 1
                reindexer = axes[naxes - 1] if transpose else items

                term_axis_size = len(ti.axes[axis])
                reindexer_size = len(reindexer)

                ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))
                if ordm >= 1 and reindexer_size >= 10000:
                    w = (
                        f"Alignment difference on axis {axis} is larger "
                        f"than an order of magnitude on term {repr(terms[i].name)}, "
                        f"by more than {ordm:.4g}; performance may suffer."
                    )
                    warnings.warn(
                        w, category=PerformanceWarning, stacklevel=find_stack_level()
                    )

                obj = ti.reindex(reindexer, axis=axis, copy=False)
                terms[i].update(obj)

        terms[i].update(terms[i].value.values)

    return typ, _zip_axes_from_type(typ, axes)


def align_terms(terms):
    """
    Align a set of terms.
    """
    try:
        # flatten the parse tree (a nested list, really)
        terms = list(com.flatten(terms))
    except TypeError:
        # can't iterate so it must just be a constant or single variable
        if isinstance(terms.value, (ABCSeries, ABCDataFrame)):
            typ = type(terms.value)
            return typ, _zip_axes_from_type(typ, terms.value.axes)
        return np.result_type(terms.type), None

    # if all resolved variables are numeric scalars
    if all(term.is_scalar for term in terms):
        return result_type_many(*(term.value for term in terms)).type, None

    # perform the main alignment
    typ, axes = _align_core(terms)
    return typ, axes


def reconstruct_object(typ, obj, axes, dtype):
    """
    Reconstruct an object given its type, raw value, and possibly empty
    (None) axes.

    Parameters
    ----------
    typ : object
        A type
    obj : object
        The value to use in the type constructor
    axes : dict
        The axes to use to construct the resulting pandas object

    Returns
    -------
    ret : typ
        An object of type ``typ`` with the value `obj` and possible axes
        `axes`.
    """
    try:
        typ = typ.type
    except AttributeError:
        pass

    res_t = np.result_type(obj.dtype, dtype)

    if not isinstance(typ, partial) and issubclass(typ, PandasObject):
        return typ(obj, dtype=res_t, **axes)

    # special case for pathological things like ~True/~False
    if hasattr(res_t, "type") and typ == np.bool_ and res_t != np.bool_:
        ret_value = res_t.type(obj)
    else:
        ret_value = typ(obj).astype(res_t)
        # The condition is to distinguish 0-dim array (returned in case of
        # scalar) and 1 element array
        # e.g. np.array(0) and np.array([0])
        if (
            len(obj.shape) == 1
            and len(obj) == 1
            and not isinstance(ret_value, np.ndarray)
        ):
            ret_value = np.array([ret_value]).astype(res_t)

    return ret_value
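For context (not part of the commit): the helpers above implement the index alignment users observe when ``pd.eval`` combines differently indexed pandas objects. A minimal sketch of that behavior, using only the public ``pd.eval`` API:

import pandas as pd

# Two Series with partially overlapping indexes.
s1 = pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"])
s2 = pd.Series([10.0, 20.0, 30.0], index=["b", "c", "d"])

# align_terms reindexes both operands to the union of their indexes
# before evaluation, so non-overlapping labels come back as NaN.
print(pd.eval("s1 + s2"))
# a     NaN
# b    12.0
# c    23.0
# d     NaN
# dtype: float64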
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/api.py ADDED
@@ -0,0 +1,2 @@
__all__ = ["eval"]
from pandas.core.computation.eval import eval
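For orientation (not part of the commit): the ``eval`` re-exported here is the function that surfaces at the top level as ``pd.eval``, so a quick smoke test of the public entry point looks like:

import pandas as pd

# pd.eval routes through pandas.core.computation.eval.eval.
print(pd.eval("1 + 2 * 3"))  # 7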
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/engines.py ADDED
@@ -0,0 +1,143 @@
"""
Engine classes for :func:`~pandas.eval`
"""
from __future__ import annotations

import abc
from typing import TYPE_CHECKING

from pandas.errors import NumExprClobberingError

from pandas.core.computation.align import (
    align_terms,
    reconstruct_object,
)
from pandas.core.computation.ops import (
    MATHOPS,
    REDUCTIONS,
)

from pandas.io.formats import printing

if TYPE_CHECKING:
    from pandas.core.computation.expr import Expr

_ne_builtins = frozenset(MATHOPS + REDUCTIONS)


def _check_ne_builtin_clash(expr: Expr) -> None:
    """
    Attempt to prevent foot-shooting in a helpful way.

    Parameters
    ----------
    expr : Expr
        Terms can contain
    """
    names = expr.names
    overlap = names & _ne_builtins

    if overlap:
        s = ", ".join([repr(x) for x in overlap])
        raise NumExprClobberingError(
            f'Variables in expression "{expr}" overlap with builtins: ({s})'
        )


class AbstractEngine(metaclass=abc.ABCMeta):
    """Object serving as a base class for all engines."""

    has_neg_frac = False

    def __init__(self, expr) -> None:
        self.expr = expr
        self.aligned_axes = None
        self.result_type = None

    def convert(self) -> str:
        """
        Convert an expression for evaluation.

        Defaults to return the expression as a string.
        """
        return printing.pprint_thing(self.expr)

    def evaluate(self) -> object:
        """
        Run the engine on the expression.

        This method performs alignment which is necessary no matter what engine
        is being used, thus its implementation is in the base class.

        Returns
        -------
        object
            The result of the passed expression.
        """
        if not self._is_aligned:
            self.result_type, self.aligned_axes = align_terms(self.expr.terms)

        # make sure no names in resolvers and locals/globals clash
        res = self._evaluate()
        return reconstruct_object(
            self.result_type, res, self.aligned_axes, self.expr.terms.return_type
        )

    @property
    def _is_aligned(self) -> bool:
        return self.aligned_axes is not None and self.result_type is not None

    @abc.abstractmethod
    def _evaluate(self):
        """
        Return an evaluated expression.

        Parameters
        ----------
        env : Scope
            The local and global environment in which to evaluate an
            expression.

        Notes
        -----
        Must be implemented by subclasses.
        """


class NumExprEngine(AbstractEngine):
    """NumExpr engine class"""

    has_neg_frac = True

    def _evaluate(self):
        import numexpr as ne

        # convert the expression to a valid numexpr expression
        s = self.convert()

        env = self.expr.env
        scope = env.full_scope
        _check_ne_builtin_clash(self.expr)
        return ne.evaluate(s, local_dict=scope)


class PythonEngine(AbstractEngine):
    """
    Evaluate an expression in Python space.

    Mostly for testing purposes.
    """

    has_neg_frac = False

    def evaluate(self):
        return self.expr()

    def _evaluate(self) -> None:
        pass


ENGINES: dict[str, type[AbstractEngine]] = {
    "numexpr": NumExprEngine,
    "python": PythonEngine,
}
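A minimal sketch (not part of the commit, and relying on internal APIs that may change between pandas versions) of how the ``ENGINES`` registry is used: build a ``Scope``, parse an ``Expr`` against it, then instantiate and run an engine class by name — the same dispatch eval.py performs:

from pandas.core.computation.engines import ENGINES
from pandas.core.computation.expr import Expr
from pandas.core.computation.scope import Scope

# An explicit scope holding the variables the expression refers to.
env = Scope(level=0, local_dict={"x": 2, "y": 3})
parsed = Expr("x * y + 1", engine="python", parser="pandas", env=env)

# Look up the engine class, instantiate it with the parsed expression,
# and evaluate.
engine_cls = ENGINES["python"]
print(engine_cls(parsed).evaluate())  # 7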
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/eval.py ADDED
@@ -0,0 +1,415 @@
"""
Top level ``eval`` module.
"""
from __future__ import annotations

import tokenize
from typing import TYPE_CHECKING
import warnings

from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_bool_kwarg

from pandas.core.dtypes.common import is_extension_array_dtype

from pandas.core.computation.engines import ENGINES
from pandas.core.computation.expr import (
    PARSERS,
    Expr,
)
from pandas.core.computation.parsing import tokenize_string
from pandas.core.computation.scope import ensure_scope
from pandas.core.generic import NDFrame

from pandas.io.formats.printing import pprint_thing

if TYPE_CHECKING:
    from pandas.core.computation.ops import BinOp


def _check_engine(engine: str | None) -> str:
    """
    Make sure a valid engine is passed.

    Parameters
    ----------
    engine : str
        String to validate.

    Raises
    ------
    KeyError
      * If an invalid engine is passed.
    ImportError
      * If numexpr was requested but doesn't exist.

    Returns
    -------
    str
        Engine name.
    """
    from pandas.core.computation.check import NUMEXPR_INSTALLED
    from pandas.core.computation.expressions import USE_NUMEXPR

    if engine is None:
        engine = "numexpr" if USE_NUMEXPR else "python"

    if engine not in ENGINES:
        valid_engines = list(ENGINES.keys())
        raise KeyError(
            f"Invalid engine '{engine}' passed, valid engines are {valid_engines}"
        )

    # TODO: validate this in a more general way (thinking of future engines
    # that won't necessarily be import-able)
    # Could potentially be done on engine instantiation
    if engine == "numexpr" and not NUMEXPR_INSTALLED:
        raise ImportError(
            "'numexpr' is not installed or an unsupported version. Cannot use "
            "engine='numexpr' for query/eval if 'numexpr' is not installed"
        )

    return engine


def _check_parser(parser: str):
    """
    Make sure a valid parser is passed.

    Parameters
    ----------
    parser : str

    Raises
    ------
    KeyError
      * If an invalid parser is passed
    """
    if parser not in PARSERS:
        raise KeyError(
            f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}"
        )


def _check_resolvers(resolvers):
    if resolvers is not None:
        for resolver in resolvers:
            if not hasattr(resolver, "__getitem__"):
                name = type(resolver).__name__
                raise TypeError(
                    f"Resolver of type '{name}' does not "
                    "implement the __getitem__ method"
                )


def _check_expression(expr):
    """
    Make sure an expression is not an empty string

    Parameters
    ----------
    expr : object
        An object that can be converted to a string

    Raises
    ------
    ValueError
      * If expr is an empty string
    """
    if not expr:
        raise ValueError("expr cannot be an empty string")


def _convert_expression(expr) -> str:
    """
    Convert an object to an expression.

    This function converts an object to an expression (a unicode string) and
    checks to make sure it isn't empty after conversion. This is used to
    convert operators to their string representation for recursive calls to
    :func:`~pandas.eval`.

    Parameters
    ----------
    expr : object
        The object to be converted to a string.

    Returns
    -------
    str
        The string representation of an object.

    Raises
    ------
    ValueError
      * If the expression is empty.
    """
    s = pprint_thing(expr)
    _check_expression(s)
    return s


def _check_for_locals(expr: str, stack_level: int, parser: str):
    at_top_of_stack = stack_level == 0
    not_pandas_parser = parser != "pandas"

    if not_pandas_parser:
        msg = "The '@' prefix is only supported by the pandas parser"
    elif at_top_of_stack:
        msg = (
            "The '@' prefix is not allowed in top-level eval calls.\n"
            "please refer to your variables by name without the '@' prefix."
        )

    if at_top_of_stack or not_pandas_parser:
        for toknum, tokval in tokenize_string(expr):
            if toknum == tokenize.OP and tokval == "@":
                raise SyntaxError(msg)


def eval(
    expr: str | BinOp,  # we leave BinOp out of the docstr bc it isn't for users
    parser: str = "pandas",
    engine: str | None = None,
    local_dict=None,
    global_dict=None,
    resolvers=(),
    level: int = 0,
    target=None,
    inplace: bool = False,
):
    """
    Evaluate a Python expression as a string using various backends.

    The following arithmetic operations are supported: ``+``, ``-``, ``*``,
    ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following
    boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).
    Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,
    :keyword:`or`, and :keyword:`not` with the same semantics as the
    corresponding bitwise operators. :class:`~pandas.Series` and
    :class:`~pandas.DataFrame` objects are supported and behave as they would
    with plain ol' Python evaluation.

    Parameters
    ----------
    expr : str
        The expression to evaluate. This string cannot contain any Python
        `statements
        <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__,
        only Python `expressions
        <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__.
    parser : {'pandas', 'python'}, default 'pandas'
        The parser to use to construct the syntax tree from the expression. The
        default of ``'pandas'`` parses code slightly different than standard
        Python. Alternatively, you can parse an expression using the
        ``'python'`` parser to retain strict Python semantics. See the
        :ref:`enhancing performance <enhancingperf.eval>` documentation for
        more details.
    engine : {'python', 'numexpr'}, default 'numexpr'

        The engine used to evaluate the expression. Supported engines are

        - None : tries to use ``numexpr``, falls back to ``python``
        - ``'numexpr'`` : This default engine evaluates pandas objects using
          numexpr for large speed ups in complex expressions with large frames.
        - ``'python'`` : Performs operations as if you had ``eval``'d in top
          level python. This engine is generally not that useful.

        More backends may be available in the future.
    local_dict : dict or None, optional
        A dictionary of local variables, taken from locals() by default.
    global_dict : dict or None, optional
        A dictionary of global variables, taken from globals() by default.
    resolvers : list of dict-like or None, optional
        A list of objects implementing the ``__getitem__`` special method that
        you can use to inject an additional collection of namespaces to use for
        variable lookup. For example, this is used in the
        :meth:`~DataFrame.query` method to inject the
        ``DataFrame.index`` and ``DataFrame.columns``
        variables that refer to their respective :class:`~pandas.DataFrame`
        instance attributes.
    level : int, optional
        The number of prior stack frames to traverse and add to the current
        scope. Most users will **not** need to change this parameter.
    target : object, optional, default None
        This is the target object for assignment. It is used when there is
        variable assignment in the expression. If so, then `target` must
        support item assignment with string keys, and if a copy is being
        returned, it must also support `.copy()`.
    inplace : bool, default False
        If `target` is provided, and the expression mutates `target`, whether
        to modify `target` inplace. Otherwise, return a copy of `target` with
        the mutation.

    Returns
    -------
    ndarray, numeric scalar, DataFrame, Series, or None
        The completion value of evaluating the given code or None if ``inplace=True``.

    Raises
    ------
    ValueError
        There are many instances where such an error can be raised:

        - `target=None`, but the expression is multiline.
        - The expression is multiline, but not all them have item assignment.
          An example of such an arrangement is this:

          a = b + 1
          a + 2

          Here, there are expressions on different lines, making it multiline,
          but the last line has no variable assigned to the output of `a + 2`.
        - `inplace=True`, but the expression is missing item assignment.
        - Item assignment is provided, but the `target` does not support
          string item assignment.
        - Item assignment is provided and `inplace=False`, but the `target`
          does not support the `.copy()` method

    See Also
    --------
    DataFrame.query : Evaluates a boolean expression to query the columns
        of a frame.
    DataFrame.eval : Evaluate a string describing operations on
        DataFrame columns.

    Notes
    -----
    The ``dtype`` of any objects involved in an arithmetic ``%`` operation are
    recursively cast to ``float64``.

    See the :ref:`enhancing performance <enhancingperf.eval>` documentation for
    more details.

    Examples
    --------
    >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]})
    >>> df
      animal  age
    0    dog   10
    1    pig   20

    We can add a new column using ``pd.eval``:

    >>> pd.eval("double_age = df.age * 2", target=df)
      animal  age  double_age
    0    dog   10          20
    1    pig   20          40
    """
    inplace = validate_bool_kwarg(inplace, "inplace")

    exprs: list[str | BinOp]
    if isinstance(expr, str):
        _check_expression(expr)
        exprs = [e.strip() for e in expr.splitlines() if e.strip() != ""]
    else:
        # ops.BinOp; for internal compat, not intended to be passed by users
        exprs = [expr]
    multi_line = len(exprs) > 1

    if multi_line and target is None:
        raise ValueError(
            "multi-line expressions are only valid in the "
            "context of data, use DataFrame.eval"
        )
    engine = _check_engine(engine)
    _check_parser(parser)
    _check_resolvers(resolvers)

    ret = None
    first_expr = True
    target_modified = False

    for expr in exprs:
        expr = _convert_expression(expr)
        _check_for_locals(expr, level, parser)

        # get our (possibly passed-in) scope
        env = ensure_scope(
            level + 1,
            global_dict=global_dict,
            local_dict=local_dict,
            resolvers=resolvers,
            target=target,
        )

        parsed_expr = Expr(expr, engine=engine, parser=parser, env=env)

        if engine == "numexpr" and (
            is_extension_array_dtype(parsed_expr.terms.return_type)
            or getattr(parsed_expr.terms, "operand_types", None) is not None
            and any(
                is_extension_array_dtype(elem)
                for elem in parsed_expr.terms.operand_types
            )
        ):
            warnings.warn(
                "Engine has switched to 'python' because numexpr does not support "
                "extension array dtypes. Please set your engine to python manually.",
                RuntimeWarning,
                stacklevel=find_stack_level(),
            )
            engine = "python"

        # construct the engine and evaluate the parsed expression
        eng = ENGINES[engine]
        eng_inst = eng(parsed_expr)
        ret = eng_inst.evaluate()

        if parsed_expr.assigner is None:
            if multi_line:
                raise ValueError(
                    "Multi-line expressions are only valid "
                    "if all expressions contain an assignment"
                )
            if inplace:
                raise ValueError("Cannot operate inplace if there is no assignment")

        # assign if needed
        assigner = parsed_expr.assigner
        if env.target is not None and assigner is not None:
            target_modified = True

            # if returning a copy, copy only on the first assignment
            if not inplace and first_expr:
                try:
                    target = env.target
                    if isinstance(target, NDFrame):
                        target = target.copy(deep=None)
                    else:
                        target = target.copy()
                except AttributeError as err:
                    raise ValueError("Cannot return a copy of the target") from err
            else:
                target = env.target

            # TypeError is most commonly raised (e.g. int, list), but you
            # get IndexError if you try to do this assignment on np.ndarray.
            # we will ignore numpy warnings here; e.g. if trying
            # to use a non-numeric indexer
            try:
                if inplace and isinstance(target, NDFrame):
                    target.loc[:, assigner] = ret
                else:
                    target[assigner] = ret  # pyright: ignore[reportGeneralTypeIssues]
            except (TypeError, IndexError) as err:
                raise ValueError("Cannot assign expression output to target") from err

            if not resolvers:
                resolvers = ({assigner: ret},)
            else:
                # existing resolver needs updated to handle
                # case of mutating existing column in copy
                for resolver in resolvers:
                    if assigner in resolver:
                        resolver[assigner] = ret
                        break
                else:
                    resolvers += ({assigner: ret},)

            ret = None
        first_expr = False

    # We want to exclude `inplace=None` as being False.
    if inplace is False:
        return target if target_modified else ret
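To complement the docstring example above (not part of the commit): multi-line expressions require a ``target`` and an assignment on every line; with ``inplace=True`` the target is mutated and ``eval`` returns None:

import pandas as pd

df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]})

# Each non-empty line must be an assignment; the target is mutated.
ret = pd.eval(
    """
    double_age = df.age * 2
    triple_age = df.age * 3
    """,
    target=df,
    inplace=True,
)
print(ret)                  # None
print(df.columns.tolist())  # ['animal', 'age', 'double_age', 'triple_age']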
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/expr.py ADDED
@@ -0,0 +1,840 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ :func:`~pandas.eval` parsers.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import ast
7
+ from functools import (
8
+ partial,
9
+ reduce,
10
+ )
11
+ from keyword import iskeyword
12
+ import tokenize
13
+ from typing import (
14
+ Callable,
15
+ ClassVar,
16
+ TypeVar,
17
+ )
18
+
19
+ import numpy as np
20
+
21
+ from pandas.errors import UndefinedVariableError
22
+
23
+ import pandas.core.common as com
24
+ from pandas.core.computation.ops import (
25
+ ARITH_OPS_SYMS,
26
+ BOOL_OPS_SYMS,
27
+ CMP_OPS_SYMS,
28
+ LOCAL_TAG,
29
+ MATHOPS,
30
+ REDUCTIONS,
31
+ UNARY_OPS_SYMS,
32
+ BinOp,
33
+ Constant,
34
+ Div,
35
+ FuncNode,
36
+ Op,
37
+ Term,
38
+ UnaryOp,
39
+ is_term,
40
+ )
41
+ from pandas.core.computation.parsing import (
42
+ clean_backtick_quoted_toks,
43
+ tokenize_string,
44
+ )
45
+ from pandas.core.computation.scope import Scope
46
+
47
+ from pandas.io.formats import printing
48
+
49
+
50
+ def _rewrite_assign(tok: tuple[int, str]) -> tuple[int, str]:
51
+ """
52
+ Rewrite the assignment operator for PyTables expressions that use ``=``
53
+ as a substitute for ``==``.
54
+
55
+ Parameters
56
+ ----------
57
+ tok : tuple of int, str
58
+ ints correspond to the all caps constants in the tokenize module
59
+
60
+ Returns
61
+ -------
62
+ tuple of int, str
63
+ Either the input or token or the replacement values
64
+ """
65
+ toknum, tokval = tok
66
+ return toknum, "==" if tokval == "=" else tokval
67
+
68
+
69
+ def _replace_booleans(tok: tuple[int, str]) -> tuple[int, str]:
70
+ """
71
+ Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
72
+ precedence is changed to boolean precedence.
73
+
74
+ Parameters
75
+ ----------
76
+ tok : tuple of int, str
77
+ ints correspond to the all caps constants in the tokenize module
78
+
79
+ Returns
80
+ -------
81
+ tuple of int, str
82
+ Either the input or token or the replacement values
83
+ """
84
+ toknum, tokval = tok
85
+ if toknum == tokenize.OP:
86
+ if tokval == "&":
87
+ return tokenize.NAME, "and"
88
+ elif tokval == "|":
89
+ return tokenize.NAME, "or"
90
+ return toknum, tokval
91
+ return toknum, tokval
92
+
93
+
94
+ def _replace_locals(tok: tuple[int, str]) -> tuple[int, str]:
95
+ """
96
+ Replace local variables with a syntactically valid name.
97
+
98
+ Parameters
99
+ ----------
100
+ tok : tuple of int, str
101
+ ints correspond to the all caps constants in the tokenize module
102
+
103
+ Returns
104
+ -------
105
+ tuple of int, str
106
+ Either the input or token or the replacement values
107
+
108
+ Notes
109
+ -----
110
+ This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as
111
+ ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_``
112
+ is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
113
+ """
114
+ toknum, tokval = tok
115
+ if toknum == tokenize.OP and tokval == "@":
116
+ return tokenize.OP, LOCAL_TAG
117
+ return toknum, tokval
118
+
119
+
120
+ def _compose2(f, g):
121
+ """
122
+ Compose 2 callables.
123
+ """
124
+ return lambda *args, **kwargs: f(g(*args, **kwargs))
125
+
126
+
127
+ def _compose(*funcs):
128
+ """
129
+ Compose 2 or more callables.
130
+ """
131
+ assert len(funcs) > 1, "At least 2 callables must be passed to compose"
132
+ return reduce(_compose2, funcs)
133
+
134
+
135
+ def _preparse(
136
+ source: str,
137
+ f=_compose(
138
+ _replace_locals, _replace_booleans, _rewrite_assign, clean_backtick_quoted_toks
139
+ ),
140
+ ) -> str:
141
+ """
142
+ Compose a collection of tokenization functions.
143
+
144
+ Parameters
145
+ ----------
146
+ source : str
147
+ A Python source code string
148
+ f : callable
149
+ This takes a tuple of (toknum, tokval) as its argument and returns a
150
+ tuple with the same structure but possibly different elements. Defaults
151
+ to the composition of ``_rewrite_assign``, ``_replace_booleans``, and
152
+ ``_replace_locals``.
153
+
154
+ Returns
155
+ -------
156
+ str
157
+ Valid Python source code
158
+
159
+ Notes
160
+ -----
161
+ The `f` parameter can be any callable that takes *and* returns input of the
162
+ form ``(toknum, tokval)``, where ``toknum`` is one of the constants from
163
+ the ``tokenize`` module and ``tokval`` is a string.
164
+ """
165
+ assert callable(f), "f must be callable"
166
+ return tokenize.untokenize(f(x) for x in tokenize_string(source))
167
+
168
+
169
+ def _is_type(t):
170
+ """
171
+ Factory for a type checking function of type ``t`` or tuple of types.
172
+ """
173
+ return lambda x: isinstance(x.value, t)
174
+
175
+
176
+ _is_list = _is_type(list)
177
+ _is_str = _is_type(str)
178
+
179
+
180
+ # partition all AST nodes
181
+ _all_nodes = frozenset(
182
+ node
183
+ for node in (getattr(ast, name) for name in dir(ast))
184
+ if isinstance(node, type) and issubclass(node, ast.AST)
185
+ )
186
+
187
+
188
+ def _filter_nodes(superclass, all_nodes=_all_nodes):
189
+ """
190
+ Filter out AST nodes that are subclasses of ``superclass``.
191
+ """
192
+ node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass))
193
+ return frozenset(node_names)
194
+
195
+
196
+ _all_node_names = frozenset(x.__name__ for x in _all_nodes)
197
+ _mod_nodes = _filter_nodes(ast.mod)
198
+ _stmt_nodes = _filter_nodes(ast.stmt)
199
+ _expr_nodes = _filter_nodes(ast.expr)
200
+ _expr_context_nodes = _filter_nodes(ast.expr_context)
201
+ _boolop_nodes = _filter_nodes(ast.boolop)
202
+ _operator_nodes = _filter_nodes(ast.operator)
203
+ _unary_op_nodes = _filter_nodes(ast.unaryop)
204
+ _cmp_op_nodes = _filter_nodes(ast.cmpop)
205
+ _comprehension_nodes = _filter_nodes(ast.comprehension)
206
+ _handler_nodes = _filter_nodes(ast.excepthandler)
207
+ _arguments_nodes = _filter_nodes(ast.arguments)
208
+ _keyword_nodes = _filter_nodes(ast.keyword)
209
+ _alias_nodes = _filter_nodes(ast.alias)
210
+
211
+
212
+ # nodes that we don't support directly but are needed for parsing
213
+ _hacked_nodes = frozenset(["Assign", "Module", "Expr"])
214
+
215
+
216
+ _unsupported_expr_nodes = frozenset(
217
+ [
218
+ "Yield",
219
+ "GeneratorExp",
220
+ "IfExp",
221
+ "DictComp",
222
+ "SetComp",
223
+ "Repr",
224
+ "Lambda",
225
+ "Set",
226
+ "AST",
227
+ "Is",
228
+ "IsNot",
229
+ ]
230
+ )
231
+
232
+ # these nodes are low priority or won't ever be supported (e.g., AST)
233
+ _unsupported_nodes = (
234
+ _stmt_nodes
235
+ | _mod_nodes
236
+ | _handler_nodes
237
+ | _arguments_nodes
238
+ | _keyword_nodes
239
+ | _alias_nodes
240
+ | _expr_context_nodes
241
+ | _unsupported_expr_nodes
242
+ ) - _hacked_nodes
243
+
244
+ # we're adding a different assignment in some cases to be equality comparison
245
+ # and we don't want `stmt` and friends in their so get only the class whose
246
+ # names are capitalized
247
+ _base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes
248
+ intersection = _unsupported_nodes & _base_supported_nodes
249
+ _msg = f"cannot both support and not support {intersection}"
250
+ assert not intersection, _msg
251
+
252
+
253
+ def _node_not_implemented(node_name: str) -> Callable[..., None]:
254
+ """
255
+ Return a function that raises a NotImplementedError with a passed node name.
256
+ """
257
+
258
+ def f(self, *args, **kwargs):
259
+ raise NotImplementedError(f"'{node_name}' nodes are not implemented")
260
+
261
+ return f
262
+
263
+
264
+ # should be bound by BaseExprVisitor but that creates a circular dependency:
265
+ # _T is used in disallow, but disallow is used to define BaseExprVisitor
266
+ # https://github.com/microsoft/pyright/issues/2315
267
+ _T = TypeVar("_T")
268
+
269
+
270
+ def disallow(nodes: set[str]) -> Callable[[type[_T]], type[_T]]:
271
+ """
272
+ Decorator to disallow certain nodes from parsing. Raises a
273
+ NotImplementedError instead.
274
+
275
+ Returns
276
+ -------
277
+ callable
278
+ """
279
+
280
+ def disallowed(cls: type[_T]) -> type[_T]:
281
+ # error: "Type[_T]" has no attribute "unsupported_nodes"
282
+ cls.unsupported_nodes = () # type: ignore[attr-defined]
283
+ for node in nodes:
284
+ new_method = _node_not_implemented(node)
285
+ name = f"visit_{node}"
286
+ # error: "Type[_T]" has no attribute "unsupported_nodes"
287
+ cls.unsupported_nodes += (name,) # type: ignore[attr-defined]
288
+ setattr(cls, name, new_method)
289
+ return cls
290
+
291
+ return disallowed
292
+
293
+
294
+ def _op_maker(op_class, op_symbol):
295
+ """
296
+ Return a function to create an op class with its symbol already passed.
297
+
298
+ Returns
299
+ -------
300
+ callable
301
+ """
302
+
303
+ def f(self, node, *args, **kwargs):
304
+ """
305
+ Return a partial function with an Op subclass with an operator already passed.
306
+
307
+ Returns
308
+ -------
309
+ callable
310
+ """
311
+ return partial(op_class, op_symbol, *args, **kwargs)
312
+
313
+ return f
314
+
315
+
316
+ _op_classes = {"binary": BinOp, "unary": UnaryOp}
317
+
318
+
319
+ def add_ops(op_classes):
320
+ """
321
+ Decorator to add default implementation of ops.
322
+ """
323
+
324
+ def f(cls):
325
+ for op_attr_name, op_class in op_classes.items():
326
+ ops = getattr(cls, f"{op_attr_name}_ops")
327
+ ops_map = getattr(cls, f"{op_attr_name}_op_nodes_map")
328
+ for op in ops:
329
+ op_node = ops_map[op]
330
+ if op_node is not None:
331
+ made_op = _op_maker(op_class, op)
332
+ setattr(cls, f"visit_{op_node}", made_op)
333
+ return cls
334
+
335
+ return f
336
+
337
+
338
+ @disallow(_unsupported_nodes)
339
+ @add_ops(_op_classes)
340
+ class BaseExprVisitor(ast.NodeVisitor):
341
+ """
342
+ Custom ast walker. Parsers of other engines should subclass this class
343
+ if necessary.
344
+
345
+ Parameters
346
+ ----------
347
+ env : Scope
348
+ engine : str
349
+ parser : str
350
+ preparser : callable
351
+ """
352
+
353
+ const_type: ClassVar[type[Term]] = Constant
354
+ term_type: ClassVar[type[Term]] = Term
355
+
356
+ binary_ops = CMP_OPS_SYMS + BOOL_OPS_SYMS + ARITH_OPS_SYMS
357
+ binary_op_nodes = (
358
+ "Gt",
359
+ "Lt",
360
+ "GtE",
361
+ "LtE",
362
+ "Eq",
363
+ "NotEq",
364
+ "In",
365
+ "NotIn",
366
+ "BitAnd",
367
+ "BitOr",
368
+ "And",
369
+ "Or",
370
+ "Add",
371
+ "Sub",
372
+ "Mult",
373
+ None,
374
+ "Pow",
375
+ "FloorDiv",
376
+ "Mod",
377
+ )
378
+ binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))
379
+
380
+ unary_ops = UNARY_OPS_SYMS
381
+ unary_op_nodes = "UAdd", "USub", "Invert", "Not"
382
+ unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))
383
+
384
+ rewrite_map = {
385
+ ast.Eq: ast.In,
386
+ ast.NotEq: ast.NotIn,
387
+ ast.In: ast.In,
388
+ ast.NotIn: ast.NotIn,
389
+ }
390
+
391
+ unsupported_nodes: tuple[str, ...]
392
+
393
+ def __init__(self, env, engine, parser, preparser=_preparse) -> None:
394
+ self.env = env
395
+ self.engine = engine
396
+ self.parser = parser
397
+ self.preparser = preparser
398
+ self.assigner = None
399
+
400
+ def visit(self, node, **kwargs):
401
+ if isinstance(node, str):
402
+ clean = self.preparser(node)
403
+ try:
404
+ node = ast.fix_missing_locations(ast.parse(clean))
405
+ except SyntaxError as e:
406
+ if any(iskeyword(x) for x in clean.split()):
407
+ e.msg = "Python keyword not valid identifier in numexpr query"
408
+ raise e
409
+
410
+ method = f"visit_{type(node).__name__}"
411
+ visitor = getattr(self, method)
412
+ return visitor(node, **kwargs)
413
+
414
+ def visit_Module(self, node, **kwargs):
415
+ if len(node.body) != 1:
416
+ raise SyntaxError("only a single expression is allowed")
417
+ expr = node.body[0]
418
+ return self.visit(expr, **kwargs)
419
+
420
+ def visit_Expr(self, node, **kwargs):
421
+ return self.visit(node.value, **kwargs)
422
+
423
+ def _rewrite_membership_op(self, node, left, right):
424
+ # the kind of the operator (is actually an instance)
425
+ op_instance = node.op
426
+ op_type = type(op_instance)
427
+
428
+ # must be two terms and the comparison operator must be ==/!=/in/not in
429
+ if is_term(left) and is_term(right) and op_type in self.rewrite_map:
430
+ left_list, right_list = map(_is_list, (left, right))
431
+ left_str, right_str = map(_is_str, (left, right))
432
+
433
+ # if there are any strings or lists in the expression
434
+ if left_list or right_list or left_str or right_str:
435
+ op_instance = self.rewrite_map[op_type]()
436
+
437
+ # pop the string variable out of locals and replace it with a list
438
+ # of one string, kind of a hack
439
+ if right_str:
440
+ name = self.env.add_tmp([right.value])
441
+ right = self.term_type(name, self.env)
442
+
443
+ if left_str:
444
+ name = self.env.add_tmp([left.value])
445
+ left = self.term_type(name, self.env)
446
+
447
+ op = self.visit(op_instance)
448
+ return op, op_instance, left, right
449
+
450
+ def _maybe_transform_eq_ne(self, node, left=None, right=None):
451
+ if left is None:
452
+ left = self.visit(node.left, side="left")
453
+ if right is None:
454
+ right = self.visit(node.right, side="right")
455
+ op, op_class, left, right = self._rewrite_membership_op(node, left, right)
456
+ return op, op_class, left, right
457
+
458
+ def _maybe_downcast_constants(self, left, right):
459
+ f32 = np.dtype(np.float32)
460
+ if (
461
+ left.is_scalar
462
+ and hasattr(left, "value")
463
+ and not right.is_scalar
464
+ and right.return_type == f32
465
+ ):
466
+ # right is a float32 array, left is a scalar
467
+ name = self.env.add_tmp(np.float32(left.value))
468
+ left = self.term_type(name, self.env)
469
+ if (
470
+ right.is_scalar
471
+ and hasattr(right, "value")
472
+ and not left.is_scalar
473
+ and left.return_type == f32
474
+ ):
475
+ # left is a float32 array, right is a scalar
476
+ name = self.env.add_tmp(np.float32(right.value))
477
+ right = self.term_type(name, self.env)
478
+
479
+ return left, right
480
+
481
+ def _maybe_eval(self, binop, eval_in_python):
482
+ # eval `in` and `not in` (for now) in "partial" python space
483
+ # things that can be evaluated in "eval" space will be turned into
484
+ # temporary variables. for example,
485
+ # [1,2] in a + 2 * b
486
+ # in that case a + 2 * b will be evaluated using numexpr, and the "in"
487
+ # call will be evaluated using isin (in python space)
488
+ return binop.evaluate(
489
+ self.env, self.engine, self.parser, self.term_type, eval_in_python
490
+ )
491
+
492
+ def _maybe_evaluate_binop(
493
+ self,
494
+ op,
495
+ op_class,
496
+ lhs,
497
+ rhs,
498
+ eval_in_python=("in", "not in"),
499
+ maybe_eval_in_python=("==", "!=", "<", ">", "<=", ">="),
500
+ ):
501
+ res = op(lhs, rhs)
502
+
503
+ if res.has_invalid_return_type:
504
+ raise TypeError(
505
+ f"unsupported operand type(s) for {res.op}: "
506
+ f"'{lhs.type}' and '{rhs.type}'"
507
+ )
508
+
509
+ if self.engine != "pytables" and (
510
+ res.op in CMP_OPS_SYMS
511
+ and getattr(lhs, "is_datetime", False)
512
+ or getattr(rhs, "is_datetime", False)
513
+ ):
514
+ # all date ops must be done in python bc numexpr doesn't work
515
+ # well with NaT
516
+ return self._maybe_eval(res, self.binary_ops)
517
+
518
+ if res.op in eval_in_python:
519
+ # "in"/"not in" ops are always evaluated in python
520
+ return self._maybe_eval(res, eval_in_python)
521
+ elif self.engine != "pytables":
522
+ if (
523
+ getattr(lhs, "return_type", None) == object
524
+ or getattr(rhs, "return_type", None) == object
525
+ ):
526
+ # evaluate "==" and "!=" in python if either of our operands
527
+ # has an object return type
528
+ return self._maybe_eval(res, eval_in_python + maybe_eval_in_python)
529
+ return res
530
+
531
+ def visit_BinOp(self, node, **kwargs):
532
+ op, op_class, left, right = self._maybe_transform_eq_ne(node)
533
+ left, right = self._maybe_downcast_constants(left, right)
534
+ return self._maybe_evaluate_binop(op, op_class, left, right)
535
+
536
+ def visit_Div(self, node, **kwargs):
537
+ return lambda lhs, rhs: Div(lhs, rhs)
538
+
539
+ def visit_UnaryOp(self, node, **kwargs):
540
+ op = self.visit(node.op)
541
+ operand = self.visit(node.operand)
542
+ return op(operand)
543
+
544
+ def visit_Name(self, node, **kwargs) -> Term:
545
+ return self.term_type(node.id, self.env, **kwargs)
546
+
547
+ # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min
548
+ def visit_NameConstant(self, node, **kwargs) -> Term:
549
+ return self.const_type(node.value, self.env)
550
+
551
+ # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min
552
+ def visit_Num(self, node, **kwargs) -> Term:
553
+ return self.const_type(node.value, self.env)
554
+
555
+ def visit_Constant(self, node, **kwargs) -> Term:
556
+ return self.const_type(node.value, self.env)
557
+
558
+ # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min
559
+ def visit_Str(self, node, **kwargs) -> Term:
560
+ name = self.env.add_tmp(node.s)
561
+ return self.term_type(name, self.env)
562
+
563
+ def visit_List(self, node, **kwargs) -> Term:
564
+ name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts])
565
+ return self.term_type(name, self.env)
566
+
567
+ visit_Tuple = visit_List
568
+
569
+ def visit_Index(self, node, **kwargs):
570
+ """df.index[4]"""
571
+ return self.visit(node.value)
572
+
573
+ def visit_Subscript(self, node, **kwargs) -> Term:
574
+ from pandas import eval as pd_eval
575
+
576
+ value = self.visit(node.value)
577
+ slobj = self.visit(node.slice)
578
+ result = pd_eval(
579
+ slobj, local_dict=self.env, engine=self.engine, parser=self.parser
580
+ )
581
+ try:
582
+ # a Term instance
583
+ v = value.value[result]
584
+ except AttributeError:
585
+ # an Op instance
586
+ lhs = pd_eval(
587
+ value, local_dict=self.env, engine=self.engine, parser=self.parser
588
+ )
589
+ v = lhs[result]
590
+ name = self.env.add_tmp(v)
591
+ return self.term_type(name, env=self.env)
592
+
593
+ def visit_Slice(self, node, **kwargs) -> slice:
594
+ """df.index[slice(4,6)]"""
595
+ lower = node.lower
596
+ if lower is not None:
597
+ lower = self.visit(lower).value
598
+ upper = node.upper
599
+ if upper is not None:
600
+ upper = self.visit(upper).value
601
+ step = node.step
602
+ if step is not None:
603
+ step = self.visit(step).value
604
+
605
+ return slice(lower, upper, step)
606
+
607
+ def visit_Assign(self, node, **kwargs):
608
+ """
609
+ support a single assignment node, like
610
+
611
+ c = a + b
612
+
613
+ set the assigner at the top level, must be a Name node which
614
+ might or might not exist in the resolvers
615
+
616
+ """
617
+ if len(node.targets) != 1:
618
+ raise SyntaxError("can only assign a single expression")
619
+ if not isinstance(node.targets[0], ast.Name):
620
+ raise SyntaxError("left hand side of an assignment must be a single name")
621
+ if self.env.target is None:
622
+ raise ValueError("cannot assign without a target object")
623
+
624
+ try:
625
+ assigner = self.visit(node.targets[0], **kwargs)
626
+ except UndefinedVariableError:
627
+ assigner = node.targets[0].id
628
+
629
+ self.assigner = getattr(assigner, "name", assigner)
630
+ if self.assigner is None:
631
+ raise SyntaxError(
632
+ "left hand side of an assignment must be a single resolvable name"
633
+ )
634
+
635
+ return self.visit(node.value, **kwargs)
636
+
637
+ def visit_Attribute(self, node, **kwargs):
638
+ attr = node.attr
639
+ value = node.value
640
+
641
+ ctx = node.ctx
642
+ if isinstance(ctx, ast.Load):
643
+ # resolve the value
644
+ resolved = self.visit(value).value
645
+ try:
646
+ v = getattr(resolved, attr)
647
+ name = self.env.add_tmp(v)
648
+ return self.term_type(name, self.env)
649
+ except AttributeError:
650
+ # something like datetime.datetime where scope is overridden
651
+ if isinstance(value, ast.Name) and value.id == attr:
652
+ return resolved
653
+ raise
654
+
655
+ raise ValueError(f"Invalid Attribute context {type(ctx).__name__}")
656
+
657
+ def visit_Call(self, node, side=None, **kwargs):
658
+ if isinstance(node.func, ast.Attribute) and node.func.attr != "__call__":
659
+ res = self.visit_Attribute(node.func)
660
+ elif not isinstance(node.func, ast.Name):
661
+ raise TypeError("Only named functions are supported")
662
+ else:
663
+ try:
664
+ res = self.visit(node.func)
665
+ except UndefinedVariableError:
666
+ # Check if this is a supported function name
667
+ try:
668
+ res = FuncNode(node.func.id)
669
+ except ValueError:
670
+ # Raise original error
671
+ raise
672
+
673
+ if res is None:
674
+ # error: "expr" has no attribute "id"
675
+ raise ValueError(
676
+ f"Invalid function call {node.func.id}" # type: ignore[attr-defined]
677
+ )
678
+ if hasattr(res, "value"):
679
+ res = res.value
680
+
681
+ if isinstance(res, FuncNode):
682
+ new_args = [self.visit(arg) for arg in node.args]
683
+
684
+ if node.keywords:
685
+ raise TypeError(
686
+ f'Function "{res.name}" does not support keyword arguments'
687
+ )
688
+
689
+ return res(*new_args)
690
+
691
+ else:
692
+ new_args = [self.visit(arg)(self.env) for arg in node.args]
693
+
694
+ for key in node.keywords:
695
+ if not isinstance(key, ast.keyword):
696
+ # error: "expr" has no attribute "id"
697
+ raise ValueError(
698
+ "keyword error in function call "
699
+ f"'{node.func.id}'" # type: ignore[attr-defined]
700
+ )
701
+
702
+ if key.arg:
703
+ kwargs[key.arg] = self.visit(key.value)(self.env)
704
+
705
+ name = self.env.add_tmp(res(*new_args, **kwargs))
706
+ return self.term_type(name=name, env=self.env)
707
+
708
+ def translate_In(self, op):
709
+ return op
710
+
711
+ def visit_Compare(self, node, **kwargs):
712
+ ops = node.ops
713
+ comps = node.comparators
714
+
715
+ # base case: we have something like a CMP b
716
+ if len(comps) == 1:
717
+ op = self.translate_In(ops[0])
718
+ binop = ast.BinOp(op=op, left=node.left, right=comps[0])
719
+ return self.visit(binop)
720
+
721
+ # recursive case: we have a chained comparison, a CMP b CMP c, etc.
722
+ left = node.left
723
+ values = []
724
+ for op, comp in zip(ops, comps):
725
+ new_node = self.visit(
726
+ ast.Compare(comparators=[comp], left=left, ops=[self.translate_In(op)])
727
+ )
728
+ left = comp
729
+ values.append(new_node)
730
+ return self.visit(ast.BoolOp(op=ast.And(), values=values))
731
+
732
+ def _try_visit_binop(self, bop):
733
+ if isinstance(bop, (Op, Term)):
734
+ return bop
735
+ return self.visit(bop)
736
+
737
+ def visit_BoolOp(self, node, **kwargs):
738
+ def visitor(x, y):
739
+ lhs = self._try_visit_binop(x)
740
+ rhs = self._try_visit_binop(y)
741
+
742
+ op, op_class, lhs, rhs = self._maybe_transform_eq_ne(node, lhs, rhs)
743
+ return self._maybe_evaluate_binop(op, node.op, lhs, rhs)
744
+
745
+ operands = node.values
746
+ return reduce(visitor, operands)
747
+
748
+
749
+ _python_not_supported = frozenset(["Dict", "BoolOp", "In", "NotIn"])
750
+ _numexpr_supported_calls = frozenset(REDUCTIONS + MATHOPS)
751
+
752
+
753
+ @disallow(
754
+ (_unsupported_nodes | _python_not_supported)
755
+ - (_boolop_nodes | frozenset(["BoolOp", "Attribute", "In", "NotIn", "Tuple"]))
756
+ )
757
+ class PandasExprVisitor(BaseExprVisitor):
758
+ def __init__(
759
+ self,
760
+ env,
761
+ engine,
762
+ parser,
763
+ preparser=partial(
764
+ _preparse,
765
+ f=_compose(_replace_locals, _replace_booleans, clean_backtick_quoted_toks),
766
+ ),
767
+ ) -> None:
768
+ super().__init__(env, engine, parser, preparser)
769
+
770
+
771
+ @disallow(_unsupported_nodes | _python_not_supported | frozenset(["Not"]))
772
+ class PythonExprVisitor(BaseExprVisitor):
773
+ def __init__(
774
+ self, env, engine, parser, preparser=lambda source, f=None: source
775
+ ) -> None:
776
+ super().__init__(env, engine, parser, preparser=preparser)
777
+
778
+
779
+ class Expr:
780
+ """
781
+ Object encapsulating an expression.
782
+
783
+ Parameters
784
+ ----------
785
+ expr : str
786
+ engine : str, optional, default 'numexpr'
787
+ parser : str, optional, default 'pandas'
788
+ env : Scope, optional, default None
789
+ level : int, optional, default 2
790
+ """
791
+
792
+ env: Scope
793
+ engine: str
794
+ parser: str
795
+
796
+ def __init__(
797
+ self,
798
+ expr,
799
+ engine: str = "numexpr",
800
+ parser: str = "pandas",
801
+ env: Scope | None = None,
802
+ level: int = 0,
803
+ ) -> None:
804
+ self.expr = expr
805
+ self.env = env or Scope(level=level + 1)
806
+ self.engine = engine
807
+ self.parser = parser
808
+ self._visitor = PARSERS[parser](self.env, self.engine, self.parser)
809
+ self.terms = self.parse()
810
+
811
+ @property
812
+ def assigner(self):
813
+ return getattr(self._visitor, "assigner", None)
814
+
815
+ def __call__(self):
816
+ return self.terms(self.env)
817
+
818
+ def __repr__(self) -> str:
819
+ return printing.pprint_thing(self.terms)
820
+
821
+ def __len__(self) -> int:
822
+ return len(self.expr)
823
+
824
+ def parse(self):
825
+ """
826
+ Parse an expression.
827
+ """
828
+ return self._visitor.visit(self.expr)
829
+
830
+ @property
831
+ def names(self):
832
+ """
833
+ Get the names in an expression.
834
+ """
835
+ if is_term(self.terms):
836
+ return frozenset([self.terms.name])
837
+ return frozenset(term.name for term in com.flatten(self.terms))
838
+
839
+
840
+ PARSERS = {"python": PythonExprVisitor, "pandas": PandasExprVisitor}
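The PARSERS mapping above is the dispatch table behind the parser= argument of pd.eval, which constructs an Expr internally. A minimal usage sketch (the names a and b are illustrative locals, not part of the API):

    import pandas as pd

    a, b = 1, 41
    # parser="python" keeps strict Python semantics via PythonExprVisitor;
    # parser="pandas" (the default) uses PandasExprVisitor, whose preparser
    # also rewrites &/| and handles backtick-quoted names (DataFrame.query).
    print(pd.eval("a + b", parser="python", engine="python"))  # 42
    print(pd.eval("a + b", parser="pandas"))                   # 42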
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/expressions.py ADDED
@@ -0,0 +1,286 @@
1
+ """
2
+ Expressions
3
+ -----------
4
+
5
+ Offer fast expression evaluation through numexpr
6
+
7
+ """
8
+ from __future__ import annotations
9
+
10
+ import operator
11
+ from typing import TYPE_CHECKING
12
+ import warnings
13
+
14
+ import numpy as np
15
+
16
+ from pandas._config import get_option
17
+
18
+ from pandas.util._exceptions import find_stack_level
19
+
20
+ from pandas.core import roperator
21
+ from pandas.core.computation.check import NUMEXPR_INSTALLED
22
+
23
+ if NUMEXPR_INSTALLED:
24
+ import numexpr as ne
25
+
26
+ if TYPE_CHECKING:
27
+ from pandas._typing import FuncType
28
+
29
+ _TEST_MODE: bool | None = None
30
+ _TEST_RESULT: list[bool] = []
31
+ USE_NUMEXPR = NUMEXPR_INSTALLED
32
+ _evaluate: FuncType | None = None
33
+ _where: FuncType | None = None
34
+
35
+ # the set of dtypes that we will allow to pass to numexpr
36
+ _ALLOWED_DTYPES = {
37
+ "evaluate": {"int64", "int32", "float64", "float32", "bool"},
38
+ "where": {"int64", "float64", "bool"},
39
+ }
40
+
41
+ # the minimum number of elements at which we will use numexpr
42
+ _MIN_ELEMENTS = 1_000_000
43
+
44
+
45
+ def set_use_numexpr(v: bool = True) -> None:
46
+ # set/unset to use numexpr
47
+ global USE_NUMEXPR
48
+ if NUMEXPR_INSTALLED:
49
+ USE_NUMEXPR = v
50
+
51
+ # choose what we are going to do
52
+ global _evaluate, _where
53
+
54
+ _evaluate = _evaluate_numexpr if USE_NUMEXPR else _evaluate_standard
55
+ _where = _where_numexpr if USE_NUMEXPR else _where_standard
56
+
57
+
58
+ def set_numexpr_threads(n=None) -> None:
59
+ # if we are using numexpr, set the threads to n
60
+ # otherwise reset
61
+ if NUMEXPR_INSTALLED and USE_NUMEXPR:
62
+ if n is None:
63
+ n = ne.detect_number_of_cores()
64
+ ne.set_num_threads(n)
65
+
66
+
67
+ def _evaluate_standard(op, op_str, a, b):
68
+ """
69
+ Standard evaluation.
70
+ """
71
+ if _TEST_MODE:
72
+ _store_test_result(False)
73
+ return op(a, b)
74
+
75
+
76
+ def _can_use_numexpr(op, op_str, a, b, dtype_check) -> bool:
77
+ """return a boolean if we WILL be using numexpr"""
78
+ if op_str is not None:
79
+ # required min elements (otherwise we are adding overhead)
80
+ if a.size > _MIN_ELEMENTS:
81
+ # check for dtype compatibility
82
+ dtypes: set[str] = set()
83
+ for o in [a, b]:
84
+ # ndarray and Series Case
85
+ if hasattr(o, "dtype"):
86
+ dtypes |= {o.dtype.name}
87
+
88
+ # allowed are a superset
89
+ if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
90
+ return True
91
+
92
+ return False
93
+
94
+
95
+ def _evaluate_numexpr(op, op_str, a, b):
96
+ result = None
97
+
98
+ if _can_use_numexpr(op, op_str, a, b, "evaluate"):
99
+ is_reversed = op.__name__.strip("_").startswith("r")
100
+ if is_reversed:
101
+ # we were originally called by a reversed op method
102
+ a, b = b, a
103
+
104
+ a_value = a
105
+ b_value = b
106
+
107
+ try:
108
+ result = ne.evaluate(
109
+ f"a_value {op_str} b_value",
110
+ local_dict={"a_value": a_value, "b_value": b_value},
111
+ casting="safe",
112
+ )
113
+ except TypeError:
114
+ # numexpr raises eg for array ** array with integers
115
+ # (https://github.com/pydata/numexpr/issues/379)
116
+ pass
117
+ except NotImplementedError:
118
+ if _bool_arith_fallback(op_str, a, b):
119
+ pass
120
+ else:
121
+ raise
122
+
123
+ if is_reversed:
124
+ # reverse order to original for fallback
125
+ a, b = b, a
126
+
127
+ if _TEST_MODE:
128
+ _store_test_result(result is not None)
129
+
130
+ if result is None:
131
+ result = _evaluate_standard(op, op_str, a, b)
132
+
133
+ return result
134
+
135
+
136
+ _op_str_mapping = {
137
+ operator.add: "+",
138
+ roperator.radd: "+",
139
+ operator.mul: "*",
140
+ roperator.rmul: "*",
141
+ operator.sub: "-",
142
+ roperator.rsub: "-",
143
+ operator.truediv: "/",
144
+ roperator.rtruediv: "/",
145
+ # floordiv not supported by numexpr 2.x
146
+ operator.floordiv: None,
147
+ roperator.rfloordiv: None,
148
+ # we require Python semantics for mod of negative for backwards compatibility
149
+ # see https://github.com/pydata/numexpr/issues/365
150
+ # so sticking with unaccelerated for now GH#36552
151
+ operator.mod: None,
152
+ roperator.rmod: None,
153
+ operator.pow: "**",
154
+ roperator.rpow: "**",
155
+ operator.eq: "==",
156
+ operator.ne: "!=",
157
+ operator.le: "<=",
158
+ operator.lt: "<",
159
+ operator.ge: ">=",
160
+ operator.gt: ">",
161
+ operator.and_: "&",
162
+ roperator.rand_: "&",
163
+ operator.or_: "|",
164
+ roperator.ror_: "|",
165
+ operator.xor: "^",
166
+ roperator.rxor: "^",
167
+ divmod: None,
168
+ roperator.rdivmod: None,
169
+ }
170
+
171
+
172
+ def _where_standard(cond, a, b):
173
+ # Caller is responsible for extracting ndarray if necessary
174
+ return np.where(cond, a, b)
175
+
176
+
177
+ def _where_numexpr(cond, a, b):
178
+ # Caller is responsible for extracting ndarray if necessary
179
+ result = None
180
+
181
+ if _can_use_numexpr(None, "where", a, b, "where"):
182
+ result = ne.evaluate(
183
+ "where(cond_value, a_value, b_value)",
184
+ local_dict={"cond_value": cond, "a_value": a, "b_value": b},
185
+ casting="safe",
186
+ )
187
+
188
+ if result is None:
189
+ result = _where_standard(cond, a, b)
190
+
191
+ return result
192
+
193
+
194
+ # turn myself on
195
+ set_use_numexpr(get_option("compute.use_numexpr"))
196
+
197
+
198
+ def _has_bool_dtype(x):
199
+ try:
200
+ return x.dtype == bool
201
+ except AttributeError:
202
+ return isinstance(x, (bool, np.bool_))
203
+
204
+
205
+ _BOOL_OP_UNSUPPORTED = {"+": "|", "*": "&", "-": "^"}
206
+
207
+
208
+ def _bool_arith_fallback(op_str, a, b) -> bool:
209
+ """
210
+ Check if we should fallback to the python `_evaluate_standard` in case
211
+ of an unsupported operation by numexpr, which is the case for some
212
+ boolean ops.
213
+ """
214
+ if _has_bool_dtype(a) and _has_bool_dtype(b):
215
+ if op_str in _BOOL_OP_UNSUPPORTED:
216
+ warnings.warn(
217
+ f"evaluating in Python space because the {repr(op_str)} "
218
+ "operator is not supported by numexpr for the bool dtype, "
219
+ f"use {repr(_BOOL_OP_UNSUPPORTED[op_str])} instead.",
220
+ stacklevel=find_stack_level(),
221
+ )
222
+ return True
223
+ return False
224
+
225
+
226
+ def evaluate(op, a, b, use_numexpr: bool = True):
227
+ """
228
+ Evaluate and return the expression of the op on a and b.
229
+
230
+ Parameters
231
+ ----------
232
+ op : the actual operand
233
+ a : left operand
234
+ b : right operand
235
+ use_numexpr : bool, default True
236
+ Whether to try to use numexpr.
237
+ """
238
+ op_str = _op_str_mapping[op]
239
+ if op_str is not None:
240
+ if use_numexpr:
241
+ # error: "None" not callable
242
+ return _evaluate(op, op_str, a, b) # type: ignore[misc]
243
+ return _evaluate_standard(op, op_str, a, b)
244
+
245
+
246
+ def where(cond, a, b, use_numexpr: bool = True):
247
+ """
248
+ Evaluate the where condition cond on a and b.
249
+
250
+ Parameters
251
+ ----------
252
+ cond : np.ndarray[bool]
253
+ a : return if cond is True
254
+ b : return if cond is False
255
+ use_numexpr : bool, default True
256
+ Whether to try to use numexpr.
257
+ """
258
+ assert _where is not None
259
+ return _where(cond, a, b) if use_numexpr else _where_standard(cond, a, b)
260
+
261
+
262
+ def set_test_mode(v: bool = True) -> None:
263
+ """
264
+ Keeps track of whether numexpr was used.
265
+
266
+ Stores an additional ``True`` for every successful use of evaluate with
267
+ numexpr since the last ``get_test_result``.
268
+ """
269
+ global _TEST_MODE, _TEST_RESULT
270
+ _TEST_MODE = v
271
+ _TEST_RESULT = []
272
+
273
+
274
+ def _store_test_result(used_numexpr: bool) -> None:
275
+ if used_numexpr:
276
+ _TEST_RESULT.append(used_numexpr)
277
+
278
+
279
+ def get_test_result() -> list[bool]:
280
+ """
281
+ Get test result and reset test_results.
282
+ """
283
+ global _TEST_RESULT
284
+ res = _TEST_RESULT
285
+ _TEST_RESULT = []
286
+ return res
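A sketch of exercising the test-mode hooks above to confirm that numexpr actually handled an operation; it assumes numexpr is installed and uses arrays larger than _MIN_ELEMENTS:

    import operator
    import numpy as np
    import pandas.core.computation.expressions as expressions

    x = np.random.rand(2_000_000)  # float64, above the 1_000_000 threshold
    y = np.random.rand(2_000_000)

    expressions.set_test_mode(True)
    result = expressions.evaluate(operator.add, x, y)  # -> _evaluate_numexpr
    print(expressions.get_test_result())  # [True] when numexpr handled the op
    expressions.set_test_mode(False)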
env-llmeval/lib/python3.10/site-packages/pandas/core/computation/pytables.py ADDED
@@ -0,0 +1,666 @@
1
+ """ manage PyTables query interface via Expressions """
2
+ from __future__ import annotations
3
+
4
+ import ast
5
+ from decimal import (
6
+ Decimal,
7
+ InvalidOperation,
8
+ )
9
+ from functools import partial
10
+ from typing import (
11
+ TYPE_CHECKING,
12
+ Any,
13
+ ClassVar,
14
+ )
15
+
16
+ import numpy as np
17
+
18
+ from pandas._libs.tslibs import (
19
+ Timedelta,
20
+ Timestamp,
21
+ )
22
+ from pandas.errors import UndefinedVariableError
23
+
24
+ from pandas.core.dtypes.common import is_list_like
25
+
26
+ import pandas.core.common as com
27
+ from pandas.core.computation import (
28
+ expr,
29
+ ops,
30
+ scope as _scope,
31
+ )
32
+ from pandas.core.computation.common import ensure_decoded
33
+ from pandas.core.computation.expr import BaseExprVisitor
34
+ from pandas.core.computation.ops import is_term
35
+ from pandas.core.construction import extract_array
36
+ from pandas.core.indexes.base import Index
37
+
38
+ from pandas.io.formats.printing import (
39
+ pprint_thing,
40
+ pprint_thing_encoded,
41
+ )
42
+
43
+ if TYPE_CHECKING:
44
+ from pandas._typing import (
45
+ Self,
46
+ npt,
47
+ )
48
+
49
+
50
+ class PyTablesScope(_scope.Scope):
51
+ __slots__ = ("queryables",)
52
+
53
+ queryables: dict[str, Any]
54
+
55
+ def __init__(
56
+ self,
57
+ level: int,
58
+ global_dict=None,
59
+ local_dict=None,
60
+ queryables: dict[str, Any] | None = None,
61
+ ) -> None:
62
+ super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
63
+ self.queryables = queryables or {}
64
+
65
+
66
+ class Term(ops.Term):
67
+ env: PyTablesScope
68
+
69
+ def __new__(cls, name, env, side=None, encoding=None):
70
+ if isinstance(name, str):
71
+ klass = cls
72
+ else:
73
+ klass = Constant
74
+ return object.__new__(klass)
75
+
76
+ def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None:
77
+ super().__init__(name, env, side=side, encoding=encoding)
78
+
79
+ def _resolve_name(self):
80
+ # the left-hand side must name a known queryable
81
+ if self.side == "left":
82
+ # Note: The behavior of __new__ ensures that self.name is a str here
83
+ if self.name not in self.env.queryables:
84
+ raise NameError(f"name {repr(self.name)} is not defined")
85
+ return self.name
86
+
87
+ # resolve the rhs (and allow it to be None)
88
+ try:
89
+ return self.env.resolve(self.name, is_local=False)
90
+ except UndefinedVariableError:
91
+ return self.name
92
+
93
+ # read-only property overwriting read/write property
94
+ @property # type: ignore[misc]
95
+ def value(self):
96
+ return self._value
97
+
98
+
99
+ class Constant(Term):
100
+ def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None:
101
+ assert isinstance(env, PyTablesScope), type(env)
102
+ super().__init__(name, env, side=side, encoding=encoding)
103
+
104
+ def _resolve_name(self):
105
+ return self._name
106
+
107
+
108
+ class BinOp(ops.BinOp):
109
+ _max_selectors = 31
110
+
111
+ op: str
112
+ queryables: dict[str, Any]
113
+ condition: str | None
114
+
115
+ def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding) -> None:
116
+ super().__init__(op, lhs, rhs)
117
+ self.queryables = queryables
118
+ self.encoding = encoding
119
+ self.condition = None
120
+
121
+ def _disallow_scalar_only_bool_ops(self) -> None:
122
+ pass
123
+
124
+ def prune(self, klass):
125
+ def pr(left, right):
126
+ """create and return a new specialized BinOp from myself"""
127
+ if left is None:
128
+ return right
129
+ elif right is None:
130
+ return left
131
+
132
+ k = klass
133
+ if isinstance(left, ConditionBinOp):
134
+ if isinstance(right, ConditionBinOp):
135
+ k = JointConditionBinOp
136
+ elif isinstance(left, k):
137
+ return left
138
+ elif isinstance(right, k):
139
+ return right
140
+
141
+ elif isinstance(left, FilterBinOp):
142
+ if isinstance(right, FilterBinOp):
143
+ k = JointFilterBinOp
144
+ elif isinstance(left, k):
145
+ return left
146
+ elif isinstance(right, k):
147
+ return right
148
+
149
+ return k(
150
+ self.op, left, right, queryables=self.queryables, encoding=self.encoding
151
+ ).evaluate()
152
+
153
+ left, right = self.lhs, self.rhs
154
+
155
+ if is_term(left) and is_term(right):
156
+ res = pr(left.value, right.value)
157
+ elif not is_term(left) and is_term(right):
158
+ res = pr(left.prune(klass), right.value)
159
+ elif is_term(left) and not is_term(right):
160
+ res = pr(left.value, right.prune(klass))
161
+ elif not (is_term(left) or is_term(right)):
162
+ res = pr(left.prune(klass), right.prune(klass))
163
+
164
+ return res
165
+
166
+ def conform(self, rhs):
167
+ """inplace conform rhs"""
168
+ if not is_list_like(rhs):
169
+ rhs = [rhs]
170
+ if isinstance(rhs, np.ndarray):
171
+ rhs = rhs.ravel()
172
+ return rhs
173
+
174
+ @property
175
+ def is_valid(self) -> bool:
176
+ """return True if this is a valid field"""
177
+ return self.lhs in self.queryables
178
+
179
+ @property
180
+ def is_in_table(self) -> bool:
181
+ """
182
+ return True if this is a valid column name for generation (e.g. an
183
+ actual column in the table)
184
+ """
185
+ return self.queryables.get(self.lhs) is not None
186
+
187
+ @property
188
+ def kind(self):
189
+ """the kind of my field"""
190
+ return getattr(self.queryables.get(self.lhs), "kind", None)
191
+
192
+ @property
193
+ def meta(self):
194
+ """the meta of my field"""
195
+ return getattr(self.queryables.get(self.lhs), "meta", None)
196
+
197
+ @property
198
+ def metadata(self):
199
+ """the metadata of my field"""
200
+ return getattr(self.queryables.get(self.lhs), "metadata", None)
201
+
202
+ def generate(self, v) -> str:
203
+ """create and return the op string for this TermValue"""
204
+ val = v.tostring(self.encoding)
205
+ return f"({self.lhs} {self.op} {val})"
206
+
207
+ def convert_value(self, v) -> TermValue:
208
+ """
209
+ convert the expression that is in the term to something that is
210
+ accepted by pytables
211
+ """
212
+
213
+ def stringify(value):
214
+ if self.encoding is not None:
215
+ return pprint_thing_encoded(value, encoding=self.encoding)
216
+ return pprint_thing(value)
217
+
218
+ kind = ensure_decoded(self.kind)
219
+ meta = ensure_decoded(self.meta)
220
+ if kind == "datetime" or (kind and kind.startswith("datetime64")):
221
+ if isinstance(v, (int, float)):
222
+ v = stringify(v)
223
+ v = ensure_decoded(v)
224
+ v = Timestamp(v).as_unit("ns")
225
+ if v.tz is not None:
226
+ v = v.tz_convert("UTC")
227
+ return TermValue(v, v._value, kind)
228
+ elif kind in ("timedelta64", "timedelta"):
229
+ if isinstance(v, str):
230
+ v = Timedelta(v)
231
+ else:
232
+ v = Timedelta(v, unit="s")
233
+ v = v.as_unit("ns")._value
234
+ return TermValue(int(v), v, kind)
235
+ elif meta == "category":
236
+ metadata = extract_array(self.metadata, extract_numpy=True)
237
+ result: npt.NDArray[np.intp] | np.intp | int
238
+ if v not in metadata:
239
+ result = -1
240
+ else:
241
+ result = metadata.searchsorted(v, side="left")
242
+ return TermValue(result, result, "integer")
243
+ elif kind == "integer":
244
+ try:
245
+ v_dec = Decimal(v)
246
+ except InvalidOperation:
247
+ # GH 54186
248
+ # convert v to float to raise float's ValueError
249
+ float(v)
250
+ else:
251
+ v = int(v_dec.to_integral_exact(rounding="ROUND_HALF_EVEN"))
252
+ return TermValue(v, v, kind)
253
+ elif kind == "float":
254
+ v = float(v)
255
+ return TermValue(v, v, kind)
256
+ elif kind == "bool":
257
+ if isinstance(v, str):
258
+ v = v.strip().lower() not in [
259
+ "false",
260
+ "f",
261
+ "no",
262
+ "n",
263
+ "none",
264
+ "0",
265
+ "[]",
266
+ "{}",
267
+ "",
268
+ ]
269
+ else:
270
+ v = bool(v)
271
+ return TermValue(v, v, kind)
272
+ elif isinstance(v, str):
273
+ # string quoting
274
+ return TermValue(v, stringify(v), "string")
275
+ else:
276
+ raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")
277
+
278
+ def convert_values(self) -> None:
279
+ pass
280
+
281
+
282
+ class FilterBinOp(BinOp):
283
+ filter: tuple[Any, Any, Index] | None = None
284
+
285
+ def __repr__(self) -> str:
286
+ if self.filter is None:
287
+ return "Filter: Not Initialized"
288
+ return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]]")
289
+
290
+ def invert(self) -> Self:
291
+ """invert the filter"""
292
+ if self.filter is not None:
293
+ self.filter = (
294
+ self.filter[0],
295
+ self.generate_filter_op(invert=True),
296
+ self.filter[2],
297
+ )
298
+ return self
299
+
300
+ def format(self):
301
+ """return the actual filter format"""
302
+ return [self.filter]
303
+
304
+ # error: Signature of "evaluate" incompatible with supertype "BinOp"
305
+ def evaluate(self) -> Self | None: # type: ignore[override]
306
+ if not self.is_valid:
307
+ raise ValueError(f"query term is not valid [{self}]")
308
+
309
+ rhs = self.conform(self.rhs)
310
+ values = list(rhs)
311
+
312
+ if self.is_in_table:
313
+ # if too many values to create the expression, use a filter instead
314
+ if self.op in ["==", "!="] and len(values) > self._max_selectors:
315
+ filter_op = self.generate_filter_op()
316
+ self.filter = (self.lhs, filter_op, Index(values))
317
+
318
+ return self
319
+ return None
320
+
321
+ # equality conditions
322
+ if self.op in ["==", "!="]:
323
+ filter_op = self.generate_filter_op()
324
+ self.filter = (self.lhs, filter_op, Index(values))
325
+
326
+ else:
327
+ raise TypeError(
328
+ f"passing a filterable condition to a non-table indexer [{self}]"
329
+ )
330
+
331
+ return self
332
+
333
+ def generate_filter_op(self, invert: bool = False):
334
+ if (self.op == "!=" and not invert) or (self.op == "==" and invert):
335
+ return lambda axis, vals: ~axis.isin(vals)
336
+ else:
337
+ return lambda axis, vals: axis.isin(vals)
338
+
339
+
340
+ class JointFilterBinOp(FilterBinOp):
341
+ def format(self):
342
+ raise NotImplementedError("unable to collapse Joint Filters")
343
+
344
+ # error: Signature of "evaluate" incompatible with supertype "BinOp"
345
+ def evaluate(self) -> Self: # type: ignore[override]
346
+ return self
347
+
348
+
349
+ class ConditionBinOp(BinOp):
350
+ def __repr__(self) -> str:
351
+ return pprint_thing(f"[Condition : [{self.condition}]]")
352
+
353
+ def invert(self):
354
+ """invert the condition"""
355
+ # if self.condition is not None:
356
+ # self.condition = "~(%s)" % self.condition
357
+ # return self
358
+ raise NotImplementedError(
359
+ "cannot use an invert condition when passing to numexpr"
360
+ )
361
+
362
+ def format(self):
363
+ """return the actual ne format"""
364
+ return self.condition
365
+
366
+ # error: Signature of "evaluate" incompatible with supertype "BinOp"
367
+ def evaluate(self) -> Self | None: # type: ignore[override]
368
+ if not self.is_valid:
369
+ raise ValueError(f"query term is not valid [{self}]")
370
+
371
+ # convert values if we are in the table
372
+ if not self.is_in_table:
373
+ return None
374
+
375
+ rhs = self.conform(self.rhs)
376
+ values = [self.convert_value(v) for v in rhs]
377
+
378
+ # equality conditions
379
+ if self.op in ["==", "!="]:
380
+ # too many values to create the expression?
381
+ if len(values) <= self._max_selectors:
382
+ vs = [self.generate(v) for v in values]
383
+ self.condition = f"({' | '.join(vs)})"
384
+
385
+ # use a filter after reading
386
+ else:
387
+ return None
388
+ else:
389
+ self.condition = self.generate(values[0])
390
+
391
+ return self
392
+
393
+
394
+ class JointConditionBinOp(ConditionBinOp):
395
+ # error: Signature of "evaluate" incompatible with supertype "BinOp"
396
+ def evaluate(self) -> Self: # type: ignore[override]
397
+ self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
398
+ return self
399
+
400
+
401
+ class UnaryOp(ops.UnaryOp):
402
+ def prune(self, klass):
403
+ if self.op != "~":
404
+ raise NotImplementedError("UnaryOp only support invert type ops")
405
+
406
+ operand = self.operand
407
+ operand = operand.prune(klass)
408
+
409
+ if operand is not None and (
410
+ issubclass(klass, ConditionBinOp)
411
+ and operand.condition is not None
412
+ or not issubclass(klass, ConditionBinOp)
413
+ and issubclass(klass, FilterBinOp)
414
+ and operand.filter is not None
415
+ ):
416
+ return operand.invert()
417
+ return None
418
+
419
+
420
+ class PyTablesExprVisitor(BaseExprVisitor):
421
+ const_type: ClassVar[type[ops.Term]] = Constant
422
+ term_type: ClassVar[type[Term]] = Term
423
+
424
+ def __init__(self, env, engine, parser, **kwargs) -> None:
425
+ super().__init__(env, engine, parser)
426
+ for bin_op in self.binary_ops:
427
+ bin_node = self.binary_op_nodes_map[bin_op]
428
+ setattr(
429
+ self,
430
+ f"visit_{bin_node}",
431
+ lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
432
+ )
433
+
434
+ def visit_UnaryOp(self, node, **kwargs) -> ops.Term | UnaryOp | None:
435
+ if isinstance(node.op, (ast.Not, ast.Invert)):
436
+ return UnaryOp("~", self.visit(node.operand))
437
+ elif isinstance(node.op, ast.USub):
438
+ return self.const_type(-self.visit(node.operand).value, self.env)
439
+ elif isinstance(node.op, ast.UAdd):
440
+ raise NotImplementedError("Unary addition not supported")
441
+ # TODO: return None might never be reached
442
+ return None
443
+
444
+ def visit_Index(self, node, **kwargs):
445
+ return self.visit(node.value).value
446
+
447
+ def visit_Assign(self, node, **kwargs):
448
+ cmpr = ast.Compare(
449
+ ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
450
+ )
451
+ return self.visit(cmpr)
452
+
453
+ def visit_Subscript(self, node, **kwargs) -> ops.Term:
454
+ # only allow simple subscripts
455
+
456
+ value = self.visit(node.value)
457
+ slobj = self.visit(node.slice)
458
+ try:
459
+ value = value.value
460
+ except AttributeError:
461
+ pass
462
+
463
+ if isinstance(slobj, Term):
464
+ # In py39 np.ndarray lookups with Term containing int raise
465
+ slobj = slobj.value
466
+
467
+ try:
468
+ return self.const_type(value[slobj], self.env)
469
+ except TypeError as err:
470
+ raise ValueError(
471
+ f"cannot subscript {repr(value)} with {repr(slobj)}"
472
+ ) from err
473
+
474
+ def visit_Attribute(self, node, **kwargs):
475
+ attr = node.attr
476
+ value = node.value
477
+
478
+ ctx = type(node.ctx)
479
+ if ctx == ast.Load:
480
+ # resolve the value
481
+ resolved = self.visit(value)
482
+
483
+ # try to get the value to see if we are another expression
484
+ try:
485
+ resolved = resolved.value
486
+ except AttributeError:
487
+ pass
488
+
489
+ try:
490
+ return self.term_type(getattr(resolved, attr), self.env)
491
+ except AttributeError:
492
+ # something like datetime.datetime where scope is overridden
493
+ if isinstance(value, ast.Name) and value.id == attr:
494
+ return resolved
495
+
496
+ raise ValueError(f"Invalid Attribute context {ctx.__name__}")
497
+
498
+ def translate_In(self, op):
499
+ return ast.Eq() if isinstance(op, ast.In) else op
500
+
501
+ def _rewrite_membership_op(self, node, left, right):
502
+ return self.visit(node.op), node.op, left, right
503
+
504
+
505
+ def _validate_where(w):
506
+ """
507
+ Validate that the where statement is of the right type.
508
+
509
+ The type may either be String, Expr, or list-like of Exprs.
510
+
511
+ Parameters
512
+ ----------
513
+ w : String term expression, Expr, or list-like of Exprs.
514
+
515
+ Returns
516
+ -------
517
+ where : The original where clause if the check was successful.
518
+
519
+ Raises
520
+ ------
521
+ TypeError : An invalid data type was passed in for w (e.g. dict).
522
+ """
523
+ if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):
524
+ raise TypeError(
525
+ "where must be passed as a string, PyTablesExpr, "
526
+ "or list-like of PyTablesExpr"
527
+ )
528
+
529
+ return w
530
+
531
+
532
+ class PyTablesExpr(expr.Expr):
533
+ """
534
+ Hold a pytables-like expression, comprised of possibly multiple 'terms'.
535
+
536
+ Parameters
537
+ ----------
538
+ where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
539
+ queryables : a "kinds" map (dict of column name -> kind), or None if column
540
+ is non-indexable
541
+ encoding : an encoding that will encode the query terms
542
+
543
+ Returns
544
+ -------
545
+ a PyTablesExpr object
546
+
547
+ Examples
548
+ --------
549
+ 'index>=date'
550
+ "columns=['A', 'D']"
551
+ 'columns=A'
552
+ 'columns==A'
553
+ "~(columns=['A','B'])"
554
+ 'index>df.index[3] & string="bar"'
555
+ '(index>df.index[3] & index<=df.index[6]) | string="bar"'
556
+ "ts>=Timestamp('2012-02-01')"
557
+ "major_axis>=20130101"
558
+ """
559
+
560
+ _visitor: PyTablesExprVisitor | None
561
+ env: PyTablesScope
562
+ expr: str
563
+
564
+ def __init__(
565
+ self,
566
+ where,
567
+ queryables: dict[str, Any] | None = None,
568
+ encoding=None,
569
+ scope_level: int = 0,
570
+ ) -> None:
571
+ where = _validate_where(where)
572
+
573
+ self.encoding = encoding
574
+ self.condition = None
575
+ self.filter = None
576
+ self.terms = None
577
+ self._visitor = None
578
+
579
+ # capture the environment if needed
580
+ local_dict: _scope.DeepChainMap[Any, Any] | None = None
581
+
582
+ if isinstance(where, PyTablesExpr):
583
+ local_dict = where.env.scope
584
+ _where = where.expr
585
+
586
+ elif is_list_like(where):
587
+ where = list(where)
588
+ for idx, w in enumerate(where):
589
+ if isinstance(w, PyTablesExpr):
590
+ local_dict = w.env.scope
591
+ else:
592
+ where[idx] = _validate_where(w)
593
+ _where = " & ".join([f"({w})" for w in com.flatten(where)])
594
+ else:
595
+ # _validate_where ensures we otherwise have a string
596
+ _where = where
597
+
598
+ self.expr = _where
599
+ self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)
600
+
601
+ if queryables is not None and isinstance(self.expr, str):
602
+ self.env.queryables.update(queryables)
603
+ self._visitor = PyTablesExprVisitor(
604
+ self.env,
605
+ queryables=queryables,
606
+ parser="pytables",
607
+ engine="pytables",
608
+ encoding=encoding,
609
+ )
610
+ self.terms = self.parse()
611
+
612
+ def __repr__(self) -> str:
613
+ if self.terms is not None:
614
+ return pprint_thing(self.terms)
615
+ return pprint_thing(self.expr)
616
+
617
+ def evaluate(self):
618
+ """create and return the numexpr condition and filter"""
619
+ try:
620
+ self.condition = self.terms.prune(ConditionBinOp)
621
+ except AttributeError as err:
622
+ raise ValueError(
623
+ f"cannot process expression [{self.expr}], [{self}] "
624
+ "is not a valid condition"
625
+ ) from err
626
+ try:
627
+ self.filter = self.terms.prune(FilterBinOp)
628
+ except AttributeError as err:
629
+ raise ValueError(
630
+ f"cannot process expression [{self.expr}], [{self}] "
631
+ "is not a valid filter"
632
+ ) from err
633
+
634
+ return self.condition, self.filter
635
+
636
+
637
+ class TermValue:
638
+ """hold a term value the we use to construct a condition/filter"""
639
+
640
+ def __init__(self, value, converted, kind: str) -> None:
641
+ assert isinstance(kind, str), kind
642
+ self.value = value
643
+ self.converted = converted
644
+ self.kind = kind
645
+
646
+ def tostring(self, encoding) -> str:
647
+ """quote the string if not encoded else encode and return"""
648
+ if self.kind == "string":
649
+ if encoding is not None:
650
+ return str(self.converted)
651
+ return f'"{self.converted}"'
652
+ elif self.kind == "float":
653
+ # python 2 str(float) is not always
654
+ # round-trippable so use repr()
655
+ return repr(self.converted)
656
+ return str(self.converted)
657
+
658
+
659
+ def maybe_expression(s) -> bool:
660
+ """loose checking if s is a pytables-acceptable expression"""
661
+ if not isinstance(s, str):
662
+ return False
663
+ operations = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)
664
+
665
+ # make sure we have an op at least
666
+ return any(op in s for op in operations)
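These classes power the where= argument of HDFStore queries: pandas parses the string into a PyTablesExpr, prunes it into an in-kernel numexpr condition (ConditionBinOp) and, when needed, a post-read filter (FilterBinOp). A sketch, assuming the optional PyTables dependency is installed and using store.h5 as an illustrative path:

    import pandas as pd

    df = pd.DataFrame(
        {"A": range(10)}, index=pd.date_range("2000-01-01", periods=10)
    )
    with pd.HDFStore("store.h5", mode="w") as store:
        # data_columns=True makes "A" queryable in addition to the index
        store.put("df", df, format="table", data_columns=True)
        subset = store.select("df", where="(index > '2000-01-05') & (A > 6)")
    print(subset)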
env-llmeval/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc ADDED
Binary file (13.5 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/accessors.cpython-310.pyc ADDED
Binary file (17.3 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc ADDED
Binary file (10.6 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/base.cpython-310.pyc ADDED
Binary file (196 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/category.cpython-310.pyc ADDED
Binary file (14.9 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimelike.cpython-310.pyc ADDED
Binary file (21.5 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/datetimes.cpython-310.pyc ADDED
Binary file (32.7 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/extension.cpython-310.pyc ADDED
Binary file (5.2 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/frozen.cpython-310.pyc ADDED
Binary file (4.1 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/interval.cpython-310.pyc ADDED
Binary file (28.9 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc ADDED
Binary file (108 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/period.cpython-310.pyc ADDED
Binary file (16.4 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/range.cpython-310.pyc ADDED
Binary file (29.3 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc ADDED
Binary file (10.1 kB)
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/accessors.py ADDED
@@ -0,0 +1,643 @@
1
+ """
2
+ datetimelike delegation
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ cast,
9
+ )
10
+ import warnings
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import lib
15
+ from pandas.util._exceptions import find_stack_level
16
+
17
+ from pandas.core.dtypes.common import (
18
+ is_integer_dtype,
19
+ is_list_like,
20
+ )
21
+ from pandas.core.dtypes.dtypes import (
22
+ ArrowDtype,
23
+ CategoricalDtype,
24
+ DatetimeTZDtype,
25
+ PeriodDtype,
26
+ )
27
+ from pandas.core.dtypes.generic import ABCSeries
28
+
29
+ from pandas.core.accessor import (
30
+ PandasDelegate,
31
+ delegate_names,
32
+ )
33
+ from pandas.core.arrays import (
34
+ DatetimeArray,
35
+ PeriodArray,
36
+ TimedeltaArray,
37
+ )
38
+ from pandas.core.arrays.arrow.array import ArrowExtensionArray
39
+ from pandas.core.base import (
40
+ NoNewAttributesMixin,
41
+ PandasObject,
42
+ )
43
+ from pandas.core.indexes.datetimes import DatetimeIndex
44
+ from pandas.core.indexes.timedeltas import TimedeltaIndex
45
+
46
+ if TYPE_CHECKING:
47
+ from pandas import (
48
+ DataFrame,
49
+ Series,
50
+ )
51
+
52
+
53
+ class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):
54
+ _hidden_attrs = PandasObject._hidden_attrs | {
55
+ "orig",
56
+ "name",
57
+ }
58
+
59
+ def __init__(self, data: Series, orig) -> None:
60
+ if not isinstance(data, ABCSeries):
61
+ raise TypeError(
62
+ f"cannot convert an object of type {type(data)} to a datetimelike index"
63
+ )
64
+
65
+ self._parent = data
66
+ self.orig = orig
67
+ self.name = getattr(data, "name", None)
68
+ self._freeze()
69
+
70
+ def _get_values(self):
71
+ data = self._parent
72
+ if lib.is_np_dtype(data.dtype, "M"):
73
+ return DatetimeIndex(data, copy=False, name=self.name)
74
+
75
+ elif isinstance(data.dtype, DatetimeTZDtype):
76
+ return DatetimeIndex(data, copy=False, name=self.name)
77
+
78
+ elif lib.is_np_dtype(data.dtype, "m"):
79
+ return TimedeltaIndex(data, copy=False, name=self.name)
80
+
81
+ elif isinstance(data.dtype, PeriodDtype):
82
+ return PeriodArray(data, copy=False)
83
+
84
+ raise TypeError(
85
+ f"cannot convert an object of type {type(data)} to a datetimelike index"
86
+ )
87
+
88
+ def _delegate_property_get(self, name: str):
89
+ from pandas import Series
90
+
91
+ values = self._get_values()
92
+
93
+ result = getattr(values, name)
94
+
95
+ # maybe need to upcast (ints)
96
+ if isinstance(result, np.ndarray):
97
+ if is_integer_dtype(result):
98
+ result = result.astype("int64")
99
+ elif not is_list_like(result):
100
+ return result
101
+
102
+ result = np.asarray(result)
103
+
104
+ if self.orig is not None:
105
+ index = self.orig.index
106
+ else:
107
+ index = self._parent.index
108
+ # return the result as a Series
109
+ result = Series(result, index=index, name=self.name).__finalize__(self._parent)
110
+
111
+ # setting this object will show a SettingWithCopyWarning/Error
112
+ result._is_copy = (
113
+ "modifications to a property of a datetimelike "
114
+ "object are not supported and are discarded. "
115
+ "Change values on the original."
116
+ )
117
+
118
+ return result
119
+
120
+ def _delegate_property_set(self, name: str, value, *args, **kwargs):
121
+ raise ValueError(
122
+ "modifications to a property of a datetimelike object are not supported. "
123
+ "Change values on the original."
124
+ )
125
+
126
+ def _delegate_method(self, name: str, *args, **kwargs):
127
+ from pandas import Series
128
+
129
+ values = self._get_values()
130
+
131
+ method = getattr(values, name)
132
+ result = method(*args, **kwargs)
133
+
134
+ if not is_list_like(result):
135
+ return result
136
+
137
+ result = Series(result, index=self._parent.index, name=self.name).__finalize__(
138
+ self._parent
139
+ )
140
+
141
+ # setting this object will show a SettingWithCopyWarning/Error
142
+ result._is_copy = (
143
+ "modifications to a method of a datetimelike "
144
+ "object are not supported and are discarded. "
145
+ "Change values on the original."
146
+ )
147
+
148
+ return result
149
+
150
+
151
+ @delegate_names(
152
+ delegate=ArrowExtensionArray,
153
+ accessors=TimedeltaArray._datetimelike_ops,
154
+ typ="property",
155
+ accessor_mapping=lambda x: f"_dt_{x}",
156
+ raise_on_missing=False,
157
+ )
158
+ @delegate_names(
159
+ delegate=ArrowExtensionArray,
160
+ accessors=TimedeltaArray._datetimelike_methods,
161
+ typ="method",
162
+ accessor_mapping=lambda x: f"_dt_{x}",
163
+ raise_on_missing=False,
164
+ )
165
+ @delegate_names(
166
+ delegate=ArrowExtensionArray,
167
+ accessors=DatetimeArray._datetimelike_ops,
168
+ typ="property",
169
+ accessor_mapping=lambda x: f"_dt_{x}",
170
+ raise_on_missing=False,
171
+ )
172
+ @delegate_names(
173
+ delegate=ArrowExtensionArray,
174
+ accessors=DatetimeArray._datetimelike_methods,
175
+ typ="method",
176
+ accessor_mapping=lambda x: f"_dt_{x}",
177
+ raise_on_missing=False,
178
+ )
179
+ class ArrowTemporalProperties(PandasDelegate, PandasObject, NoNewAttributesMixin):
180
+ def __init__(self, data: Series, orig) -> None:
181
+ if not isinstance(data, ABCSeries):
182
+ raise TypeError(
183
+ f"cannot convert an object of type {type(data)} to a datetimelike index"
184
+ )
185
+
186
+ self._parent = data
187
+ self._orig = orig
188
+ self._freeze()
189
+
190
+ def _delegate_property_get(self, name: str):
191
+ if not hasattr(self._parent.array, f"_dt_{name}"):
192
+ raise NotImplementedError(
193
+ f"dt.{name} is not supported for {self._parent.dtype}"
194
+ )
195
+ result = getattr(self._parent.array, f"_dt_{name}")
196
+
197
+ if not is_list_like(result):
198
+ return result
199
+
200
+ if self._orig is not None:
201
+ index = self._orig.index
202
+ else:
203
+ index = self._parent.index
204
+ # return the result as a Series, which is by definition a copy
205
+ result = type(self._parent)(
206
+ result, index=index, name=self._parent.name
207
+ ).__finalize__(self._parent)
208
+
209
+ return result
210
+
211
+ def _delegate_method(self, name: str, *args, **kwargs):
212
+ if not hasattr(self._parent.array, f"_dt_{name}"):
213
+ raise NotImplementedError(
214
+ f"dt.{name} is not supported for {self._parent.dtype}"
215
+ )
216
+
217
+ result = getattr(self._parent.array, f"_dt_{name}")(*args, **kwargs)
218
+
219
+ if self._orig is not None:
220
+ index = self._orig.index
221
+ else:
222
+ index = self._parent.index
223
+ # return the result as a Series, which is by definition a copy
224
+ result = type(self._parent)(
225
+ result, index=index, name=self._parent.name
226
+ ).__finalize__(self._parent)
227
+
228
+ return result
229
+
230
+ def to_pytimedelta(self):
231
+ return cast(ArrowExtensionArray, self._parent.array)._dt_to_pytimedelta()
232
+
233
+ def to_pydatetime(self):
234
+ # GH#20306
235
+ warnings.warn(
236
+ f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, "
237
+ "in a future version this will return a Series containing python "
238
+ "datetime objects instead of an ndarray. To retain the old behavior, "
239
+ "call `np.array` on the result",
240
+ FutureWarning,
241
+ stacklevel=find_stack_level(),
242
+ )
243
+ return cast(ArrowExtensionArray, self._parent.array)._dt_to_pydatetime()
244
+
245
+ def isocalendar(self) -> DataFrame:
246
+ from pandas import DataFrame
247
+
248
+ result = (
249
+ cast(ArrowExtensionArray, self._parent.array)
250
+ ._dt_isocalendar()
251
+ ._pa_array.combine_chunks()
252
+ )
253
+ iso_calendar_df = DataFrame(
254
+ {
255
+ col: type(self._parent.array)(result.field(i)) # type: ignore[call-arg]
256
+ for i, col in enumerate(["year", "week", "day"])
257
+ }
258
+ )
259
+ return iso_calendar_df
260
+
261
+ @property
262
+ def components(self) -> DataFrame:
263
+ from pandas import DataFrame
264
+
265
+ components_df = DataFrame(
266
+ {
267
+ col: getattr(self._parent.array, f"_dt_{col}")
268
+ for col in [
269
+ "days",
270
+ "hours",
271
+ "minutes",
272
+ "seconds",
273
+ "milliseconds",
274
+ "microseconds",
275
+ "nanoseconds",
276
+ ]
277
+ }
278
+ )
279
+ return components_df
280
+
281
+
282
+ @delegate_names(
283
+ delegate=DatetimeArray,
284
+ accessors=DatetimeArray._datetimelike_ops + ["unit"],
285
+ typ="property",
286
+ )
287
+ @delegate_names(
288
+ delegate=DatetimeArray,
289
+ accessors=DatetimeArray._datetimelike_methods + ["as_unit"],
290
+ typ="method",
291
+ )
292
+ class DatetimeProperties(Properties):
293
+ """
294
+ Accessor object for datetimelike properties of the Series values.
295
+
296
+ Examples
297
+ --------
298
+ >>> seconds_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="s"))
299
+ >>> seconds_series
300
+ 0 2000-01-01 00:00:00
301
+ 1 2000-01-01 00:00:01
302
+ 2 2000-01-01 00:00:02
303
+ dtype: datetime64[ns]
304
+ >>> seconds_series.dt.second
305
+ 0 0
306
+ 1 1
307
+ 2 2
308
+ dtype: int32
309
+
310
+ >>> hours_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="h"))
311
+ >>> hours_series
312
+ 0 2000-01-01 00:00:00
313
+ 1 2000-01-01 01:00:00
314
+ 2 2000-01-01 02:00:00
315
+ dtype: datetime64[ns]
316
+ >>> hours_series.dt.hour
317
+ 0 0
318
+ 1 1
319
+ 2 2
320
+ dtype: int32
321
+
322
+ >>> quarters_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="QE"))
323
+ >>> quarters_series
324
+ 0 2000-03-31
325
+ 1 2000-06-30
326
+ 2 2000-09-30
327
+ dtype: datetime64[ns]
328
+ >>> quarters_series.dt.quarter
329
+ 0 1
330
+ 1 2
331
+ 2 3
332
+ dtype: int32
333
+
334
+ Returns a Series indexed like the original Series.
335
+ Raises TypeError if the Series does not contain datetimelike values.
336
+ """
337
+
338
+ def to_pydatetime(self) -> np.ndarray:
339
+ """
340
+ Return the data as an array of :class:`datetime.datetime` objects.
341
+
342
+ .. deprecated:: 2.1.0
343
+
344
+ The current behavior of dt.to_pydatetime is deprecated.
345
+ In a future version this will return a Series containing python
346
+ datetime objects instead of a ndarray.
347
+
348
+ Timezone information is retained if present.
349
+
350
+ .. warning::
351
+
352
+ Python's datetime uses microsecond resolution, which is lower than
353
+ pandas (nanosecond). The values are truncated.
354
+
355
+ Returns
356
+ -------
357
+ numpy.ndarray
358
+ Object dtype array containing native Python datetime objects.
359
+
360
+ See Also
361
+ --------
362
+ datetime.datetime : Standard library value for a datetime.
363
+
364
+ Examples
365
+ --------
366
+ >>> s = pd.Series(pd.date_range('20180310', periods=2))
367
+ >>> s
368
+ 0 2018-03-10
369
+ 1 2018-03-11
370
+ dtype: datetime64[ns]
371
+
372
+ >>> s.dt.to_pydatetime()
373
+ array([datetime.datetime(2018, 3, 10, 0, 0),
374
+ datetime.datetime(2018, 3, 11, 0, 0)], dtype=object)
375
+
376
+ pandas' nanosecond precision is truncated to microseconds.
377
+
378
+ >>> s = pd.Series(pd.date_range('20180310', periods=2, freq='ns'))
379
+ >>> s
380
+ 0 2018-03-10 00:00:00.000000000
381
+ 1 2018-03-10 00:00:00.000000001
382
+ dtype: datetime64[ns]
383
+
384
+ >>> s.dt.to_pydatetime()
385
+ array([datetime.datetime(2018, 3, 10, 0, 0),
386
+ datetime.datetime(2018, 3, 10, 0, 0)], dtype=object)
387
+ """
388
+ # GH#20306
389
+ warnings.warn(
390
+ f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, "
391
+ "in a future version this will return a Series containing python "
392
+ "datetime objects instead of an ndarray. To retain the old behavior, "
393
+ "call `np.array` on the result",
394
+ FutureWarning,
395
+ stacklevel=find_stack_level(),
396
+ )
397
+ return self._get_values().to_pydatetime()
398
+
399
+ @property
400
+ def freq(self):
401
+ return self._get_values().inferred_freq
402
+
403
+ def isocalendar(self) -> DataFrame:
404
+ """
405
+ Calculate year, week, and day according to the ISO 8601 standard.
406
+
407
+ Returns
408
+ -------
409
+ DataFrame
410
+ With columns year, week and day.
411
+
412
+ See Also
413
+ --------
414
+ Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
415
+ week number, and weekday for the given Timestamp object.
416
+ datetime.date.isocalendar : Return a named tuple object with
417
+ three components: year, week and weekday.
418
+
419
+ Examples
420
+ --------
421
+ >>> ser = pd.to_datetime(pd.Series(["2010-01-01", pd.NaT]))
422
+ >>> ser.dt.isocalendar()
423
+ year week day
424
+ 0 2009 53 5
425
+ 1 <NA> <NA> <NA>
426
+ >>> ser.dt.isocalendar().week
427
+ 0 53
428
+ 1 <NA>
429
+ Name: week, dtype: UInt32
430
+ """
431
+ return self._get_values().isocalendar().set_index(self._parent.index)
432
+
433
+
434
+ @delegate_names(
435
+ delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ="property"
436
+ )
437
+ @delegate_names(
438
+ delegate=TimedeltaArray,
439
+ accessors=TimedeltaArray._datetimelike_methods,
440
+ typ="method",
441
+ )
442
+ class TimedeltaProperties(Properties):
443
+ """
444
+ Accessor object for datetimelike properties of the Series values.
445
+
446
+ Returns a Series indexed like the original Series.
447
+ Raises TypeError if the Series does not contain datetimelike values.
448
+
449
+ Examples
450
+ --------
451
+ >>> seconds_series = pd.Series(
452
+ ... pd.timedelta_range(start="1 second", periods=3, freq="s")
453
+ ... )
454
+ >>> seconds_series
455
+ 0 0 days 00:00:01
456
+ 1 0 days 00:00:02
457
+ 2 0 days 00:00:03
458
+ dtype: timedelta64[ns]
459
+ >>> seconds_series.dt.seconds
460
+ 0 1
461
+ 1 2
462
+ 2 3
463
+ dtype: int32
464
+ """
465
+
466
+ def to_pytimedelta(self) -> np.ndarray:
467
+ """
468
+ Return an array of native :class:`datetime.timedelta` objects.
469
+
470
+ Python's standard `datetime` library uses a different representation
471
+ for timedeltas. This method converts a Series of pandas Timedeltas
472
+ to `datetime.timedelta` objects with the same length as the original
473
+ Series.
474
+
475
+ Returns
476
+ -------
477
+ numpy.ndarray
478
+ 1D array containing data with `datetime.timedelta` type.
479
+
480
+ See Also
481
+ --------
482
+ datetime.timedelta : A duration expressing the difference
483
+ between two date, time, or datetime objects.
484
+
485
+ Examples
486
+ --------
487
+ >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="d"))
488
+ >>> s
489
+ 0 0 days
490
+ 1 1 days
491
+ 2 2 days
492
+ 3 3 days
493
+ 4 4 days
494
+ dtype: timedelta64[ns]
495
+
496
+ >>> s.dt.to_pytimedelta()
497
+ array([datetime.timedelta(0), datetime.timedelta(days=1),
498
+ datetime.timedelta(days=2), datetime.timedelta(days=3),
499
+ datetime.timedelta(days=4)], dtype=object)
500
+ """
501
+ return self._get_values().to_pytimedelta()
502
+
503
+ @property
504
+ def components(self):
505
+ """
506
+ Return a DataFrame of the components of the Timedeltas.
507
+
508
+ Returns
509
+ -------
510
+ DataFrame
511
+
512
+ Examples
513
+ --------
514
+ >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='s'))
515
+ >>> s
516
+ 0 0 days 00:00:00
517
+ 1 0 days 00:00:01
518
+ 2 0 days 00:00:02
519
+ 3 0 days 00:00:03
520
+ 4 0 days 00:00:04
521
+ dtype: timedelta64[ns]
522
+ >>> s.dt.components
523
+ days hours minutes seconds milliseconds microseconds nanoseconds
524
+ 0 0 0 0 0 0 0 0
525
+ 1 0 0 0 1 0 0 0
526
+ 2 0 0 0 2 0 0 0
527
+ 3 0 0 0 3 0 0 0
528
+ 4 0 0 0 4 0 0 0
529
+ """
530
+ return (
531
+ self._get_values()
532
+ .components.set_index(self._parent.index)
533
+ .__finalize__(self._parent)
534
+ )
535
+
536
+ @property
537
+ def freq(self):
538
+ return self._get_values().inferred_freq
539
+
540
+
541
+ @delegate_names(
542
+ delegate=PeriodArray, accessors=PeriodArray._datetimelike_ops, typ="property"
543
+ )
544
+ @delegate_names(
545
+ delegate=PeriodArray, accessors=PeriodArray._datetimelike_methods, typ="method"
546
+ )
547
+ class PeriodProperties(Properties):
548
+ """
549
+ Accessor object for datetimelike properties of the Series values.
550
+
551
+ Returns a Series indexed like the original Series.
552
+ Raises TypeError if the Series does not contain datetimelike values.
553
+
554
+ Examples
555
+ --------
556
+ >>> seconds_series = pd.Series(
557
+ ... pd.period_range(
558
+ ... start="2000-01-01 00:00:00", end="2000-01-01 00:00:03", freq="s"
559
+ ... )
560
+ ... )
561
+ >>> seconds_series
562
+ 0 2000-01-01 00:00:00
563
+ 1 2000-01-01 00:00:01
564
+ 2 2000-01-01 00:00:02
565
+ 3 2000-01-01 00:00:03
566
+ dtype: period[s]
567
+ >>> seconds_series.dt.second
568
+ 0 0
569
+ 1 1
570
+ 2 2
571
+ 3 3
572
+ dtype: int64
573
+
574
+ >>> hours_series = pd.Series(
575
+ ... pd.period_range(start="2000-01-01 00:00", end="2000-01-01 03:00", freq="h")
576
+ ... )
577
+ >>> hours_series
578
+ 0 2000-01-01 00:00
579
+ 1 2000-01-01 01:00
580
+ 2 2000-01-01 02:00
581
+ 3 2000-01-01 03:00
582
+ dtype: period[h]
583
+ >>> hours_series.dt.hour
584
+ 0 0
585
+ 1 1
586
+ 2 2
587
+ 3 3
588
+ dtype: int64
589
+
590
+ >>> quarters_series = pd.Series(
591
+ ... pd.period_range(start="2000-01-01", end="2000-12-31", freq="Q-DEC")
592
+ ... )
593
+ >>> quarters_series
594
+ 0 2000Q1
595
+ 1 2000Q2
596
+ 2 2000Q3
597
+ 3 2000Q4
598
+ dtype: period[Q-DEC]
599
+ >>> quarters_series.dt.quarter
600
+ 0 1
601
+ 1 2
602
+ 2 3
603
+ 3 4
604
+ dtype: int64
605
+ """
606
+
607
+
608
+ class CombinedDatetimelikeProperties(
609
+ DatetimeProperties, TimedeltaProperties, PeriodProperties
610
+ ):
611
+ def __new__(cls, data: Series): # pyright: ignore[reportInconsistentConstructor]
612
+ # CombinedDatetimelikeProperties isn't really instantiated. Instead
613
+ # we need to choose which parent (datetime or timedelta) is
614
+ # appropriate. Since we're checking the dtypes anyway, we'll just
615
+ # do all the validation here.
616
+
617
+ if not isinstance(data, ABCSeries):
618
+ raise TypeError(
619
+ f"cannot convert an object of type {type(data)} to a datetimelike index"
620
+ )
621
+
622
+ orig = data if isinstance(data.dtype, CategoricalDtype) else None
623
+ if orig is not None:
624
+ data = data._constructor(
625
+ orig.array,
626
+ name=orig.name,
627
+ copy=False,
628
+ dtype=orig._values.categories.dtype,
629
+ index=orig.index,
630
+ )
631
+
632
+ if isinstance(data.dtype, ArrowDtype) and data.dtype.kind in "Mm":
633
+ return ArrowTemporalProperties(data, orig)
634
+ if lib.is_np_dtype(data.dtype, "M"):
635
+ return DatetimeProperties(data, orig)
636
+ elif isinstance(data.dtype, DatetimeTZDtype):
637
+ return DatetimeProperties(data, orig)
638
+ elif lib.is_np_dtype(data.dtype, "m"):
639
+ return TimedeltaProperties(data, orig)
640
+ elif isinstance(data.dtype, PeriodDtype):
641
+ return PeriodProperties(data, orig)
642
+
643
+ raise AttributeError("Can only use .dt accessor with datetimelike values")
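A sketch of the dtype dispatch that CombinedDatetimelikeProperties.__new__ performs when Series.dt is accessed (the expected outputs in the comments assume a standard pandas install):

    import pandas as pd

    dt_ser = pd.Series(pd.date_range("2024-01-01", periods=3, freq="D"))
    td_ser = pd.Series(pd.to_timedelta(["1 days", "2 days"]))

    print(type(dt_ser.dt).__name__)        # DatetimeProperties
    print(type(td_ser.dt).__name__)        # TimedeltaProperties
    print(dt_ser.dt.day_name().tolist())   # ['Monday', 'Tuesday', 'Wednesday']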
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/api.py ADDED
@@ -0,0 +1,388 @@
1
+ from __future__ import annotations
2
+
3
+ import textwrap
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ cast,
7
+ )
8
+
9
+ import numpy as np
10
+
11
+ from pandas._libs import (
12
+ NaT,
13
+ lib,
14
+ )
15
+ from pandas.errors import InvalidIndexError
16
+
17
+ from pandas.core.dtypes.cast import find_common_type
18
+
19
+ from pandas.core.algorithms import safe_sort
20
+ from pandas.core.indexes.base import (
21
+ Index,
22
+ _new_Index,
23
+ ensure_index,
24
+ ensure_index_from_sequences,
25
+ get_unanimous_names,
26
+ )
27
+ from pandas.core.indexes.category import CategoricalIndex
28
+ from pandas.core.indexes.datetimes import DatetimeIndex
29
+ from pandas.core.indexes.interval import IntervalIndex
30
+ from pandas.core.indexes.multi import MultiIndex
31
+ from pandas.core.indexes.period import PeriodIndex
32
+ from pandas.core.indexes.range import RangeIndex
33
+ from pandas.core.indexes.timedeltas import TimedeltaIndex
34
+
35
+ if TYPE_CHECKING:
36
+ from pandas._typing import Axis
37
+ _sort_msg = textwrap.dedent(
38
+ """\
39
+ Sorting because non-concatenation axis is not aligned. A future version
40
+ of pandas will change to not sort by default.
41
+
42
+ To accept the future behavior, pass 'sort=False'.
43
+
44
+ To retain the current behavior and silence the warning, pass 'sort=True'.
45
+ """
46
+ )
47
+
48
+
49
+ __all__ = [
50
+ "Index",
51
+ "MultiIndex",
52
+ "CategoricalIndex",
53
+ "IntervalIndex",
54
+ "RangeIndex",
55
+ "InvalidIndexError",
56
+ "TimedeltaIndex",
57
+ "PeriodIndex",
58
+ "DatetimeIndex",
59
+ "_new_Index",
60
+ "NaT",
61
+ "ensure_index",
62
+ "ensure_index_from_sequences",
63
+ "get_objs_combined_axis",
64
+ "union_indexes",
65
+ "get_unanimous_names",
66
+ "all_indexes_same",
67
+ "default_index",
68
+ "safe_sort_index",
69
+ ]
70
+
71
+
72
+ def get_objs_combined_axis(
73
+ objs,
74
+ intersect: bool = False,
75
+ axis: Axis = 0,
76
+ sort: bool = True,
77
+ copy: bool = False,
78
+ ) -> Index:
79
+ """
80
+ Extract combined index: return intersection or union (depending on the
81
+ value of "intersect") of indexes on given axis, or None if all objects
82
+ lack indexes (e.g. they are numpy arrays).
83
+
84
+ Parameters
85
+ ----------
86
+ objs : list
87
+ Series or DataFrame objects, may be mix of the two.
88
+ intersect : bool, default False
89
+ If True, calculate the intersection between indexes. Otherwise,
90
+ calculate the union.
91
+ axis : {0 or 'index', 1 or 'columns'}, default 0
92
+ The axis to extract indexes from.
93
+ sort : bool, default True
94
+ Whether the result index should come out sorted or not.
95
+ copy : bool, default False
96
+ If True, return a copy of the combined index.
97
+
98
+ Returns
99
+ -------
100
+ Index
101
+ """
102
+ obs_idxes = [obj._get_axis(axis) for obj in objs]
103
+ return _get_combined_index(obs_idxes, intersect=intersect, sort=sort, copy=copy)
104
+
105
+
106
+ def _get_distinct_objs(objs: list[Index]) -> list[Index]:
107
+ """
108
+ Return a list with distinct elements of "objs" (different ids).
109
+ Preserves order.
110
+ """
111
+ ids: set[int] = set()
112
+ res = []
113
+ for obj in objs:
114
+ if id(obj) not in ids:
115
+ ids.add(id(obj))
116
+ res.append(obj)
117
+ return res
118
+
119
+
120
+ def _get_combined_index(
121
+ indexes: list[Index],
122
+ intersect: bool = False,
123
+ sort: bool = False,
124
+ copy: bool = False,
125
+ ) -> Index:
126
+ """
127
+ Return the union or intersection of indexes.
128
+
129
+ Parameters
130
+ ----------
131
+ indexes : list of Index or list objects
132
+ When intersect=True, do not accept list of lists.
133
+ intersect : bool, default False
134
+ If True, calculate the intersection between indexes. Otherwise,
135
+ calculate the union.
136
+ sort : bool, default False
137
+ Whether the result index should come out sorted or not.
138
+ copy : bool, default False
139
+ If True, return a copy of the combined index.
140
+
141
+ Returns
142
+ -------
143
+ Index
144
+ """
145
+ # TODO: handle index names!
146
+ indexes = _get_distinct_objs(indexes)
147
+ if len(indexes) == 0:
148
+ index = Index([])
149
+ elif len(indexes) == 1:
150
+ index = indexes[0]
151
+ elif intersect:
152
+ index = indexes[0]
153
+ for other in indexes[1:]:
154
+ index = index.intersection(other)
155
+ else:
156
+ index = union_indexes(indexes, sort=False)
157
+ index = ensure_index(index)
158
+
159
+ if sort:
160
+ index = safe_sort_index(index)
161
+ # GH 29879
162
+ if copy:
163
+ index = index.copy()
164
+
165
+ return index
166
+
167
+
168
+ def safe_sort_index(index: Index) -> Index:
169
+ """
170
+ Returns the sorted index
171
+
172
+ We keep the dtypes and the name attributes.
173
+
174
+ Parameters
175
+ ----------
176
+ index : an Index
177
+
178
+ Returns
179
+ -------
180
+ Index
181
+ """
182
+ if index.is_monotonic_increasing:
183
+ return index
184
+
185
+ try:
186
+ array_sorted = safe_sort(index)
187
+ except TypeError:
188
+ pass
189
+ else:
190
+ if isinstance(array_sorted, Index):
191
+ return array_sorted
192
+
193
+ array_sorted = cast(np.ndarray, array_sorted)
194
+ if isinstance(index, MultiIndex):
195
+ index = MultiIndex.from_tuples(array_sorted, names=index.names)
196
+ else:
197
+ index = Index(array_sorted, name=index.name, dtype=index.dtype)
198
+
199
+ return index
200
+
201
+
202
+ def union_indexes(indexes, sort: bool | None = True) -> Index:
203
+ """
204
+ Return the union of indexes.
205
+
206
+ The behavior of sort and names is not consistent.
207
+
208
+ Parameters
209
+ ----------
210
+ indexes : list of Index or list objects
211
+ sort : bool, default True
212
+ Whether the result index should come out sorted or not.
213
+
214
+ Returns
215
+ -------
216
+ Index
217
+ """
218
+ if len(indexes) == 0:
219
+ raise AssertionError("Must have at least 1 Index to union")
220
+ if len(indexes) == 1:
221
+ result = indexes[0]
222
+ if isinstance(result, list):
223
+ if not sort:
224
+ result = Index(result)
225
+ else:
226
+ result = Index(sorted(result))
227
+ return result
228
+
229
+ indexes, kind = _sanitize_and_check(indexes)
230
+
231
+ def _unique_indices(inds, dtype) -> Index:
232
+ """
233
+ Concatenate indices and remove duplicates.
234
+
235
+ Parameters
236
+ ----------
237
+ inds : list of Index or list objects
238
+ dtype : dtype to set for the resulting Index
239
+
240
+ Returns
241
+ -------
242
+ Index
243
+ """
244
+ if all(isinstance(ind, Index) for ind in inds):
245
+ inds = [ind.astype(dtype, copy=False) for ind in inds]
246
+ result = inds[0].unique()
247
+ other = inds[1].append(inds[2:])
248
+ diff = other[result.get_indexer_for(other) == -1]
249
+ if len(diff):
250
+ result = result.append(diff.unique())
251
+ if sort:
252
+ result = result.sort_values()
253
+ return result
254
+
255
+ def conv(i):
256
+ if isinstance(i, Index):
257
+ i = i.tolist()
258
+ return i
259
+
260
+ return Index(
261
+ lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort),
262
+ dtype=dtype,
263
+ )
264
+
265
+ def _find_common_index_dtype(inds):
266
+ """
267
+ Finds a common type for the indexes to pass through to resulting index.
268
+
269
+ Parameters
270
+ ----------
271
+ inds: list of Index or list objects
272
+
273
+ Returns
274
+ -------
275
+ The common type or None if no indexes were given
276
+ """
277
+ dtypes = [idx.dtype for idx in indexes if isinstance(idx, Index)]
278
+ if dtypes:
279
+ dtype = find_common_type(dtypes)
280
+ else:
281
+ dtype = None
282
+
283
+ return dtype
284
+
285
+ if kind == "special":
286
+ result = indexes[0]
287
+
288
+ dtis = [x for x in indexes if isinstance(x, DatetimeIndex)]
289
+ dti_tzs = [x for x in dtis if x.tz is not None]
290
+ if len(dti_tzs) not in [0, len(dtis)]:
291
+ # TODO: this behavior is not tested (so may not be desired),
292
+ # but is kept in order to keep behavior the same when
293
+ # deprecating union_many
294
+ # test_frame_from_dict_with_mixed_indexes
295
+ raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")
296
+
297
+ if len(dtis) == len(indexes):
298
+ sort = True
299
+ result = indexes[0]
300
+
301
+ elif len(dtis) > 1:
302
+ # If we have mixed timezones, our casting behavior may depend on
303
+ # the order of indexes, which we don't want.
304
+ sort = False
305
+
306
+ # TODO: what about Categorical[dt64]?
307
+ # test_frame_from_dict_with_mixed_indexes
308
+ indexes = [x.astype(object, copy=False) for x in indexes]
309
+ result = indexes[0]
310
+
311
+ for other in indexes[1:]:
312
+ result = result.union(other, sort=None if sort else False)
313
+ return result
314
+
315
+ elif kind == "array":
316
+ dtype = _find_common_index_dtype(indexes)
317
+ index = indexes[0]
318
+ if not all(index.equals(other) for other in indexes[1:]):
319
+ index = _unique_indices(indexes, dtype)
320
+
321
+ name = get_unanimous_names(*indexes)[0]
322
+ if name != index.name:
323
+ index = index.rename(name)
324
+ return index
325
+ else: # kind='list'
326
+ dtype = _find_common_index_dtype(indexes)
327
+ return _unique_indices(indexes, dtype)
328
+
329
+
330
+ def _sanitize_and_check(indexes):
331
+ """
332
+ Verify the type of indexes and convert lists to Index.
333
+
334
+ Cases:
335
+
336
+ - [list, list, ...]: Return ([list, list, ...], 'list')
337
+ - [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
338
+ Lists are sorted and converted to Index.
339
+ - [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
340
+ TYPE = 'special' if at least one special type, 'array' otherwise.
341
+
342
+ Parameters
343
+ ----------
344
+ indexes : list of Index or list objects
345
+
346
+ Returns
347
+ -------
348
+ sanitized_indexes : list of Index or list objects
349
+ type : {'list', 'array', 'special'}
350
+ """
351
+ kinds = list({type(index) for index in indexes})
352
+
353
+ if list in kinds:
354
+ if len(kinds) > 1:
355
+ indexes = [
356
+ Index(list(x)) if not isinstance(x, Index) else x for x in indexes
357
+ ]
358
+ kinds.remove(list)
359
+ else:
360
+ return indexes, "list"
361
+
362
+ if len(kinds) > 1 or Index not in kinds:
363
+ return indexes, "special"
364
+ else:
365
+ return indexes, "array"
366
+
367
+
368
+ def all_indexes_same(indexes) -> bool:
369
+ """
370
+ Determine if all indexes contain the same elements.
371
+
372
+ Parameters
373
+ ----------
374
+ indexes : iterable of Index objects
375
+
376
+ Returns
377
+ -------
378
+ bool
379
+ True if all indexes contain the same elements, False otherwise.
380
+ """
381
+ itr = iter(indexes)
382
+ first = next(itr)
383
+ return all(first.equals(index) for index in itr)
384
+
385
+
386
+ def default_index(n: int) -> RangeIndex:
387
+ rng = range(n)
388
+ return RangeIndex._simple_new(rng, name=None)
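
A quick behavioral check of the combining helpers above may be useful. The sketch below is hypothetical (it only assumes this vendored pandas imports cleanly); these are private helpers, so the import path is internal and not a stable API:

# Hypothetical usage sketch for the helpers in pandas/core/indexes/api.py.
import pandas as pd
from pandas.core.indexes.api import all_indexes_same, union_indexes

a = pd.Index([1, 2, 3])
b = pd.Index([3, 4, 5])

# With the default sort=True, union_indexes returns the sorted, de-duplicated union.
print(union_indexes([a, b]))            # Index([1, 2, 3, 4, 5], dtype='int64')

# all_indexes_same compares element-wise via Index.equals.
print(all_indexes_same([a, a.copy()]))  # True
print(all_indexes_same([a, b]))         # False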
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/base.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/category.py ADDED
@@ -0,0 +1,513 @@
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
    Literal,
    cast,
)

import numpy as np

from pandas._libs import index as libindex
from pandas.util._decorators import (
    cache_readonly,
    doc,
)

from pandas.core.dtypes.common import is_scalar
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import (
    is_valid_na_for_dtype,
    isna,
)

from pandas.core.arrays.categorical import (
    Categorical,
    contains,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.base import (
    Index,
    maybe_extract_name,
)
from pandas.core.indexes.extension import (
    NDArrayBackedExtensionIndex,
    inherit_names,
)

if TYPE_CHECKING:
    from collections.abc import Hashable

    from pandas._typing import (
        Dtype,
        DtypeObj,
        Self,
        npt,
    )


@inherit_names(
    [
        "argsort",
        "tolist",
        "codes",
        "categories",
        "ordered",
        "_reverse_indexer",
        "searchsorted",
        "min",
        "max",
    ],
    Categorical,
)
@inherit_names(
    [
        "rename_categories",
        "reorder_categories",
        "add_categories",
        "remove_categories",
        "remove_unused_categories",
        "set_categories",
        "as_ordered",
        "as_unordered",
    ],
    Categorical,
    wrap=True,
)
class CategoricalIndex(NDArrayBackedExtensionIndex):
    """
    Index based on an underlying :class:`Categorical`.

    CategoricalIndex, like Categorical, can only take on a limited,
    and usually fixed, number of possible values (`categories`). Also,
    like Categorical, it might have an order, but numerical operations
    (additions, divisions, ...) are not possible.

    Parameters
    ----------
    data : array-like (1-dimensional)
        The values of the categorical. If `categories` are given, values not in
        `categories` will be replaced with NaN.
    categories : index-like, optional
        The categories for the categorical. Items need to be unique.
        If the categories are not given here (and also not in `dtype`), they
        will be inferred from the `data`.
    ordered : bool, optional
        Whether or not this categorical is treated as an ordered
        categorical. If not given here or in `dtype`, the resulting
        categorical will be unordered.
    dtype : CategoricalDtype or "category", optional
        If :class:`CategoricalDtype`, cannot be used together with
        `categories` or `ordered`.
    copy : bool, default False
        Make a copy of input ndarray.
    name : object, optional
        Name to be stored in the index.

    Attributes
    ----------
    codes
    categories
    ordered

    Methods
    -------
    rename_categories
    reorder_categories
    add_categories
    remove_categories
    remove_unused_categories
    set_categories
    as_ordered
    as_unordered
    map

    Raises
    ------
    ValueError
        If the categories do not validate.
    TypeError
        If an explicit ``ordered=True`` is given but no `categories` and the
        `values` are not sortable.

    See Also
    --------
    Index : The base pandas Index type.
    Categorical : A categorical array.
    CategoricalDtype : Type for categorical data.

    Notes
    -----
    See the `user guide
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`__
    for more.

    Examples
    --------
    >>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"])
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['a', 'b', 'c'], ordered=False, dtype='category')

    ``CategoricalIndex`` can also be instantiated from a ``Categorical``:

    >>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"])
    >>> pd.CategoricalIndex(c)
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['a', 'b', 'c'], ordered=False, dtype='category')

    Ordered ``CategoricalIndex`` can have a min and max value.

    >>> ci = pd.CategoricalIndex(
    ...     ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]
    ... )
    >>> ci
    CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
                     categories=['c', 'b', 'a'], ordered=True, dtype='category')
    >>> ci.min()
    'c'
    """

    _typ = "categoricalindex"
    _data_cls = Categorical

    @property
    def _can_hold_strings(self):
        return self.categories._can_hold_strings

    @cache_readonly
    def _should_fallback_to_positional(self) -> bool:
        return self.categories._should_fallback_to_positional

    codes: np.ndarray
    categories: Index
    ordered: bool | None
    _data: Categorical
    _values: Categorical

    @property
    def _engine_type(self) -> type[libindex.IndexEngine]:
        # self.codes can have dtype int8, int16, int32 or int64, so we need
        # to return the corresponding engine type (libindex.Int8Engine, etc.).
        return {
            np.int8: libindex.Int8Engine,
            np.int16: libindex.Int16Engine,
            np.int32: libindex.Int32Engine,
            np.int64: libindex.Int64Engine,
        }[self.codes.dtype.type]

    # --------------------------------------------------------------------
    # Constructors

    def __new__(
        cls,
        data=None,
        categories=None,
        ordered=None,
        dtype: Dtype | None = None,
        copy: bool = False,
        name: Hashable | None = None,
    ) -> Self:
        name = maybe_extract_name(name, data, cls)

        if is_scalar(data):
            # GH#38944 include None here, which pre-2.0 subbed in []
            cls._raise_scalar_data_error(data)

        data = Categorical(
            data, categories=categories, ordered=ordered, dtype=dtype, copy=copy
        )

        return cls._simple_new(data, name=name)

    # --------------------------------------------------------------------

    def _is_dtype_compat(self, other: Index) -> Categorical:
        """
        *this is an internal non-public method*

        provide a comparison between the dtype of self and other (coercing if
        needed)

        Parameters
        ----------
        other : Index

        Returns
        -------
        Categorical

        Raises
        ------
        TypeError if the dtypes are not compatible
        """
        if isinstance(other.dtype, CategoricalDtype):
            cat = extract_array(other)
            cat = cast(Categorical, cat)
            if not cat._categories_match_up_to_permutation(self._values):
                raise TypeError(
                    "categories must match existing categories when appending"
                )

        elif other._is_multi:
            # preempt raising NotImplementedError in isna call
            raise TypeError("MultiIndex is not dtype-compatible with CategoricalIndex")
        else:
            values = other

            cat = Categorical(other, dtype=self.dtype)
            other = CategoricalIndex(cat)
            if not other.isin(values).all():
                raise TypeError(
                    "cannot append a non-category item to a CategoricalIndex"
                )
            cat = other._values

            if not ((cat == values) | (isna(cat) & isna(values))).all():
                # GH#37667 see test_equals_non_category
                raise TypeError(
                    "categories must match existing categories when appending"
                )

        return cat

    def equals(self, other: object) -> bool:
        """
        Determine if two CategoricalIndex objects contain the same elements.

        Returns
        -------
        bool
            ``True`` if two :class:`pandas.CategoricalIndex` objects have equal
            elements, ``False`` otherwise.

        Examples
        --------
        >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])
        >>> ci2 = pd.CategoricalIndex(pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']))
        >>> ci.equals(ci2)
        True

        The order of elements matters.

        >>> ci3 = pd.CategoricalIndex(['c', 'b', 'a', 'a', 'b', 'c'])
        >>> ci.equals(ci3)
        False

        The orderedness also matters.

        >>> ci4 = ci.as_ordered()
        >>> ci.equals(ci4)
        False

        The categories matter, but the order of the categories matters only when
        ``ordered=True``.

        >>> ci5 = ci.set_categories(['a', 'b', 'c', 'd'])
        >>> ci.equals(ci5)
        False

        >>> ci6 = ci.set_categories(['b', 'c', 'a'])
        >>> ci.equals(ci6)
        True
        >>> ci_ordered = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
        ...                                  ordered=True)
        >>> ci2_ordered = ci_ordered.set_categories(['b', 'c', 'a'])
        >>> ci_ordered.equals(ci2_ordered)
        False
        """
        if self.is_(other):
            return True

        if not isinstance(other, Index):
            return False

        try:
            other = self._is_dtype_compat(other)
        except (TypeError, ValueError):
            return False

        return self._data.equals(other)

    # --------------------------------------------------------------------
    # Rendering Methods

    @property
    def _formatter_func(self):
        return self.categories._formatter_func

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr, formatted_value)
        """
        attrs: list[tuple[str, str | int | bool | None]]

        attrs = [
            (
                "categories",
                f"[{', '.join(self._data._repr_categories())}]",
            ),
            ("ordered", self.ordered),
        ]
        extra = super()._format_attrs()
        return attrs + extra

    # --------------------------------------------------------------------

    @property
    def inferred_type(self) -> str:
        return "categorical"

    @doc(Index.__contains__)
    def __contains__(self, key: Any) -> bool:
        # if key is a NaN, check if any NaN is in self.
        if is_valid_na_for_dtype(key, self.categories.dtype):
            return self.hasnans

        return contains(self, key, container=self._engine)

    def reindex(
        self, target, method=None, level=None, limit: int | None = None, tolerance=None
    ) -> tuple[Index, npt.NDArray[np.intp] | None]:
        """
        Create index with target's values (move/add/delete values as necessary).

        Returns
        -------
        new_index : pd.Index
            Resulting index
        indexer : np.ndarray[np.intp] or None
            Indices of output values in original index

        """
        if method is not None:
            raise NotImplementedError(
                "argument method is not implemented for CategoricalIndex.reindex"
            )
        if level is not None:
            raise NotImplementedError(
                "argument level is not implemented for CategoricalIndex.reindex"
            )
        if limit is not None:
            raise NotImplementedError(
                "argument limit is not implemented for CategoricalIndex.reindex"
            )
        return super().reindex(target)

    # --------------------------------------------------------------------
    # Indexing Methods

    def _maybe_cast_indexer(self, key) -> int:
        # GH#41933: we have to do this instead of self._data._validate_scalar
        #  because this will correctly get partial-indexing on Interval categories
        try:
            return self._data._unbox_scalar(key)
        except KeyError:
            if is_valid_na_for_dtype(key, self.categories.dtype):
                return -1
            raise

    def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex:
        if isinstance(values, CategoricalIndex):
            values = values._data
        if isinstance(values, Categorical):
            # Indexing on codes is more efficient if categories are the same,
            #  so we can apply some optimizations based on the degree of
            #  dtype-matching.
            cat = self._data._encode_with_my_categories(values)
            codes = cat._codes
        else:
            codes = self.categories.get_indexer(values)
            codes = codes.astype(self.codes.dtype, copy=False)
            cat = self._data._from_backing_data(codes)
        return type(self)._simple_new(cat)

    # --------------------------------------------------------------------

    def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
        return self.categories._is_comparable_dtype(dtype)

    def map(self, mapper, na_action: Literal["ignore"] | None = None):
        """
        Map values using an input mapping or function.

        Maps the values (their categories, not the codes) of the index to new
        categories. If the mapping correspondence is one-to-one the result is a
        :class:`~pandas.CategoricalIndex` which has the same order property as
        the original, otherwise an :class:`~pandas.Index` is returned.

        If a `dict` or :class:`~pandas.Series` is used any unmapped category is
        mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
        will be returned.

        Parameters
        ----------
        mapper : function, dict, or Series
            Mapping correspondence.

        Returns
        -------
        pandas.CategoricalIndex or pandas.Index
            Mapped index.

        See Also
        --------
        Index.map : Apply a mapping correspondence on an
            :class:`~pandas.Index`.
        Series.map : Apply a mapping correspondence on a
            :class:`~pandas.Series`.
        Series.apply : Apply more complex functions on a
            :class:`~pandas.Series`.

        Examples
        --------
        >>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
        >>> idx
        CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
                         ordered=False, dtype='category')
        >>> idx.map(lambda x: x.upper())
        CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
                         ordered=False, dtype='category')
        >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
        CategoricalIndex(['first', 'second', 'third'], categories=['first',
                         'second', 'third'], ordered=False, dtype='category')

        If the mapping is one-to-one the ordering of the categories is
        preserved:

        >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
        >>> idx
        CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
                         ordered=True, dtype='category')
        >>> idx.map({'a': 3, 'b': 2, 'c': 1})
        CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
                         dtype='category')

        If the mapping is not one-to-one an :class:`~pandas.Index` is returned:

        >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
        Index(['first', 'second', 'first'], dtype='object')

        If a `dict` is used, all unmapped categories are mapped to `NaN` and
        the result is an :class:`~pandas.Index`:

        >>> idx.map({'a': 'first', 'b': 'second'})
        Index(['first', 'second', nan], dtype='object')
        """
        mapped = self._values.map(mapper, na_action=na_action)
        return Index(mapped, name=self.name)

    def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
        # if calling index is category, don't check dtype of others
        try:
            cat = Categorical._concat_same_type(
                [self._is_dtype_compat(c) for c in to_concat]
            )
        except TypeError:
            # not all to_concat elements are among our categories (or NA)

            res = concat_compat([x._values for x in to_concat])
            return Index(res, name=name)
        else:
            return type(self)._simple_new(cat, name=name)
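
For orientation, here is a short, hypothetical sketch of the behavior documented above (it assumes only a standard pandas install; reprs abbreviated in the comments):

# Hypothetical usage sketch for CategoricalIndex ordering and mapping.
import pandas as pd

ci = pd.CategoricalIndex(
    ["a", "b", "c", "a"], categories=["c", "b", "a"], ordered=True
)

# min/max follow the declared category order, not lexical order.
print(ci.min())  # 'c'

# A one-to-one mapping keeps the categorical dtype and its ordering.
print(ci.map({"a": 1, "b": 2, "c": 3}))
# CategoricalIndex([1, 2, 3, 1], categories=[3, 2, 1], ordered=True, ...)

# A many-to-one mapping falls back to a plain Index.
print(ci.map({"a": 1, "b": 1, "c": 2}))
# Index([1, 1, 2, 1], dtype='int64')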
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/datetimelike.py ADDED
@@ -0,0 +1,843 @@
"""
Base and utility classes for tseries type pandas objects.
"""
from __future__ import annotations

from abc import (
    ABC,
    abstractmethod,
)
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    cast,
    final,
)
import warnings

import numpy as np

from pandas._config import using_copy_on_write

from pandas._libs import (
    NaT,
    Timedelta,
    lib,
)
from pandas._libs.tslibs import (
    BaseOffset,
    Resolution,
    Tick,
    parsing,
    to_offset,
)
from pandas._libs.tslibs.dtypes import freq_to_period_freqstr
from pandas.compat.numpy import function as nv
from pandas.errors import (
    InvalidIndexError,
    NullFrequencyError,
)
from pandas.util._decorators import (
    Appender,
    cache_readonly,
    doc,
)
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.common import (
    is_integer,
    is_list_like,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import CategoricalDtype

from pandas.core.arrays import (
    DatetimeArray,
    ExtensionArray,
    PeriodArray,
    TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
    Index,
    _index_shared_docs,
)
from pandas.core.indexes.extension import NDArrayBackedExtensionIndex
from pandas.core.indexes.range import RangeIndex
from pandas.core.tools.timedeltas import to_timedelta

if TYPE_CHECKING:
    from collections.abc import Sequence
    from datetime import datetime

    from pandas._typing import (
        Axis,
        Self,
        npt,
    )

    from pandas import CategoricalIndex

_index_doc_kwargs = dict(ibase._index_doc_kwargs)


class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex, ABC):
    """
    Common ops mixin to support a unified interface for datetime-like Index types.
    """

    _can_hold_strings = False
    _data: DatetimeArray | TimedeltaArray | PeriodArray

    @doc(DatetimeLikeArrayMixin.mean)
    def mean(self, *, skipna: bool = True, axis: int | None = 0):
        return self._data.mean(skipna=skipna, axis=axis)

    @property
    def freq(self) -> BaseOffset | None:
        return self._data.freq

    @freq.setter
    def freq(self, value) -> None:
        # error: Property "freq" defined in "PeriodArray" is read-only [misc]
        self._data.freq = value  # type: ignore[misc]

    @property
    def asi8(self) -> npt.NDArray[np.int64]:
        return self._data.asi8

    @property
    @doc(DatetimeLikeArrayMixin.freqstr)
    def freqstr(self) -> str:
        from pandas import PeriodIndex

        if self._data.freqstr is not None and isinstance(
            self._data, (PeriodArray, PeriodIndex)
        ):
            freq = freq_to_period_freqstr(self._data.freq.n, self._data.freq.name)
            return freq
        else:
            return self._data.freqstr  # type: ignore[return-value]

    @cache_readonly
    @abstractmethod
    def _resolution_obj(self) -> Resolution:
        ...

    @cache_readonly
    @doc(DatetimeLikeArrayMixin.resolution)
    def resolution(self) -> str:
        return self._data.resolution

    # ------------------------------------------------------------------------

    @cache_readonly
    def hasnans(self) -> bool:
        return self._data._hasna

    def equals(self, other: Any) -> bool:
        """
        Determines if two Index objects contain the same elements.
        """
        if self.is_(other):
            return True

        if not isinstance(other, Index):
            return False
        elif other.dtype.kind in "iufc":
            return False
        elif not isinstance(other, type(self)):
            should_try = False
            inferable = self._data._infer_matches
            if other.dtype == object:
                should_try = other.inferred_type in inferable
            elif isinstance(other.dtype, CategoricalDtype):
                other = cast("CategoricalIndex", other)
                should_try = other.categories.inferred_type in inferable

            if should_try:
                try:
                    other = type(self)(other)
                except (ValueError, TypeError, OverflowError):
                    # e.g.
                    #  ValueError -> cannot parse str entry, or OutOfBoundsDatetime
                    #  TypeError -> trying to convert IntervalIndex to DatetimeIndex
                    #  OverflowError -> Index([very_large_timedeltas])
                    return False

        if self.dtype != other.dtype:
            # have different timezone
            return False

        return np.array_equal(self.asi8, other.asi8)

    @Appender(Index.__contains__.__doc__)
    def __contains__(self, key: Any) -> bool:
        hash(key)
        try:
            self.get_loc(key)
        except (KeyError, TypeError, ValueError, InvalidIndexError):
            return False
        return True

    def _convert_tolerance(self, tolerance, target):
        tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
        return super()._convert_tolerance(tolerance, target)

    # --------------------------------------------------------------------
    # Rendering Methods
    _default_na_rep = "NaT"

    def format(
        self,
        name: bool = False,
        formatter: Callable | None = None,
        na_rep: str = "NaT",
        date_format: str | None = None,
    ) -> list[str]:
        """
        Render a string representation of the Index.
        """
        warnings.warn(
            # GH#55413
            f"{type(self).__name__}.format is deprecated and will be removed "
            "in a future version. Convert using index.astype(str) or "
            "index.map(formatter) instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        header = []
        if name:
            header.append(
                ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
                if self.name is not None
                else ""
            )

        if formatter is not None:
            return header + list(self.map(formatter))

        return self._format_with_header(
            header=header, na_rep=na_rep, date_format=date_format
        )

    def _format_with_header(
        self, *, header: list[str], na_rep: str, date_format: str | None = None
    ) -> list[str]:
        # TODO: not reached in tests 2023-10-11
        # matches base class except for whitespace padding and date_format
        return header + list(
            self._get_values_for_csv(na_rep=na_rep, date_format=date_format)
        )

    @property
    def _formatter_func(self):
        return self._data._formatter()

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr, formatted_value).
        """
        attrs = super()._format_attrs()
        for attrib in self._attributes:
            # iterating over _attributes prevents us from doing this for PeriodIndex
            if attrib == "freq":
                freq = self.freqstr
                if freq is not None:
                    freq = repr(freq)  # e.g. D -> 'D'
                attrs.append(("freq", freq))
        return attrs

    @Appender(Index._summary.__doc__)
    def _summary(self, name=None) -> str:
        result = super()._summary(name=name)
        if self.freq:
            result += f"\nFreq: {self.freqstr}"

        return result

    # --------------------------------------------------------------------
    # Indexing Methods

    @final
    def _can_partial_date_slice(self, reso: Resolution) -> bool:
        # e.g. test_getitem_setitem_periodindex
        #  History of conversation GH#3452, GH#3931, GH#2369, GH#14826
        return reso > self._resolution_obj
        # NB: for DTI/PI, not TDI

    def _parsed_string_to_bounds(self, reso: Resolution, parsed):
        raise NotImplementedError

    def _parse_with_reso(self, label: str):
        # overridden by TimedeltaIndex
        try:
            if self.freq is None or hasattr(self.freq, "rule_code"):
                freq = self.freq
        except NotImplementedError:
            freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))

        freqstr: str | None
        if freq is not None and not isinstance(freq, str):
            freqstr = freq.rule_code
        else:
            freqstr = freq

        if isinstance(label, np.str_):
            # GH#45580
            label = str(label)

        parsed, reso_str = parsing.parse_datetime_string_with_reso(label, freqstr)
        reso = Resolution.from_attrname(reso_str)
        return parsed, reso

    def _get_string_slice(self, key: str):
        # overridden by TimedeltaIndex
        parsed, reso = self._parse_with_reso(key)
        try:
            return self._partial_date_slice(reso, parsed)
        except KeyError as err:
            raise KeyError(key) from err

    @final
    def _partial_date_slice(
        self,
        reso: Resolution,
        parsed: datetime,
    ) -> slice | npt.NDArray[np.intp]:
        """
        Parameters
        ----------
        reso : Resolution
        parsed : datetime

        Returns
        -------
        slice or ndarray[intp]
        """
        if not self._can_partial_date_slice(reso):
            raise ValueError

        t1, t2 = self._parsed_string_to_bounds(reso, parsed)
        vals = self._data._ndarray
        unbox = self._data._unbox

        if self.is_monotonic_increasing:
            if len(self) and (
                (t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
            ):
                # we are out of range
                raise KeyError

            # TODO: does this depend on being monotonic _increasing_?

            # a monotonic (sorted) series can be sliced
            left = vals.searchsorted(unbox(t1), side="left")
            right = vals.searchsorted(unbox(t2), side="right")
            return slice(left, right)

        else:
            lhs_mask = vals >= unbox(t1)
            rhs_mask = vals <= unbox(t2)

            # try to find the dates
            return (lhs_mask & rhs_mask).nonzero()[0]

    def _maybe_cast_slice_bound(self, label, side: str):
        """
        If label is a string, cast it to scalar type according to resolution.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}

        Returns
        -------
        label : object

        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """
        if isinstance(label, str):
            try:
                parsed, reso = self._parse_with_reso(label)
            except ValueError as err:
                # DTI -> parsing.DateParseError
                # TDI -> 'unit abbreviation w/o a number'
                # PI -> string cannot be parsed as datetime-like
                self._raise_invalid_indexer("slice", label, err)

            lower, upper = self._parsed_string_to_bounds(reso, parsed)
            return lower if side == "left" else upper
        elif not isinstance(label, self._data._recognized_scalars):
            self._raise_invalid_indexer("slice", label)

        return label

    # --------------------------------------------------------------------
    # Arithmetic Methods

    def shift(self, periods: int = 1, freq=None) -> Self:
        """
        Shift index by desired number of time frequency increments.

        This method is for shifting the values of datetime-like indexes
        by a specified time increment a given number of times.

        Parameters
        ----------
        periods : int, default 1
            Number of periods (or increments) to shift by,
            can be positive or negative.
        freq : pandas.DateOffset, pandas.Timedelta or string, optional
            Frequency increment to shift by.
            If None, the index is shifted by its own `freq` attribute.
            Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.

        Returns
        -------
        pandas.DatetimeIndex
            Shifted index.

        See Also
        --------
        Index.shift : Shift values of Index.
        PeriodIndex.shift : Shift values of PeriodIndex.
        """
        raise NotImplementedError

    # --------------------------------------------------------------------

    @doc(Index._maybe_cast_listlike_indexer)
    def _maybe_cast_listlike_indexer(self, keyarr):
        try:
            res = self._data._validate_listlike(keyarr, allow_object=True)
        except (ValueError, TypeError):
            if not isinstance(keyarr, ExtensionArray):
                # e.g. we don't want to cast DTA to ndarray[object]
                res = com.asarray_tuplesafe(keyarr)
                # TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray
            else:
                res = keyarr
        return Index(res, dtype=res.dtype)


class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, ABC):
    """
    Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
    but not PeriodIndex.
    """

    _data: DatetimeArray | TimedeltaArray
    _comparables = ["name", "freq"]
    _attributes = ["name", "freq"]

    # Compat for frequency inference, see GH#23789
    _is_monotonic_increasing = Index.is_monotonic_increasing
    _is_monotonic_decreasing = Index.is_monotonic_decreasing
    _is_unique = Index.is_unique

    @property
    def unit(self) -> str:
        return self._data.unit

    def as_unit(self, unit: str) -> Self:
        """
        Convert to a dtype with the given unit resolution.

        Parameters
        ----------
        unit : {'s', 'ms', 'us', 'ns'}

        Returns
        -------
        same type as self

        Examples
        --------
        For :class:`pandas.DatetimeIndex`:

        >>> idx = pd.DatetimeIndex(['2020-01-02 01:02:03.004005006'])
        >>> idx
        DatetimeIndex(['2020-01-02 01:02:03.004005006'],
                      dtype='datetime64[ns]', freq=None)
        >>> idx.as_unit('s')
        DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None)

        For :class:`pandas.TimedeltaIndex`:

        >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns'])
        >>> tdelta_idx
        TimedeltaIndex(['1 days 00:03:00.000002042'],
                       dtype='timedelta64[ns]', freq=None)
        >>> tdelta_idx.as_unit('s')
        TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)
        """
        arr = self._data.as_unit(unit)
        return type(self)._simple_new(arr, name=self.name)

    def _with_freq(self, freq):
        arr = self._data._with_freq(freq)
        return type(self)._simple_new(arr, name=self._name)

    @property
    def values(self) -> np.ndarray:
        # NB: For Datetime64TZ this is lossy
        data = self._data._ndarray
        if using_copy_on_write():
            data = data.view()
            data.flags.writeable = False
        return data

    @doc(DatetimeIndexOpsMixin.shift)
    def shift(self, periods: int = 1, freq=None) -> Self:
        if freq is not None and freq != self.freq:
            if isinstance(freq, str):
                freq = to_offset(freq)
            offset = periods * freq
            return self + offset

        if periods == 0 or len(self) == 0:
            # GH#14811 empty case
            return self.copy()

        if self.freq is None:
            raise NullFrequencyError("Cannot shift with no freq")

        start = self[0] + periods * self.freq
        end = self[-1] + periods * self.freq

        # Note: in the DatetimeTZ case, _generate_range will infer the
        #  appropriate timezone from `start` and `end`, so tz does not need
        #  to be passed explicitly.
        result = self._data._generate_range(
            start=start, end=end, periods=None, freq=self.freq, unit=self.unit
        )
        return type(self)._simple_new(result, name=self.name)

    @cache_readonly
    @doc(DatetimeLikeArrayMixin.inferred_freq)
    def inferred_freq(self) -> str | None:
        return self._data.inferred_freq

    # --------------------------------------------------------------------
    # Set Operation Methods

    @cache_readonly
    def _as_range_index(self) -> RangeIndex:
        # Convert our i8 representations to RangeIndex
        # Caller is responsible for checking isinstance(self.freq, Tick)
        freq = cast(Tick, self.freq)
        tick = Timedelta(freq).as_unit("ns")._value
        rng = range(self[0]._value, self[-1]._value + tick, tick)
        return RangeIndex(rng)

    def _can_range_setop(self, other) -> bool:
        return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)

    def _wrap_range_setop(self, other, res_i8) -> Self:
        new_freq = None
        if not len(res_i8):
            # RangeIndex defaults to step=1, which we don't want.
            new_freq = self.freq
        elif isinstance(res_i8, RangeIndex):
            new_freq = to_offset(Timedelta(res_i8.step))

        # TODO(GH#41493): we cannot just do
        #  type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq)
        # because test_setops_preserve_freq fails with _validate_frequency raising.
        # This raising is incorrect, as 'on_freq' is incorrect. This will
        # be fixed by GH#41493
        res_values = res_i8.values.view(self._data._ndarray.dtype)
        result = type(self._data)._simple_new(
            # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
            # incompatible type "Union[dtype[Any], ExtensionDtype]"; expected
            # "Union[dtype[datetime64], DatetimeTZDtype]"
            res_values,
            dtype=self.dtype,  # type: ignore[arg-type]
            freq=new_freq,  # type: ignore[arg-type]
        )
        return cast("Self", self._wrap_setop_result(other, result))

    def _range_intersect(self, other, sort) -> Self:
        # Dispatch to RangeIndex intersection logic.
        left = self._as_range_index
        right = other._as_range_index
        res_i8 = left.intersection(right, sort=sort)
        return self._wrap_range_setop(other, res_i8)

    def _range_union(self, other, sort) -> Self:
        # Dispatch to RangeIndex union logic.
        left = self._as_range_index
        right = other._as_range_index
        res_i8 = left.union(right, sort=sort)
        return self._wrap_range_setop(other, res_i8)

    def _intersection(self, other: Index, sort: bool = False) -> Index:
        """
        intersection specialized to the case with matching dtypes and both non-empty.
        """
        other = cast("DatetimeTimedeltaMixin", other)

        if self._can_range_setop(other):
            return self._range_intersect(other, sort=sort)

        if not self._can_fast_intersect(other):
            result = Index._intersection(self, other, sort=sort)
            # We need to invalidate the freq because Index._intersection
            #  uses _shallow_copy on a view of self._data, which will preserve
            #  self.freq if we're not careful.
            # At this point we should have result.dtype == self.dtype
            #  and type(result) is type(self._data)
            result = self._wrap_setop_result(other, result)
            return result._with_freq(None)._with_freq("infer")

        else:
            return self._fast_intersect(other, sort)

    def _fast_intersect(self, other, sort):
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self

        # after sorting, the intersection always starts with the right index
        # and ends with whichever index has the smaller last element
        end = min(left[-1], right[-1])
        start = right[0]

        if end < start:
            result = self[:0]
        else:
            lslice = slice(*left.slice_locs(start, end))
            result = left._values[lslice]

        return result

    def _can_fast_intersect(self, other: Self) -> bool:
        # Note: we only get here with len(self) > 0 and len(other) > 0
        if self.freq is None:
            return False

        elif other.freq != self.freq:
            return False

        elif not self.is_monotonic_increasing:
            # Because freq is not None, we must then be monotonic decreasing
            return False

        # this along with matching freqs ensures that we "line up",
        #  so intersection will preserve freq
        # Note we are assuming away Ticks, as those go through _range_intersect
        # GH#42104
        return self.freq.n == 1

    def _can_fast_union(self, other: Self) -> bool:
        # Assumes that type(self) == type(other), as per the annotation
        # The ability to fast_union also implies that `freq` should be
        #  retained on union.
        freq = self.freq

        if freq is None or freq != other.freq:
            return False

        if not self.is_monotonic_increasing:
            # Because freq is not None, we must then be monotonic decreasing
            # TODO: do union on the reversed indexes?
            return False

        if len(self) == 0 or len(other) == 0:
            # only reached via union_many
            return True

        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self

        right_start = right[0]
        left_end = left[-1]

        # Only need to "adjoin", not overlap
        return (right_start == left_end + freq) or right_start in left

    def _fast_union(self, other: Self, sort=None) -> Self:
        # Caller is responsible for ensuring self and other are non-empty

        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        elif sort is False:
            # TDIs are not in the "correct" order and we don't want
            #  to sort but want to remove overlaps
            left, right = self, other
            left_start = left[0]
            loc = right.searchsorted(left_start, side="left")
            right_chunk = right._values[:loc]
            dates = concat_compat((left._values, right_chunk))
            result = type(self)._simple_new(dates, name=self.name)
            return result
        else:
            left, right = other, self

        left_end = left[-1]
        right_end = right[-1]

        # concatenate
        if left_end < right_end:
            loc = right.searchsorted(left_end, side="right")
            right_chunk = right._values[loc:]
            dates = concat_compat([left._values, right_chunk])
            # The can_fast_union check ensures that the result.freq
            #  should match self.freq
            assert isinstance(dates, type(self._data))
            # error: Item "ExtensionArray" of "ExtensionArray |
            # ndarray[Any, Any]" has no attribute "_freq"
            assert dates._freq == self.freq  # type: ignore[union-attr]
            result = type(self)._simple_new(dates)
            return result
        else:
            return left

    def _union(self, other, sort):
        # We are called by `union`, which is responsible for this validation
        assert isinstance(other, type(self))
        assert self.dtype == other.dtype

        if self._can_range_setop(other):
            return self._range_union(other, sort=sort)

        if self._can_fast_union(other):
            result = self._fast_union(other, sort=sort)
            # in the case with sort=None, the _can_fast_union check ensures
            #  that result.freq == self.freq
            return result
        else:
            return super()._union(other, sort)._with_freq("infer")

    # --------------------------------------------------------------------
    # Join Methods

    def _get_join_freq(self, other):
        """
        Get the freq to attach to the result of a join operation.
        """
        freq = None
        if self._can_fast_union(other):
            freq = self.freq
        return freq

    def _wrap_joined_index(
        self, joined, other, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp]
    ):
        assert other.dtype == self.dtype, (other.dtype, self.dtype)
        result = super()._wrap_joined_index(joined, other, lidx, ridx)
        result._data._freq = self._get_join_freq(other)
        return result

    def _get_engine_target(self) -> np.ndarray:
        # engine methods and libjoin methods need dt64/td64 values cast to i8
        return self._data._ndarray.view("i8")

    def _from_join_target(self, result: np.ndarray):
        # view e.g. i8 back to M8[ns]
        result = result.view(self._data._ndarray.dtype)
        return self._data._from_backing_data(result)

    # --------------------------------------------------------------------
    # List-like Methods

    def _get_delete_freq(self, loc: int | slice | Sequence[int]):
        """
        Find the `freq` for self.delete(loc).
        """
        freq = None
        if self.freq is not None:
            if is_integer(loc):
                if loc in (0, -len(self), -1, len(self) - 1):
                    freq = self.freq
            else:
                if is_list_like(loc):
                    # error: Incompatible types in assignment (expression has
                    # type "Union[slice, ndarray]", variable has type
                    # "Union[int, slice, Sequence[int]]")
                    loc = lib.maybe_indices_to_slice(  # type: ignore[assignment]
                        np.asarray(loc, dtype=np.intp), len(self)
                    )
                if isinstance(loc, slice) and loc.step in (1, None):
                    if loc.start in (0, None) or loc.stop in (len(self), None):
                        freq = self.freq
        return freq

    def _get_insert_freq(self, loc: int, item):
        """
        Find the `freq` for self.insert(loc, item).
        """
        value = self._data._validate_scalar(item)
        item = self._data._box_func(value)

        freq = None
        if self.freq is not None:
            # freq can be preserved on edge cases
            if self.size:
                if item is NaT:
                    pass
                elif loc in (0, -len(self)) and item + self.freq == self[0]:
                    freq = self.freq
                elif (loc == len(self)) and item - self.freq == self[-1]:
                    freq = self.freq
            else:
                # Adding a single item to an empty index may preserve freq
                if isinstance(self.freq, Tick):
                    # all TimedeltaIndex cases go through here; is_on_offset
                    #  would raise TypeError
                    freq = self.freq
                elif self.freq.is_on_offset(item):
                    freq = self.freq
        return freq

    @doc(NDArrayBackedExtensionIndex.delete)
    def delete(self, loc) -> Self:
        result = super().delete(loc)
        result._data._freq = self._get_delete_freq(loc)
        return result

    @doc(NDArrayBackedExtensionIndex.insert)
    def insert(self, loc: int, item):
        result = super().insert(loc, item)
        if isinstance(result, type(self)):
            # i.e. parent class method did not cast
            result._data._freq = self._get_insert_freq(loc, item)
        return result

    # --------------------------------------------------------------------
    # NDArray-Like Methods

    @Appender(_index_shared_docs["take"] % _index_doc_kwargs)
    def take(
        self,
        indices,
        axis: Axis = 0,
        allow_fill: bool = True,
        fill_value=None,
        **kwargs,
    ) -> Self:
        nv.validate_take((), kwargs)
        indices = np.asarray(indices, dtype=np.intp)

        result = NDArrayBackedExtensionIndex.take(
            self, indices, axis, allow_fill, fill_value, **kwargs
        )

        maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
        if isinstance(maybe_slice, slice):
            freq = self._data._get_getitem_freq(maybe_slice)
            result._data._freq = freq
        return result
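
To make the freq-aware shift logic above concrete, a hypothetical sketch (it assumes only a standard pandas install and uses the public API):

# Hypothetical usage sketch for DatetimeTimedeltaMixin.shift.
import pandas as pd

idx = pd.date_range("2024-01-01", periods=3, freq="D")

# freq=None: the index is shifted by periods * its own freq (here, 2 days),
# so the result runs 2024-01-03 through 2024-01-05 and keeps freq='D'.
print(idx.shift(2))

# An explicit freq shifts by that offset instead: self + periods * freq.
print(idx.shift(1, freq="12h"))  # every timestamp moves forward by 12 hours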
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/datetimes.py ADDED
@@ -0,0 +1,1127 @@
+ from __future__ import annotations
+ 
+ import datetime as dt
+ import operator
+ from typing import TYPE_CHECKING
+ import warnings
+ 
+ import numpy as np
+ import pytz
+ 
+ from pandas._libs import (
+     NaT,
+     Period,
+     Timestamp,
+     index as libindex,
+     lib,
+ )
+ from pandas._libs.tslibs import (
+     Resolution,
+     Tick,
+     Timedelta,
+     periods_per_day,
+     timezones,
+     to_offset,
+ )
+ from pandas._libs.tslibs.offsets import prefix_mapping
+ from pandas.util._decorators import (
+     cache_readonly,
+     doc,
+ )
+ from pandas.util._exceptions import find_stack_level
+ 
+ from pandas.core.dtypes.common import is_scalar
+ from pandas.core.dtypes.dtypes import DatetimeTZDtype
+ from pandas.core.dtypes.generic import ABCSeries
+ from pandas.core.dtypes.missing import is_valid_na_for_dtype
+ 
+ from pandas.core.arrays.datetimes import (
+     DatetimeArray,
+     tz_to_dtype,
+ )
+ import pandas.core.common as com
+ from pandas.core.indexes.base import (
+     Index,
+     maybe_extract_name,
+ )
+ from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
+ from pandas.core.indexes.extension import inherit_names
+ from pandas.core.tools.times import to_time
+ 
+ if TYPE_CHECKING:
+     from collections.abc import Hashable
+ 
+     from pandas._typing import (
+         Dtype,
+         DtypeObj,
+         Frequency,
+         IntervalClosedType,
+         Self,
+         TimeAmbiguous,
+         TimeNonexistent,
+         npt,
+     )
+ 
+     from pandas.core.api import (
+         DataFrame,
+         PeriodIndex,
+     )
+ 
+ from pandas._libs.tslibs.dtypes import OFFSET_TO_PERIOD_FREQSTR
+ 
+ 
+ def _new_DatetimeIndex(cls, d):
+     """
+     This is called upon unpickling, rather than the default which doesn't
+     have arguments and breaks __new__
+     """
+     if "data" in d and not isinstance(d["data"], DatetimeIndex):
+         # Avoid need to verify integrity by calling simple_new directly
+         data = d.pop("data")
+         if not isinstance(data, DatetimeArray):
+             # For backward compat with older pickles, we may need to construct
+             # a DatetimeArray to adapt to the newer _simple_new signature
+             tz = d.pop("tz")
+             freq = d.pop("freq")
+             dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)
+         else:
+             dta = data
+             for key in ["tz", "freq"]:
+                 # These are already stored in our DatetimeArray; if they are
+                 # also in the pickle and don't match, we have a problem.
+                 if key in d:
+                     assert d[key] == getattr(dta, key)
+                     d.pop(key)
+         result = cls._simple_new(dta, **d)
+     else:
+         with warnings.catch_warnings():
+             # TODO: If we knew what was going in to **d, we might be able to
+             # go through _simple_new instead
+             warnings.simplefilter("ignore")
+             result = cls.__new__(cls, **d)
+ 
+     return result
+ 
+ 
+ @inherit_names(
+     DatetimeArray._field_ops
+     + [
+         method
+         for method in DatetimeArray._datetimelike_methods
+         if method not in ("tz_localize", "tz_convert", "strftime")
+     ],
+     DatetimeArray,
+     wrap=True,
+ )
+ @inherit_names(["is_normalized"], DatetimeArray, cache=True)
+ @inherit_names(
+     [
+         "tz",
+         "tzinfo",
+         "dtype",
+         "to_pydatetime",
+         "date",
+         "time",
+         "timetz",
+         "std",
+     ]
+     + DatetimeArray._bool_ops,
+     DatetimeArray,
+ )
+ class DatetimeIndex(DatetimeTimedeltaMixin):
+     """
+     Immutable ndarray-like of datetime64 data.
+ 
+     Represented internally as int64, and which can be boxed to Timestamp objects
+     that are subclasses of datetime and carry metadata.
+ 
+     .. versionchanged:: 2.0.0
+         The various numeric date/time attributes (:attr:`~DatetimeIndex.day`,
+         :attr:`~DatetimeIndex.month`, :attr:`~DatetimeIndex.year` etc.) now have dtype
+         ``int32``. Previously they had dtype ``int64``.
+ 
+     Parameters
+     ----------
+     data : array-like (1-dimensional)
+         Datetime-like data to construct index with.
+     freq : str or pandas offset object, optional
+         One of pandas date offset strings or corresponding objects. The string
+         'infer' can be passed in order to set the frequency of the index as the
+         inferred frequency upon creation.
+     tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str
+         Set the Timezone of the data.
+     normalize : bool, default False
+         Normalize start/end dates to midnight before generating date range.
+ 
+         .. deprecated:: 2.1.0
+ 
+     closed : {'left', 'right'}, optional
+         Set whether to include `start` and `end` that are on the
+         boundary. The default includes boundary points on either end.
+ 
+         .. deprecated:: 2.1.0
+ 
+     ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
+         When clocks moved backward due to DST, ambiguous times may arise.
+         For example in Central European Time (UTC+01), when going from 03:00
+         DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC
+         and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter
+         dictates how ambiguous times should be handled.
+ 
+         - 'infer' will attempt to infer fall dst-transition hours based on
+           order
+         - bool-ndarray where True signifies a DST time, False signifies a
+           non-DST time (note that this flag is only applicable for ambiguous
+           times)
+         - 'NaT' will return NaT where there are ambiguous times
+         - 'raise' will raise an AmbiguousTimeError if there are ambiguous times.
+     dayfirst : bool, default False
+         If True, parse dates in `data` with the day first order.
+     yearfirst : bool, default False
+         If True parse dates in `data` with the year first order.
+     dtype : numpy.dtype or DatetimeTZDtype or str, default None
+         Note that the only NumPy dtype allowed is `datetime64[ns]`.
+     copy : bool, default False
+         Make a copy of input ndarray.
+     name : label, default None
+         Name to be stored in the index.
+ 
+     Attributes
+     ----------
+     year
+     month
+     day
+     hour
+     minute
+     second
+     microsecond
+     nanosecond
+     date
+     time
+     timetz
+     dayofyear
+     day_of_year
+     dayofweek
+     day_of_week
+     weekday
+     quarter
+     tz
+     freq
+     freqstr
+     is_month_start
+     is_month_end
+     is_quarter_start
+     is_quarter_end
+     is_year_start
+     is_year_end
+     is_leap_year
+     inferred_freq
+ 
+     Methods
+     -------
+     normalize
+     strftime
+     snap
+     tz_convert
+     tz_localize
+     round
+     floor
+     ceil
+     to_period
+     to_pydatetime
+     to_series
+     to_frame
+     month_name
+     day_name
+     mean
+     std
+ 
+     See Also
+     --------
+     Index : The base pandas Index type.
+     TimedeltaIndex : Index of timedelta64 data.
+     PeriodIndex : Index of Period data.
+     to_datetime : Convert argument to datetime.
+     date_range : Create a fixed-frequency DatetimeIndex.
+ 
+     Notes
+     -----
+     To learn more about the frequency strings, please see `this link
+     <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
+ 
+     Examples
+     --------
+     >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])
+     >>> idx
+     DatetimeIndex(['2020-01-01 10:00:00+00:00', '2020-02-01 11:00:00+00:00'],
+                   dtype='datetime64[ns, UTC]', freq=None)
+     """
+ 
+     _typ = "datetimeindex"
+ 
+     _data_cls = DatetimeArray
+     _supports_partial_string_indexing = True
+ 
+     @property
+     def _engine_type(self) -> type[libindex.DatetimeEngine]:
+         return libindex.DatetimeEngine
+ 
+     _data: DatetimeArray
+     _values: DatetimeArray
+     tz: dt.tzinfo | None
+ 
+     # --------------------------------------------------------------------
+     # methods that dispatch to DatetimeArray and wrap result
+ 
+     @doc(DatetimeArray.strftime)
+     def strftime(self, date_format) -> Index:
+         arr = self._data.strftime(date_format)
+         return Index(arr, name=self.name, dtype=object)
+ 
+     @doc(DatetimeArray.tz_convert)
+     def tz_convert(self, tz) -> Self:
+         arr = self._data.tz_convert(tz)
+         return type(self)._simple_new(arr, name=self.name, refs=self._references)
+ 
+     @doc(DatetimeArray.tz_localize)
+     def tz_localize(
+         self,
+         tz,
+         ambiguous: TimeAmbiguous = "raise",
+         nonexistent: TimeNonexistent = "raise",
+     ) -> Self:
+         arr = self._data.tz_localize(tz, ambiguous, nonexistent)
+         return type(self)._simple_new(arr, name=self.name)
+ 
+     @doc(DatetimeArray.to_period)
+     def to_period(self, freq=None) -> PeriodIndex:
+         from pandas.core.indexes.api import PeriodIndex
+ 
+         arr = self._data.to_period(freq)
+         return PeriodIndex._simple_new(arr, name=self.name)
+ 
+     @doc(DatetimeArray.to_julian_date)
+     def to_julian_date(self) -> Index:
+         arr = self._data.to_julian_date()
+         return Index._simple_new(arr, name=self.name)
+ 
+     @doc(DatetimeArray.isocalendar)
+     def isocalendar(self) -> DataFrame:
+         df = self._data.isocalendar()
+         return df.set_index(self)
+ 
+     @cache_readonly
+     def _resolution_obj(self) -> Resolution:
+         return self._data._resolution_obj
+ 
+     # --------------------------------------------------------------------
+     # Constructors
+ 
+     def __new__(
+         cls,
+         data=None,
+         freq: Frequency | lib.NoDefault = lib.no_default,
+         tz=lib.no_default,
+         normalize: bool | lib.NoDefault = lib.no_default,
+         closed=lib.no_default,
+         ambiguous: TimeAmbiguous = "raise",
+         dayfirst: bool = False,
+         yearfirst: bool = False,
+         dtype: Dtype | None = None,
+         copy: bool = False,
+         name: Hashable | None = None,
+     ) -> Self:
+         if closed is not lib.no_default:
+             # GH#52628
+             warnings.warn(
+                 f"The 'closed' keyword in {cls.__name__} construction is "
+                 "deprecated and will be removed in a future version.",
+                 FutureWarning,
+                 stacklevel=find_stack_level(),
+             )
+         if normalize is not lib.no_default:
+             # GH#52628
+             warnings.warn(
+                 f"The 'normalize' keyword in {cls.__name__} construction is "
+                 "deprecated and will be removed in a future version.",
+                 FutureWarning,
+                 stacklevel=find_stack_level(),
+             )
+ 
+         if is_scalar(data):
+             cls._raise_scalar_data_error(data)
+ 
+         # - Cases checked above all return/raise before reaching here - #
+ 
+         name = maybe_extract_name(name, data, cls)
+ 
+         if (
+             isinstance(data, DatetimeArray)
+             and freq is lib.no_default
+             and tz is lib.no_default
+             and dtype is None
+         ):
+             # fastpath, similar logic in TimedeltaIndex.__new__;
+             # Note in this particular case we retain non-nano.
+             if copy:
+                 data = data.copy()
+             return cls._simple_new(data, name=name)
+ 
+         dtarr = DatetimeArray._from_sequence_not_strict(
+             data,
+             dtype=dtype,
+             copy=copy,
+             tz=tz,
+             freq=freq,
+             dayfirst=dayfirst,
+             yearfirst=yearfirst,
+             ambiguous=ambiguous,
+         )
+         refs = None
+         if not copy and isinstance(data, (Index, ABCSeries)):
+             refs = data._references
+ 
+         subarr = cls._simple_new(dtarr, name=name, refs=refs)
+         return subarr
+ 
+     # --------------------------------------------------------------------
+ 
+     @cache_readonly
+     def _is_dates_only(self) -> bool:
+         """
+         Return a boolean if we are only dates (and don't have a timezone)
+ 
+         Returns
+         -------
+         bool
+         """
+         if isinstance(self.freq, Tick):
+             delta = Timedelta(self.freq)
+ 
+             if delta % dt.timedelta(days=1) != dt.timedelta(days=0):
+                 return False
+ 
+         return self._values._is_dates_only
+ 
+     def __reduce__(self):
+         d = {"data": self._data, "name": self.name}
+         return _new_DatetimeIndex, (type(self), d), None
+ 
+     def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
+         """
+         Can we compare values of the given dtype to our own?
+         """
+         if self.tz is not None:
+             # If we have tz, we can compare to tzaware
+             return isinstance(dtype, DatetimeTZDtype)
+         # if we don't have tz, we can only compare to tznaive
+         return lib.is_np_dtype(dtype, "M")
+ 
+     # --------------------------------------------------------------------
+     # Rendering Methods
+ 
+     @cache_readonly
+     def _formatter_func(self):
+         # Note this is equivalent to the DatetimeIndexOpsMixin method but
+         # uses the maybe-cached self._is_dates_only instead of re-computing it.
+         from pandas.io.formats.format import get_format_datetime64
+ 
+         formatter = get_format_datetime64(is_dates_only=self._is_dates_only)
+         return lambda x: f"'{formatter(x)}'"
+ 
+     # --------------------------------------------------------------------
+     # Set Operation Methods
+ 
+     def _can_range_setop(self, other) -> bool:
+         # GH 46702: If self or other have non-UTC tzs, DST transitions prevent
+         # range representation due to no singular step
+         if (
+             self.tz is not None
+             and not timezones.is_utc(self.tz)
+             and not timezones.is_fixed_offset(self.tz)
+         ):
+             return False
+         if (
+             other.tz is not None
+             and not timezones.is_utc(other.tz)
+             and not timezones.is_fixed_offset(other.tz)
+         ):
+             return False
+         return super()._can_range_setop(other)
+ 
+     # --------------------------------------------------------------------
+ 
+     def _get_time_micros(self) -> npt.NDArray[np.int64]:
+         """
+         Return the number of microseconds since midnight.
+ 
+         Returns
+         -------
+         ndarray[int64_t]
+         """
+         values = self._data._local_timestamps()
+ 
+         ppd = periods_per_day(self._data._creso)
+ 
+         frac = values % ppd
+         if self.unit == "ns":
+             micros = frac // 1000
+         elif self.unit == "us":
+             micros = frac
+         elif self.unit == "ms":
+             micros = frac * 1000
+         elif self.unit == "s":
+             micros = frac * 1_000_000
+         else:  # pragma: no cover
+             raise NotImplementedError(self.unit)
+ 
+         micros[self._isnan] = -1
+         return micros
+ 
+     def snap(self, freq: Frequency = "S") -> DatetimeIndex:
+         """
+         Snap time stamps to nearest occurring frequency.
+ 
+         Returns
+         -------
+         DatetimeIndex
+ 
+         Examples
+         --------
+         >>> idx = pd.DatetimeIndex(['2023-01-01', '2023-01-02',
+         ...                         '2023-02-01', '2023-02-02'])
+         >>> idx
+         DatetimeIndex(['2023-01-01', '2023-01-02', '2023-02-01', '2023-02-02'],
+                       dtype='datetime64[ns]', freq=None)
+         >>> idx.snap('MS')
+         DatetimeIndex(['2023-01-01', '2023-01-01', '2023-02-01', '2023-02-01'],
+                       dtype='datetime64[ns]', freq=None)
+         """
+         # Superdumb, punting on any optimizing
+         freq = to_offset(freq)
+ 
+         dta = self._data.copy()
+ 
+         for i, v in enumerate(self):
+             s = v
+             if not freq.is_on_offset(s):
+                 t0 = freq.rollback(s)
+                 t1 = freq.rollforward(s)
+                 if abs(s - t0) < abs(t1 - s):
+                     s = t0
+                 else:
+                     s = t1
+             dta[i] = s
+ 
+         return DatetimeIndex._simple_new(dta, name=self.name)
+ 
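
A quick illustration of the rollback/rollforward choice inside snap above (a sketch; assumes a recent pandas imported as pd):

    import pandas as pd

    idx = pd.DatetimeIndex(["2023-01-15", "2023-01-17"])
    # Each stamp moves to whichever of freq.rollback / freq.rollforward is nearer:
    # 2023-01-17 is 2 days from Sunday 01-15 but 5 days from Sunday 01-22.
    print(idx.snap("W-SUN"))
    # DatetimeIndex(['2023-01-15', '2023-01-15'], dtype='datetime64[ns]', freq=None)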
+     # --------------------------------------------------------------------
+     # Indexing Methods
+ 
+     def _parsed_string_to_bounds(self, reso: Resolution, parsed: dt.datetime):
+         """
+         Calculate datetime bounds for parsed time string and its resolution.
+ 
+         Parameters
+         ----------
+         reso : Resolution
+             Resolution provided by parsed string.
+         parsed : datetime
+             Datetime from parsed string.
+ 
+         Returns
+         -------
+         lower, upper: pd.Timestamp
+         """
+         freq = OFFSET_TO_PERIOD_FREQSTR.get(reso.attr_abbrev, reso.attr_abbrev)
+         per = Period(parsed, freq=freq)
+         start, end = per.start_time, per.end_time
+ 
+         # GH 24076
+         # If an incoming date string contained a UTC offset, need to localize
+         # the parsed date to this offset first before aligning with the index's
+         # timezone
+         start = start.tz_localize(parsed.tzinfo)
+         end = end.tz_localize(parsed.tzinfo)
+ 
+         if parsed.tzinfo is not None:
+             if self.tz is None:
+                 raise ValueError(
+                     "The index must be timezone aware when indexing "
+                     "with a date string with a UTC offset"
+                 )
+         # The flipped case with parsed.tz is None and self.tz is not None
+         # is ruled out bc parsed and reso are produced by _parse_with_reso,
+         # which localizes parsed.
+         return start, end
+ 
+     def _parse_with_reso(self, label: str):
+         parsed, reso = super()._parse_with_reso(label)
+ 
+         parsed = Timestamp(parsed)
+ 
+         if self.tz is not None and parsed.tzinfo is None:
+             # we special-case timezone-naive strings and timezone-aware
+             # DatetimeIndex
+             # https://github.com/pandas-dev/pandas/pull/36148#issuecomment-687883081
+             parsed = parsed.tz_localize(self.tz)
+ 
+         return parsed, reso
+ 
+     def _disallow_mismatched_indexing(self, key) -> None:
+         """
+         Check for mismatched-tzawareness indexing and re-raise as KeyError.
+         """
+         # we get here with isinstance(key, self._data._recognized_scalars)
+         try:
+             # GH#36148
+             self._data._assert_tzawareness_compat(key)
+         except TypeError as err:
+             raise KeyError(key) from err
+ 
+     def get_loc(self, key):
+         """
+         Get integer location for requested label.
+ 
+         Returns
+         -------
+         loc : int
+         """
+         self._check_indexing_error(key)
+ 
+         orig_key = key
+         if is_valid_na_for_dtype(key, self.dtype):
+             key = NaT
+ 
+         if isinstance(key, self._data._recognized_scalars):
+             # needed to localize naive datetimes
+             self._disallow_mismatched_indexing(key)
+             key = Timestamp(key)
+ 
+         elif isinstance(key, str):
+             try:
+                 parsed, reso = self._parse_with_reso(key)
+             except (ValueError, pytz.NonExistentTimeError) as err:
+                 raise KeyError(key) from err
+             self._disallow_mismatched_indexing(parsed)
+ 
+             if self._can_partial_date_slice(reso):
+                 try:
+                     return self._partial_date_slice(reso, parsed)
+                 except KeyError as err:
+                     raise KeyError(key) from err
+ 
+             key = parsed
+ 
+         elif isinstance(key, dt.timedelta):
+             # GH#20464
+             raise TypeError(
+                 f"Cannot index {type(self).__name__} with {type(key).__name__}"
+             )
+ 
+         elif isinstance(key, dt.time):
+             return self.indexer_at_time(key)
+ 
+         else:
+             # unrecognized type
+             raise KeyError(key)
+ 
+         try:
+             return Index.get_loc(self, key)
+         except KeyError as err:
+             raise KeyError(orig_key) from err
+ 
+     @doc(DatetimeTimedeltaMixin._maybe_cast_slice_bound)
+     def _maybe_cast_slice_bound(self, label, side: str):
+         # GH#42855 handle date here instead of get_slice_bound
+         if isinstance(label, dt.date) and not isinstance(label, dt.datetime):
+             # Pandas supports slicing with dates, treated as datetimes at midnight.
+             # https://github.com/pandas-dev/pandas/issues/31501
+             label = Timestamp(label).to_pydatetime()
+ 
+         label = super()._maybe_cast_slice_bound(label, side)
+         self._data._assert_tzawareness_compat(label)
+         return Timestamp(label)
+ 
+     def slice_indexer(self, start=None, end=None, step=None):
+         """
+         Return indexer for specified label slice.
+         Index.slice_indexer, customized to handle time slicing.
+ 
+         In addition to functionality provided by Index.slice_indexer, does the
+         following:
+ 
+         - if both `start` and `end` are instances of `datetime.time`, it
+           invokes `indexer_between_time`
+         - if `start` and `end` are both either string or None perform
+           value-based selection in non-monotonic cases.
+ 
+         """
+         # For historical reasons DatetimeIndex supports slices between two
+         # instances of datetime.time as if it were applying a slice mask to
+         # an array of (self.hour, self.minute, self.seconds, self.microsecond).
+         if isinstance(start, dt.time) and isinstance(end, dt.time):
+             if step is not None and step != 1:
+                 raise ValueError("Must have step size of 1 with time slices")
+             return self.indexer_between_time(start, end)
+ 
+         if isinstance(start, dt.time) or isinstance(end, dt.time):
+             raise KeyError("Cannot mix time and non-time slice keys")
+ 
+         def check_str_or_none(point) -> bool:
+             return point is not None and not isinstance(point, str)
+ 
+         # GH#33146 if start and end are combinations of str and None and Index is not
+         # monotonic, we cannot use Index.slice_indexer because it does not honor the
+         # actual elements; it only searches for start and end
+         if (
+             check_str_or_none(start)
+             or check_str_or_none(end)
+             or self.is_monotonic_increasing
+         ):
+             return Index.slice_indexer(self, start, end, step)
+ 
+         mask = np.array(True)
+         in_index = True
+         if start is not None:
+             start_casted = self._maybe_cast_slice_bound(start, "left")
+             mask = start_casted <= self
+             in_index &= (start_casted == self).any()
+ 
+         if end is not None:
+             end_casted = self._maybe_cast_slice_bound(end, "right")
+             mask = (self <= end_casted) & mask
+             in_index &= (end_casted == self).any()
+ 
+         if not in_index:
+             raise KeyError(
+                 "Value based partial slicing on non-monotonic DatetimeIndexes "
+                 "with non-existing keys is not allowed.",
+             )
+         indexer = mask.nonzero()[0][::step]
+         if len(indexer) == len(self):
+             return slice(None)
+         else:
+             return indexer
+ 
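
A short sketch of the value-based branch of slice_indexer above on a non-monotonic index (illustrative; assumes a recent pandas as pd and numpy as np):

    import numpy as np
    import pandas as pd

    ser = pd.Series(np.arange(10), index=pd.date_range("2014-01-01", periods=10))
    nonmono = ser.iloc[[3, 5, 4]]        # 01-04, 01-06, 01-05: not monotonic
    # A start key that exists in the index selects by value, not position:
    print(nonmono.loc["2014-01-05":])    # keeps the 01-06 and 01-05 rows
    # A start key absent from the index raises KeyError instead of guessing:
    # nonmono.loc["2014-01-10":]  ->  KeyError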
+     # --------------------------------------------------------------------
+ 
+     @property
+     def inferred_type(self) -> str:
+         # b/c datetime is represented as microseconds since the epoch, make
+         # sure we can't have ambiguous indexing
+         return "datetime64"
+ 
+     def indexer_at_time(self, time, asof: bool = False) -> npt.NDArray[np.intp]:
+         """
+         Return index locations of values at particular time of day.
+ 
+         Parameters
+         ----------
+         time : datetime.time or str
+             Time passed in either as object (datetime.time) or as string in
+             appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
+             "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").
+ 
+         Returns
+         -------
+         np.ndarray[np.intp]
+ 
+         See Also
+         --------
+         indexer_between_time : Get index locations of values between particular
+             times of day.
+         DataFrame.at_time : Select values at particular time of day.
+ 
+         Examples
+         --------
+         >>> idx = pd.DatetimeIndex(["1/1/2020 10:00", "2/1/2020 11:00",
+         ...                         "3/1/2020 10:00"])
+         >>> idx.indexer_at_time("10:00")
+         array([0, 2])
+         """
+         if asof:
+             raise NotImplementedError("'asof' argument is not supported")
+ 
+         if isinstance(time, str):
+             from dateutil.parser import parse
+ 
+             time = parse(time).time()
+ 
+         if time.tzinfo:
+             if self.tz is None:
+                 raise ValueError("Index must be timezone aware.")
+             time_micros = self.tz_convert(time.tzinfo)._get_time_micros()
+         else:
+             time_micros = self._get_time_micros()
+         micros = _time_to_micros(time)
+         return (time_micros == micros).nonzero()[0]
+ 
+     def indexer_between_time(
+         self, start_time, end_time, include_start: bool = True, include_end: bool = True
+     ) -> npt.NDArray[np.intp]:
+         """
+         Return index locations of values between particular times of day.
+ 
+         Parameters
+         ----------
+         start_time, end_time : datetime.time, str
+             Time passed either as object (datetime.time) or as string in
+             appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
+             "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").
+         include_start : bool, default True
+         include_end : bool, default True
+ 
+         Returns
+         -------
+         np.ndarray[np.intp]
+ 
+         See Also
+         --------
+         indexer_at_time : Get index locations of values at particular time of day.
+         DataFrame.between_time : Select values between particular times of day.
+ 
+         Examples
+         --------
+         >>> idx = pd.date_range("2023-01-01", periods=4, freq="h")
+         >>> idx
+         DatetimeIndex(['2023-01-01 00:00:00', '2023-01-01 01:00:00',
+                        '2023-01-01 02:00:00', '2023-01-01 03:00:00'],
+                       dtype='datetime64[ns]', freq='h')
+         >>> idx.indexer_between_time("00:00", "2:00", include_end=False)
+         array([0, 1])
+         """
+         start_time = to_time(start_time)
+         end_time = to_time(end_time)
+         time_micros = self._get_time_micros()
+         start_micros = _time_to_micros(start_time)
+         end_micros = _time_to_micros(end_time)
+ 
+         if include_start and include_end:
+             lop = rop = operator.le
+         elif include_start:
+             lop = operator.le
+             rop = operator.lt
+         elif include_end:
+             lop = operator.lt
+             rop = operator.le
+         else:
+             lop = rop = operator.lt
+ 
+         if start_time <= end_time:
+             join_op = operator.and_
+         else:
+             join_op = operator.or_
+ 
+         mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))
+ 
+         return mask.nonzero()[0]
+ 
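
Note that the start_time <= end_time test above also gives indexer_between_time a wrap-around mode; a small sketch (assumes a recent pandas as pd):

    import pandas as pd

    idx = pd.date_range("2023-01-01", periods=6, freq="4h")  # 00, 04, 08, 12, 16, 20 h
    # start > end joins the two half-ranges with OR, selecting times across midnight:
    print(idx.indexer_between_time("20:00", "04:00"))  # array([0, 1, 5])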
+ 
+ def date_range(
+     start=None,
+     end=None,
+     periods=None,
+     freq=None,
+     tz=None,
+     normalize: bool = False,
+     name: Hashable | None = None,
+     inclusive: IntervalClosedType = "both",
+     *,
+     unit: str | None = None,
+     **kwargs,
+ ) -> DatetimeIndex:
+     """
+     Return a fixed frequency DatetimeIndex.
+ 
+     Returns the range of equally spaced time points (where the difference between any
+     two adjacent points is specified by the given frequency) such that they all
+     satisfy `start <[=] x <[=] end`, where the first one and the last one are, resp.,
+     the first and last time points in that range that fall on the boundary of ``freq``
+     (if given as a frequency string) or that are valid for ``freq`` (if given as a
+     :class:`pandas.tseries.offsets.DateOffset`). (If exactly one of ``start``,
+     ``end``, or ``freq`` is *not* specified, this missing parameter can be computed
+     given ``periods``, the number of timesteps in the range. See the note below.)
+ 
+     Parameters
+     ----------
+     start : str or datetime-like, optional
+         Left bound for generating dates.
+     end : str or datetime-like, optional
+         Right bound for generating dates.
+     periods : int, optional
+         Number of periods to generate.
+     freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D'
+         Frequency strings can have multiples, e.g. '5h'. See
+         :ref:`here <timeseries.offset_aliases>` for a list of
+         frequency aliases.
+     tz : str or tzinfo, optional
+         Time zone name for returning localized DatetimeIndex, for example
+         'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
+         timezone-naive unless timezone-aware datetime-likes are passed.
+     normalize : bool, default False
+         Normalize start/end dates to midnight before generating date range.
+     name : str, default None
+         Name of the resulting DatetimeIndex.
+     inclusive : {"both", "neither", "left", "right"}, default "both"
+         Include boundaries; Whether to set each bound as closed or open.
+ 
+         .. versionadded:: 1.4.0
+     unit : str, default None
+         Specify the desired resolution of the result.
+ 
+         .. versionadded:: 2.0.0
+     **kwargs
+         For compatibility. Has no effect on the result.
+ 
+     Returns
+     -------
+     DatetimeIndex
+ 
+     See Also
+     --------
+     DatetimeIndex : An immutable container for datetimes.
+     timedelta_range : Return a fixed frequency TimedeltaIndex.
+     period_range : Return a fixed frequency PeriodIndex.
+     interval_range : Return a fixed frequency IntervalIndex.
+ 
+     Notes
+     -----
+     Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
+     exactly three must be specified. If ``freq`` is omitted, the resulting
+     ``DatetimeIndex`` will have ``periods`` linearly spaced elements between
+     ``start`` and ``end`` (closed on both sides).
+ 
+     To learn more about the frequency strings, please see `this link
+     <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
+ 
+     Examples
+     --------
+     **Specifying the values**
+ 
+     The next four examples generate the same `DatetimeIndex`, but vary
+     the combination of `start`, `end` and `periods`.
+ 
+     Specify `start` and `end`, with the default daily frequency.
+ 
+     >>> pd.date_range(start='1/1/2018', end='1/08/2018')
+     DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
+                    '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
+                   dtype='datetime64[ns]', freq='D')
+ 
+     Specify timezone-aware `start` and `end`, with the default daily frequency.
+ 
+     >>> pd.date_range(
+     ...     start=pd.to_datetime("1/1/2018").tz_localize("Europe/Berlin"),
+     ...     end=pd.to_datetime("1/08/2018").tz_localize("Europe/Berlin"),
+     ... )
+     DatetimeIndex(['2018-01-01 00:00:00+01:00', '2018-01-02 00:00:00+01:00',
+                    '2018-01-03 00:00:00+01:00', '2018-01-04 00:00:00+01:00',
+                    '2018-01-05 00:00:00+01:00', '2018-01-06 00:00:00+01:00',
+                    '2018-01-07 00:00:00+01:00', '2018-01-08 00:00:00+01:00'],
+                   dtype='datetime64[ns, Europe/Berlin]', freq='D')
+ 
+     Specify `start` and `periods`, the number of periods (days).
+ 
+     >>> pd.date_range(start='1/1/2018', periods=8)
+     DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
+                    '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
+                   dtype='datetime64[ns]', freq='D')
+ 
+     Specify `end` and `periods`, the number of periods (days).
+ 
+     >>> pd.date_range(end='1/1/2018', periods=8)
+     DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
+                    '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
+                   dtype='datetime64[ns]', freq='D')
+ 
+     Specify `start`, `end`, and `periods`; the frequency is generated
+     automatically (linearly spaced).
+ 
+     >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
+     DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
+                    '2018-04-27 00:00:00'],
+                   dtype='datetime64[ns]', freq=None)
+ 
+     **Other Parameters**
+ 
+     Changed the `freq` (frequency) to ``'ME'`` (month end frequency).
+ 
+     >>> pd.date_range(start='1/1/2018', periods=5, freq='ME')
+     DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
+                    '2018-05-31'],
+                   dtype='datetime64[ns]', freq='ME')
+ 
+     Multiples are allowed
+ 
+     >>> pd.date_range(start='1/1/2018', periods=5, freq='3ME')
+     DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
+                    '2019-01-31'],
+                   dtype='datetime64[ns]', freq='3ME')
+ 
+     `freq` can also be specified as an Offset object.
+ 
+     >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
+     DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
+                    '2019-01-31'],
+                   dtype='datetime64[ns]', freq='3ME')
+ 
+     Specify `tz` to set the timezone.
+ 
+     >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
+     DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
+                    '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
+                    '2018-01-05 00:00:00+09:00'],
+                   dtype='datetime64[ns, Asia/Tokyo]', freq='D')
+ 
+     `inclusive` controls whether to include `start` and `end` that are on the
+     boundary. The default, "both", includes boundary points on either end.
+ 
+     >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive="both")
+     DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
+                   dtype='datetime64[ns]', freq='D')
+ 
+     Use ``inclusive='left'`` to exclude `end` if it falls on the boundary.
+ 
+     >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='left')
+     DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
+                   dtype='datetime64[ns]', freq='D')
+ 
+     Use ``inclusive='right'`` to exclude `start` if it falls on the boundary, and
+     similarly ``inclusive='neither'`` will exclude both `start` and `end`.
+ 
+     >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='right')
+     DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
+                   dtype='datetime64[ns]', freq='D')
+ 
+     **Specify a unit**
+ 
+     >>> pd.date_range(start="2017-01-01", periods=10, freq="100YS", unit="s")
+     DatetimeIndex(['2017-01-01', '2117-01-01', '2217-01-01', '2317-01-01',
+                    '2417-01-01', '2517-01-01', '2617-01-01', '2717-01-01',
+                    '2817-01-01', '2917-01-01'],
+                   dtype='datetime64[s]', freq='100YS-JAN')
+     """
+     if freq is None and com.any_none(periods, start, end):
+         freq = "D"
+ 
+     dtarr = DatetimeArray._generate_range(
+         start=start,
+         end=end,
+         periods=periods,
+         freq=freq,
+         tz=tz,
+         normalize=normalize,
+         inclusive=inclusive,
+         unit=unit,
+         **kwargs,
+     )
+     return DatetimeIndex._simple_new(dtarr, name=name)
+ 
+ 
+ def bdate_range(
+     start=None,
+     end=None,
+     periods: int | None = None,
+     freq: Frequency | dt.timedelta = "B",
+     tz=None,
+     normalize: bool = True,
+     name: Hashable | None = None,
+     weekmask=None,
+     holidays=None,
+     inclusive: IntervalClosedType = "both",
+     **kwargs,
+ ) -> DatetimeIndex:
+     """
+     Return a fixed frequency DatetimeIndex with business day as the default.
+ 
+     Parameters
+     ----------
+     start : str or datetime-like, default None
+         Left bound for generating dates.
+     end : str or datetime-like, default None
+         Right bound for generating dates.
+     periods : int, default None
+         Number of periods to generate.
+     freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'B'
+         Frequency strings can have multiples, e.g. '5h'. The default is
+         business daily ('B').
+     tz : str or None
+         Time zone name for returning localized DatetimeIndex, for example
+         Asia/Beijing.
+     normalize : bool, default True
+         Normalize start/end dates to midnight before generating date range.
+     name : str, default None
+         Name of the resulting DatetimeIndex.
+     weekmask : str or None, default None
+         Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
+         only used when custom frequency strings are passed. The default
+         value None is equivalent to 'Mon Tue Wed Thu Fri'.
+     holidays : list-like or None, default None
+         Dates to exclude from the set of valid business days, passed to
+         ``numpy.busdaycalendar``, only used when custom frequency strings
+         are passed.
+     inclusive : {"both", "neither", "left", "right"}, default "both"
+         Include boundaries; Whether to set each bound as closed or open.
+ 
+         .. versionadded:: 1.4.0
+     **kwargs
+         For compatibility. Has no effect on the result.
+ 
+     Returns
+     -------
+     DatetimeIndex
+ 
+     Notes
+     -----
+     Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``,
+     exactly three must be specified. Specifying ``freq`` is a requirement
+     for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not
+     desired.
+ 
+     To learn more about the frequency strings, please see `this link
+     <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
+ 
+     Examples
+     --------
+     Note how the two weekend days are skipped in the result.
+ 
+     >>> pd.bdate_range(start='1/1/2018', end='1/08/2018')
+     DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
+                    '2018-01-05', '2018-01-08'],
+                   dtype='datetime64[ns]', freq='B')
+     """
+     if freq is None:
+         msg = "freq must be specified for bdate_range; use date_range instead"
+         raise TypeError(msg)
+ 
+     if isinstance(freq, str) and freq.startswith("C"):
+         try:
+             weekmask = weekmask or "Mon Tue Wed Thu Fri"
+             freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
+         except (KeyError, TypeError) as err:
+             msg = f"invalid custom frequency string: {freq}"
+             raise ValueError(msg) from err
+     elif holidays or weekmask:
+         msg = (
+             "a custom frequency string is required when holidays or "
+             f"weekmask are passed, got frequency {freq}"
+         )
+         raise ValueError(msg)
+ 
+     return date_range(
+         start=start,
+         end=end,
+         periods=periods,
+         freq=freq,
+         tz=tz,
+         normalize=normalize,
+         name=name,
+         inclusive=inclusive,
+         **kwargs,
+     )
+ 
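
The "C"-prefixed branch above builds a custom business-day offset from weekmask/holidays; a small usage sketch (illustrative; assumes a recent pandas as pd):

    import pandas as pd

    # A custom frequency string routes weekmask and holidays to numpy.busdaycalendar:
    idx = pd.bdate_range(
        start="2018-01-01",
        end="2018-01-10",
        freq="C",
        weekmask="Mon Tue Wed Thu Fri",
        holidays=["2018-01-05"],
    )
    print(idx)  # business days with weekends and 2018-01-05 skipped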
+ 
+ def _time_to_micros(time_obj: dt.time) -> int:
+     seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second
+     return 1_000_000 * seconds + time_obj.microsecond
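
The helper reduces a time of day to microseconds since midnight, e.g. 10:30:00 gives (10*3600 + 30*60) * 1_000_000 = 37_800_000_000. A quick check of the arithmetic (illustrative):

    import datetime as dt

    t = dt.time(10, 30, 0)
    seconds = t.hour * 3600 + t.minute * 60 + t.second  # 37_800
    assert 1_000_000 * seconds + t.microsecond == 37_800_000_000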
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/extension.py ADDED
@@ -0,0 +1,172 @@
+ """
+ Shared methods for Index subclasses backed by ExtensionArray.
+ """
+ from __future__ import annotations
+ 
+ from typing import (
+     TYPE_CHECKING,
+     Callable,
+     TypeVar,
+ )
+ 
+ from pandas.util._decorators import cache_readonly
+ 
+ from pandas.core.dtypes.generic import ABCDataFrame
+ 
+ from pandas.core.indexes.base import Index
+ 
+ if TYPE_CHECKING:
+     import numpy as np
+ 
+     from pandas._typing import (
+         ArrayLike,
+         npt,
+     )
+ 
+     from pandas.core.arrays import IntervalArray
+     from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
+ 
+ _ExtensionIndexT = TypeVar("_ExtensionIndexT", bound="ExtensionIndex")
+ 
+ 
+ def _inherit_from_data(
+     name: str, delegate: type, cache: bool = False, wrap: bool = False
+ ):
+     """
+     Make an alias for a method of the underlying ExtensionArray.
+ 
+     Parameters
+     ----------
+     name : str
+         Name of an attribute the class should inherit from its EA parent.
+     delegate : class
+     cache : bool, default False
+         Whether to convert wrapped properties into cache_readonly
+     wrap : bool, default False
+         Whether to wrap the inherited result in an Index.
+ 
+     Returns
+     -------
+     attribute, method, property, or cache_readonly
+     """
+     attr = getattr(delegate, name)
+ 
+     if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor":
+         # getset_descriptor i.e. property defined in cython class
+         if cache:
+ 
+             def cached(self):
+                 return getattr(self._data, name)
+ 
+             cached.__name__ = name
+             cached.__doc__ = attr.__doc__
+             method = cache_readonly(cached)
+ 
+         else:
+ 
+             def fget(self):
+                 result = getattr(self._data, name)
+                 if wrap:
+                     if isinstance(result, type(self._data)):
+                         return type(self)._simple_new(result, name=self.name)
+                     elif isinstance(result, ABCDataFrame):
+                         return result.set_index(self)
+                     return Index(result, name=self.name)
+                 return result
+ 
+             def fset(self, value) -> None:
+                 setattr(self._data, name, value)
+ 
+             fget.__name__ = name
+             fget.__doc__ = attr.__doc__
+ 
+             method = property(fget, fset)
+ 
+     elif not callable(attr):
+         # just a normal attribute, no wrapping
+         method = attr
+ 
+     else:
+         # error: Incompatible redefinition (redefinition with type "Callable[[Any,
+         # VarArg(Any), KwArg(Any)], Any]", original type "property")
+         def method(self, *args, **kwargs):  # type: ignore[misc]
+             if "inplace" in kwargs:
+                 raise ValueError(f"cannot use inplace with {type(self).__name__}")
+             result = attr(self._data, *args, **kwargs)
+             if wrap:
+                 if isinstance(result, type(self._data)):
+                     return type(self)._simple_new(result, name=self.name)
+                 elif isinstance(result, ABCDataFrame):
+                     return result.set_index(self)
+                 return Index(result, name=self.name)
+             return result
+ 
+         # error: "property" has no attribute "__name__"
+         method.__name__ = name  # type: ignore[attr-defined]
+         method.__doc__ = attr.__doc__
+     return method
+ 
+ 
+ def inherit_names(
+     names: list[str], delegate: type, cache: bool = False, wrap: bool = False
+ ) -> Callable[[type[_ExtensionIndexT]], type[_ExtensionIndexT]]:
+     """
+     Class decorator to pin attributes from an ExtensionArray to an Index subclass.
+ 
+     Parameters
+     ----------
+     names : List[str]
+     delegate : class
+     cache : bool, default False
+     wrap : bool, default False
+         Whether to wrap the inherited result in an Index.
+     """
+ 
+     def wrapper(cls: type[_ExtensionIndexT]) -> type[_ExtensionIndexT]:
+         for name in names:
+             meth = _inherit_from_data(name, delegate, cache=cache, wrap=wrap)
+             setattr(cls, name, meth)
+ 
+         return cls
+ 
+     return wrapper
+ 
+ 
+ class ExtensionIndex(Index):
+     """
+     Index subclass for indexes backed by ExtensionArray.
+     """
+ 
+     # The base class already passes through to _data:
+     #  size, __len__, dtype
+ 
+     _data: IntervalArray | NDArrayBackedExtensionArray
+ 
+     # ---------------------------------------------------------------------
+ 
+     def _validate_fill_value(self, value):
+         """
+         Convert value to be insertable to underlying array.
+         """
+         return self._data._validate_setitem_value(value)
+ 
+     @cache_readonly
+     def _isnan(self) -> npt.NDArray[np.bool_]:
+         # error: Incompatible return value type (got "ExtensionArray", expected
+         # "ndarray")
+         return self._data.isna()  # type: ignore[return-value]
+ 
+ 
+ class NDArrayBackedExtensionIndex(ExtensionIndex):
+     """
+     Index subclass for indexes backed by NDArrayBackedExtensionArray.
+     """
+ 
+     _data: NDArrayBackedExtensionArray
+ 
+     def _get_engine_target(self) -> np.ndarray:
+         return self._data._ndarray
+ 
+     def _from_join_target(self, result: np.ndarray) -> ArrayLike:
+         assert result.dtype == self._data._ndarray.dtype
+         return self._data._from_backing_data(result)
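
A minimal sketch of the delegation pattern that inherit_names implements above, using hypothetical toy classes (not pandas API) purely for illustration:

    # Toy stand-ins showing how a class decorator can pin forwarding methods:
    class Backing:
        def double(self, x):
            return 2 * x

    def pin(names):
        def wrapper(cls):
            for name in names:
                # Forward each named method to the wrapped ._data object
                setattr(
                    cls,
                    name,
                    lambda self, *a, _n=name, **kw: getattr(self._data, _n)(*a, **kw),
                )
            return cls
        return wrapper

    @pin(["double"])
    class Wrapper:
        def __init__(self):
            self._data = Backing()

    assert Wrapper().double(3) == 6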
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/frozen.py ADDED
@@ -0,0 +1,120 @@
+ """
+ frozen (immutable) data structures to support MultiIndexing
+ 
+ These are used for:
+ 
+ - .names (FrozenList)
+ 
+ """
+ from __future__ import annotations
+ 
+ from typing import (
+     TYPE_CHECKING,
+     NoReturn,
+ )
+ 
+ from pandas.core.base import PandasObject
+ 
+ from pandas.io.formats.printing import pprint_thing
+ 
+ if TYPE_CHECKING:
+     from pandas._typing import Self
+ 
+ 
+ class FrozenList(PandasObject, list):
+     """
+     Container that doesn't allow setting item *but*
+     because it's technically hashable, will be used
+     for lookups, appropriately, etc.
+     """
+ 
+     # Side note: This has to be of type list. Otherwise,
+     #            it messes up PyTables type checks.
+ 
+     def union(self, other) -> FrozenList:
+         """
+         Returns a FrozenList with other concatenated to the end of self.
+ 
+         Parameters
+         ----------
+         other : array-like
+             The array-like whose elements we are concatenating.
+ 
+         Returns
+         -------
+         FrozenList
+             Self with the elements of other appended.
+         """
+         if isinstance(other, tuple):
+             other = list(other)
+         return type(self)(super().__add__(other))
+ 
+     def difference(self, other) -> FrozenList:
+         """
+         Returns a FrozenList with elements from other removed from self.
+ 
+         Parameters
+         ----------
+         other : array-like
+             The array-like whose elements we are removing from self.
+ 
+         Returns
+         -------
+         FrozenList
+             The collection difference between self and other.
+         """
+         other = set(other)
+         temp = [x for x in self if x not in other]
+         return type(self)(temp)
+ 
+     # TODO: Consider deprecating these in favor of `union` (xref gh-15506)
+     # error: Incompatible types in assignment (expression has type
+     # "Callable[[FrozenList, Any], FrozenList]", base class "list" defined the
+     # type as overloaded function)
+     __add__ = __iadd__ = union  # type: ignore[assignment]
+ 
+     def __getitem__(self, n):
+         if isinstance(n, slice):
+             return type(self)(super().__getitem__(n))
+         return super().__getitem__(n)
+ 
+     def __radd__(self, other) -> Self:
+         if isinstance(other, tuple):
+             other = list(other)
+         return type(self)(other + list(self))
+ 
+     def __eq__(self, other: object) -> bool:
+         if isinstance(other, (tuple, FrozenList)):
+             other = list(other)
+         return super().__eq__(other)
+ 
+     __req__ = __eq__
+ 
+     def __mul__(self, other) -> Self:
+         return type(self)(super().__mul__(other))
+ 
+     __imul__ = __mul__
+ 
+     def __reduce__(self):
+         return type(self), (list(self),)
+ 
+     # error: Signature of "__hash__" incompatible with supertype "list"
+     def __hash__(self) -> int:  # type: ignore[override]
+         return hash(tuple(self))
+ 
+     def _disabled(self, *args, **kwargs) -> NoReturn:
+         """
+         This method will not function because object is immutable.
+         """
+         raise TypeError(f"'{type(self).__name__}' does not support mutable operations.")
+ 
+     def __str__(self) -> str:
+         return pprint_thing(self, quote_strings=True, escape_chars=("\t", "\r", "\n"))
+ 
+     def __repr__(self) -> str:
+         return f"{type(self).__name__}({str(self)})"
+ 
+     __setitem__ = __setslice__ = _disabled  # type: ignore[assignment]
+     __delitem__ = __delslice__ = _disabled
+     pop = append = extend = _disabled
+     remove = sort = insert = _disabled  # type: ignore[assignment]
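
A quick sketch of the FrozenList contract above: list-like reads work, mutation raises (assumes pandas is installed; FrozenList is internal API):

    from pandas.core.indexes.frozen import FrozenList

    fl = FrozenList(["a", "b"])
    assert fl + ["c"] == FrozenList(["a", "b", "c"])  # union via __add__
    assert fl.difference(["b"]) == FrozenList(["a"])
    try:
        fl[0] = "x"                                   # __setitem__ is disabled
    except TypeError as err:
        print(err)  # 'FrozenList' does not support mutable operations.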
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/interval.py ADDED
@@ -0,0 +1,1136 @@
1
+ """ define the IntervalIndex """
2
+ from __future__ import annotations
3
+
4
+ from operator import (
5
+ le,
6
+ lt,
7
+ )
8
+ import textwrap
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Any,
12
+ Literal,
13
+ )
14
+
15
+ import numpy as np
16
+
17
+ from pandas._libs import lib
18
+ from pandas._libs.interval import (
19
+ Interval,
20
+ IntervalMixin,
21
+ IntervalTree,
22
+ )
23
+ from pandas._libs.tslibs import (
24
+ BaseOffset,
25
+ Period,
26
+ Timedelta,
27
+ Timestamp,
28
+ to_offset,
29
+ )
30
+ from pandas.errors import InvalidIndexError
31
+ from pandas.util._decorators import (
32
+ Appender,
33
+ cache_readonly,
34
+ )
35
+ from pandas.util._exceptions import rewrite_exception
36
+
37
+ from pandas.core.dtypes.cast import (
38
+ find_common_type,
39
+ infer_dtype_from_scalar,
40
+ maybe_box_datetimelike,
41
+ maybe_downcast_numeric,
42
+ maybe_upcast_numeric_to_64bit,
43
+ )
44
+ from pandas.core.dtypes.common import (
45
+ ensure_platform_int,
46
+ is_float_dtype,
47
+ is_integer,
48
+ is_integer_dtype,
49
+ is_list_like,
50
+ is_number,
51
+ is_object_dtype,
52
+ is_scalar,
53
+ pandas_dtype,
54
+ )
55
+ from pandas.core.dtypes.dtypes import (
56
+ DatetimeTZDtype,
57
+ IntervalDtype,
58
+ )
59
+ from pandas.core.dtypes.missing import is_valid_na_for_dtype
60
+
61
+ from pandas.core.algorithms import unique
62
+ from pandas.core.arrays.datetimelike import validate_periods
63
+ from pandas.core.arrays.interval import (
64
+ IntervalArray,
65
+ _interval_shared_docs,
66
+ )
67
+ import pandas.core.common as com
68
+ from pandas.core.indexers import is_valid_positional_slice
69
+ import pandas.core.indexes.base as ibase
70
+ from pandas.core.indexes.base import (
71
+ Index,
72
+ _index_shared_docs,
73
+ ensure_index,
74
+ maybe_extract_name,
75
+ )
76
+ from pandas.core.indexes.datetimes import (
77
+ DatetimeIndex,
78
+ date_range,
79
+ )
80
+ from pandas.core.indexes.extension import (
81
+ ExtensionIndex,
82
+ inherit_names,
83
+ )
84
+ from pandas.core.indexes.multi import MultiIndex
85
+ from pandas.core.indexes.timedeltas import (
86
+ TimedeltaIndex,
87
+ timedelta_range,
88
+ )
89
+
90
+ if TYPE_CHECKING:
91
+ from collections.abc import Hashable
92
+
93
+ from pandas._typing import (
94
+ Dtype,
95
+ DtypeObj,
96
+ IntervalClosedType,
97
+ Self,
98
+ npt,
99
+ )
100
+ _index_doc_kwargs = dict(ibase._index_doc_kwargs)
101
+
102
+ _index_doc_kwargs.update(
103
+ {
104
+ "klass": "IntervalIndex",
105
+ "qualname": "IntervalIndex",
106
+ "target_klass": "IntervalIndex or list of Intervals",
107
+ "name": textwrap.dedent(
108
+ """\
109
+ name : object, optional
110
+ Name to be stored in the index.
111
+ """
112
+ ),
113
+ }
114
+ )
115
+
116
+
117
+ def _get_next_label(label):
118
+ # see test_slice_locs_with_ints_and_floats_succeeds
119
+ dtype = getattr(label, "dtype", type(label))
120
+ if isinstance(label, (Timestamp, Timedelta)):
121
+ dtype = "datetime64[ns]"
122
+ dtype = pandas_dtype(dtype)
123
+
124
+ if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype):
125
+ return label + np.timedelta64(1, "ns")
126
+ elif is_integer_dtype(dtype):
127
+ return label + 1
128
+ elif is_float_dtype(dtype):
129
+ return np.nextafter(label, np.inf)
130
+ else:
131
+ raise TypeError(f"cannot determine next label for type {repr(type(label))}")
132
+
133
+
134
+ def _get_prev_label(label):
135
+ # see test_slice_locs_with_ints_and_floats_succeeds
136
+ dtype = getattr(label, "dtype", type(label))
137
+ if isinstance(label, (Timestamp, Timedelta)):
138
+ dtype = "datetime64[ns]"
139
+ dtype = pandas_dtype(dtype)
140
+
141
+ if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype):
142
+ return label - np.timedelta64(1, "ns")
143
+ elif is_integer_dtype(dtype):
144
+ return label - 1
145
+ elif is_float_dtype(dtype):
146
+ return np.nextafter(label, -np.inf)
147
+ else:
148
+ raise TypeError(f"cannot determine next label for type {repr(type(label))}")
149
+
150
+
151
+ def _new_IntervalIndex(cls, d):
152
+ """
153
+ This is called upon unpickling, rather than the default which doesn't have
154
+ arguments and breaks __new__.
155
+ """
156
+ return cls.from_arrays(**d)
157
+
158
+
159
+ @Appender(
160
+ _interval_shared_docs["class"]
161
+ % {
162
+ "klass": "IntervalIndex",
163
+ "summary": "Immutable index of intervals that are closed on the same side.",
164
+ "name": _index_doc_kwargs["name"],
165
+ "extra_attributes": "is_overlapping\nvalues\n",
166
+ "extra_methods": "",
167
+ "examples": textwrap.dedent(
168
+ """\
169
+ Examples
170
+ --------
171
+ A new ``IntervalIndex`` is typically constructed using
172
+ :func:`interval_range`:
173
+
174
+ >>> pd.interval_range(start=0, end=5)
175
+ IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
176
+ dtype='interval[int64, right]')
177
+
178
+ It may also be constructed using one of the constructor
179
+ methods: :meth:`IntervalIndex.from_arrays`,
180
+ :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
181
+
182
+ See further examples in the doc strings of ``interval_range`` and the
183
+ mentioned constructor methods.
184
+ """
185
+ ),
186
+ }
187
+ )
188
+ @inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)
189
+ @inherit_names(
190
+ [
191
+ "__array__",
192
+ "overlaps",
193
+ "contains",
194
+ "closed_left",
195
+ "closed_right",
196
+ "open_left",
197
+ "open_right",
198
+ "is_empty",
199
+ ],
200
+ IntervalArray,
201
+ )
202
+ @inherit_names(["is_non_overlapping_monotonic", "closed"], IntervalArray, cache=True)
203
+ class IntervalIndex(ExtensionIndex):
204
+ _typ = "intervalindex"
205
+
206
+ # annotate properties pinned via inherit_names
207
+ closed: IntervalClosedType
208
+ is_non_overlapping_monotonic: bool
209
+ closed_left: bool
210
+ closed_right: bool
211
+ open_left: bool
212
+ open_right: bool
213
+
214
+ _data: IntervalArray
215
+ _values: IntervalArray
216
+ _can_hold_strings = False
217
+ _data_cls = IntervalArray
218
+
219
+ # --------------------------------------------------------------------
220
+ # Constructors
221
+
222
+ def __new__(
223
+ cls,
224
+ data,
225
+ closed: IntervalClosedType | None = None,
226
+ dtype: Dtype | None = None,
227
+ copy: bool = False,
228
+ name: Hashable | None = None,
229
+ verify_integrity: bool = True,
230
+ ) -> Self:
231
+ name = maybe_extract_name(name, data, cls)
232
+
233
+ with rewrite_exception("IntervalArray", cls.__name__):
234
+ array = IntervalArray(
235
+ data,
236
+ closed=closed,
237
+ copy=copy,
238
+ dtype=dtype,
239
+ verify_integrity=verify_integrity,
240
+ )
241
+
242
+ return cls._simple_new(array, name)
243
+
244
+ @classmethod
245
+ @Appender(
246
+ _interval_shared_docs["from_breaks"]
247
+ % {
248
+ "klass": "IntervalIndex",
249
+ "name": textwrap.dedent(
250
+ """
251
+ name : str, optional
252
+ Name of the resulting IntervalIndex."""
253
+ ),
254
+ "examples": textwrap.dedent(
255
+ """\
256
+ Examples
257
+ --------
258
+ >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])
259
+ IntervalIndex([(0, 1], (1, 2], (2, 3]],
260
+ dtype='interval[int64, right]')
261
+ """
262
+ ),
263
+ }
264
+ )
265
+ def from_breaks(
266
+ cls,
267
+ breaks,
268
+ closed: IntervalClosedType | None = "right",
269
+ name: Hashable | None = None,
270
+ copy: bool = False,
271
+ dtype: Dtype | None = None,
272
+ ) -> IntervalIndex:
273
+ with rewrite_exception("IntervalArray", cls.__name__):
274
+ array = IntervalArray.from_breaks(
275
+ breaks, closed=closed, copy=copy, dtype=dtype
276
+ )
277
+ return cls._simple_new(array, name=name)
278
+
279
+ @classmethod
280
+ @Appender(
281
+ _interval_shared_docs["from_arrays"]
282
+ % {
283
+ "klass": "IntervalIndex",
284
+ "name": textwrap.dedent(
285
+ """
286
+ name : str, optional
287
+ Name of the resulting IntervalIndex."""
288
+ ),
289
+ "examples": textwrap.dedent(
290
+ """\
291
+ Examples
292
+ --------
293
+ >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
294
+ IntervalIndex([(0, 1], (1, 2], (2, 3]],
295
+ dtype='interval[int64, right]')
296
+ """
297
+ ),
298
+ }
299
+ )
300
+ def from_arrays(
301
+ cls,
302
+ left,
303
+ right,
304
+ closed: IntervalClosedType = "right",
305
+ name: Hashable | None = None,
306
+ copy: bool = False,
307
+ dtype: Dtype | None = None,
308
+ ) -> IntervalIndex:
309
+ with rewrite_exception("IntervalArray", cls.__name__):
310
+ array = IntervalArray.from_arrays(
311
+ left, right, closed, copy=copy, dtype=dtype
312
+ )
313
+ return cls._simple_new(array, name=name)
314
+
315
+ @classmethod
316
+ @Appender(
317
+ _interval_shared_docs["from_tuples"]
318
+ % {
319
+ "klass": "IntervalIndex",
320
+ "name": textwrap.dedent(
321
+ """
322
+ name : str, optional
323
+ Name of the resulting IntervalIndex."""
324
+ ),
325
+ "examples": textwrap.dedent(
326
+ """\
327
+ Examples
328
+ --------
329
+ >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
330
+ IntervalIndex([(0, 1], (1, 2]],
331
+ dtype='interval[int64, right]')
332
+ """
333
+ ),
334
+ }
335
+ )
336
+ def from_tuples(
337
+ cls,
338
+ data,
339
+ closed: IntervalClosedType = "right",
340
+ name: Hashable | None = None,
341
+ copy: bool = False,
342
+ dtype: Dtype | None = None,
343
+ ) -> IntervalIndex:
344
+ with rewrite_exception("IntervalArray", cls.__name__):
345
+ arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
346
+ return cls._simple_new(arr, name=name)
347
+
348
+ # --------------------------------------------------------------------
349
+ # error: Return type "IntervalTree" of "_engine" incompatible with return type
350
+ # "Union[IndexEngine, ExtensionEngine]" in supertype "Index"
351
+ @cache_readonly
352
+ def _engine(self) -> IntervalTree: # type: ignore[override]
353
+ # IntervalTree does not support numpy arrays unless they are 64-bit
354
+ left = self._maybe_convert_i8(self.left)
355
+ left = maybe_upcast_numeric_to_64bit(left)
356
+ right = self._maybe_convert_i8(self.right)
357
+ right = maybe_upcast_numeric_to_64bit(right)
358
+ return IntervalTree(left, right, closed=self.closed)
359
+
360
+ def __contains__(self, key: Any) -> bool:
361
+ """
362
+ Return a boolean indicating whether this key is in the index.
363
+ We *only* accept an Interval.
364
+
365
+ Parameters
366
+ ----------
367
+ key : Interval
368
+
369
+ Returns
370
+ -------
371
+ bool
372
+ """
373
+ hash(key)
374
+ if not isinstance(key, Interval):
375
+ if is_valid_na_for_dtype(key, self.dtype):
376
+ return self.hasnans
377
+ return False
378
+
379
+ try:
380
+ self.get_loc(key)
381
+ return True
382
+ except KeyError:
383
+ return False
384
+
385
+ def _getitem_slice(self, slobj: slice) -> IntervalIndex:
386
+ """
387
+ Fastpath for __getitem__ when we know we have a slice.
388
+ """
389
+ res = self._data[slobj]
390
+ return type(self)._simple_new(res, name=self._name)
391
+
392
+ @cache_readonly
393
+ def _multiindex(self) -> MultiIndex:
394
+ return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"])
395
+
396
+ def __reduce__(self):
397
+ d = {
398
+ "left": self.left,
399
+ "right": self.right,
400
+ "closed": self.closed,
401
+ "name": self.name,
402
+ }
403
+ return _new_IntervalIndex, (type(self), d), None
404
+
405
+ @property
406
+ def inferred_type(self) -> str:
407
+ """Return a string of the type inferred from the values"""
408
+ return "interval"
409
+
410
+ # Cannot determine type of "memory_usage"
411
+ @Appender(Index.memory_usage.__doc__) # type: ignore[has-type]
412
+ def memory_usage(self, deep: bool = False) -> int:
413
+ # we don't use an explicit engine
414
+ # so return the bytes here
415
+ return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)
416
+
417
+ # IntervalTree doesn't have a is_monotonic_decreasing, so have to override
418
+ # the Index implementation
419
+ @cache_readonly
420
+ def is_monotonic_decreasing(self) -> bool:
421
+ """
422
+ Return True if the IntervalIndex is monotonic decreasing (only equal or
423
+ decreasing values), else False.
424
+ """
425
+ return self[::-1].is_monotonic_increasing
426
+
427
+ @cache_readonly
428
+ def is_unique(self) -> bool:
429
+ """
430
+ Return True if the IntervalIndex contains unique elements, else False.
431
+ """
432
+ left = self.left
433
+ right = self.right
434
+
435
+ if self.isna().sum() > 1:
436
+ return False
437
+
438
+ if left.is_unique or right.is_unique:
439
+ return True
440
+
441
+ seen_pairs = set()
442
+ check_idx = np.where(left.duplicated(keep=False))[0]
443
+ for idx in check_idx:
444
+ pair = (left[idx], right[idx])
445
+ if pair in seen_pairs:
446
+ return False
447
+ seen_pairs.add(pair)
448
+
449
+ return True
450
+
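+ # Editor's note -- an illustrative doctest (not part of the pandas source)
+ # for the duplicate-pair logic above:
+ # >>> pd.IntervalIndex.from_tuples([(0, 1), (0, 2)]).is_unique
+ # True   # left endpoints repeat, but each (left, right) pair is distinct
+ # >>> pd.IntervalIndex.from_tuples([(0, 1), (0, 1)]).is_unique
+ # False  # an exact duplicate pair
+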
451
+ @property
452
+ def is_overlapping(self) -> bool:
453
+ """
454
+ Return True if the IntervalIndex has overlapping intervals, else False.
455
+
456
+ Two intervals overlap if they share a common point, including closed
457
+ endpoints. Intervals that only have an open endpoint in common do not
458
+ overlap.
459
+
460
+ Returns
461
+ -------
462
+ bool
463
+ Boolean indicating if the IntervalIndex has overlapping intervals.
464
+
465
+ See Also
466
+ --------
467
+ Interval.overlaps : Check whether two Interval objects overlap.
468
+ IntervalIndex.overlaps : Check an IntervalIndex elementwise for
469
+ overlaps.
470
+
471
+ Examples
472
+ --------
473
+ >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])
474
+ >>> index
475
+ IntervalIndex([(0, 2], (1, 3], (4, 5]],
476
+ dtype='interval[int64, right]')
477
+ >>> index.is_overlapping
478
+ True
479
+
480
+ Intervals that share closed endpoints overlap:
481
+
482
+ >>> index = pd.interval_range(0, 3, closed='both')
483
+ >>> index
484
+ IntervalIndex([[0, 1], [1, 2], [2, 3]],
485
+ dtype='interval[int64, both]')
486
+ >>> index.is_overlapping
487
+ True
488
+
489
+ Intervals that only have an open endpoint in common do not overlap:
490
+
491
+ >>> index = pd.interval_range(0, 3, closed='left')
492
+ >>> index
493
+ IntervalIndex([[0, 1), [1, 2), [2, 3)],
494
+ dtype='interval[int64, left]')
495
+ >>> index.is_overlapping
496
+ False
497
+ """
498
+ # GH 23309
499
+ return self._engine.is_overlapping
500
+
501
+ def _needs_i8_conversion(self, key) -> bool:
502
+ """
503
+ Check if a given key needs i8 conversion. Conversion is necessary for
504
+ Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
505
+ Interval-like requires conversion if its endpoints are one of the
506
+ aforementioned types.
507
+
508
+ Assumes that any list-like data has already been cast to an Index.
509
+
510
+ Parameters
511
+ ----------
512
+ key : scalar or Index-like
513
+ The key that should be checked for i8 conversion
514
+
515
+ Returns
516
+ -------
517
+ bool
518
+ """
519
+ key_dtype = getattr(key, "dtype", None)
520
+ if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):
521
+ return self._needs_i8_conversion(key.left)
522
+
523
+ i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)
524
+ return isinstance(key, i8_types)
525
+
526
+ def _maybe_convert_i8(self, key):
527
+ """
528
+ Maybe convert a given key to its equivalent i8 value(s). Used as a
529
+ preprocessing step prior to IntervalTree queries (self._engine), which
530
+ expects numeric data.
531
+
532
+ Parameters
533
+ ----------
534
+ key : scalar or list-like
535
+ The key that should maybe be converted to i8.
536
+
537
+ Returns
538
+ -------
539
+ scalar or list-like
540
+ The original key if no conversion occurred, int if converted scalar,
541
+ Index with an int64 dtype if converted list-like.
542
+ """
543
+ if is_list_like(key):
544
+ key = ensure_index(key)
545
+ key = maybe_upcast_numeric_to_64bit(key)
546
+
547
+ if not self._needs_i8_conversion(key):
548
+ return key
549
+
550
+ scalar = is_scalar(key)
551
+ key_dtype = getattr(key, "dtype", None)
552
+ if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):
553
+ # convert left/right and reconstruct
554
+ left = self._maybe_convert_i8(key.left)
555
+ right = self._maybe_convert_i8(key.right)
556
+ constructor = Interval if scalar else IntervalIndex.from_arrays
557
+ # error: "object" not callable
558
+ return constructor(
559
+ left, right, closed=self.closed
560
+ ) # type: ignore[operator]
561
+
562
+ if scalar:
563
+ # Timestamp/Timedelta
564
+ key_dtype, key_i8 = infer_dtype_from_scalar(key)
565
+ if isinstance(key, Period):
566
+ key_i8 = key.ordinal
567
+ elif isinstance(key_i8, Timestamp):
568
+ key_i8 = key_i8._value
569
+ elif isinstance(key_i8, (np.datetime64, np.timedelta64)):
570
+ key_i8 = key_i8.view("i8")
571
+ else:
572
+ # DatetimeIndex/TimedeltaIndex
573
+ key_dtype, key_i8 = key.dtype, Index(key.asi8)
574
+ if key.hasnans:
575
+ # convert NaT from its i8 value to np.nan so it's not viewed
576
+ # as a valid value, maybe causing errors (e.g. is_overlapping)
577
+ key_i8 = key_i8.where(~key._isnan)
578
+
579
+ # ensure consistency with IntervalIndex subtype
580
+ # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],
581
+ # ExtensionDtype]" has no attribute "subtype"
582
+ subtype = self.dtype.subtype # type: ignore[union-attr]
583
+
584
+ if subtype != key_dtype:
585
+ raise ValueError(
586
+ f"Cannot index an IntervalIndex of subtype {subtype} with "
587
+ f"values of dtype {key_dtype}"
588
+ )
589
+
590
+ return key_i8
591
+
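+ # Editor's note (illustrative, not pandas source): the "i8" representation
+ # used by the engine is nanoseconds since the Unix epoch, e.g.
+ # >>> pd.Timestamp("2017-01-01").value
+ # 1483228800000000000
+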
592
+ def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
593
+ if not self.is_non_overlapping_monotonic:
594
+ raise KeyError(
595
+ "can only get slices from an IntervalIndex if bounds are "
596
+ "non-overlapping and all monotonic increasing or decreasing"
597
+ )
598
+
599
+ if isinstance(label, (IntervalMixin, IntervalIndex)):
600
+ raise NotImplementedError("Interval objects are not currently supported")
601
+
602
+ # GH 20921: "not is_monotonic_increasing" for the second condition
603
+ # instead of "is_monotonic_decreasing" to account for single element
604
+ # indexes being both increasing and decreasing
605
+ if (side == "left" and self.left.is_monotonic_increasing) or (
606
+ side == "right" and not self.left.is_monotonic_increasing
607
+ ):
608
+ sub_idx = self.right
609
+ if self.open_right:
610
+ label = _get_next_label(label)
611
+ else:
612
+ sub_idx = self.left
613
+ if self.open_left:
614
+ label = _get_prev_label(label)
615
+
616
+ return sub_idx._searchsorted_monotonic(label, side)
617
+
618
+ # --------------------------------------------------------------------
619
+ # Indexing Methods
620
+
621
+ def get_loc(self, key) -> int | slice | np.ndarray:
622
+ """
623
+ Get integer location, slice or boolean mask for requested label.
624
+
625
+ Parameters
626
+ ----------
627
+ key : label
628
+
629
+ Returns
630
+ -------
631
+ int if unique index, slice if monotonic index, else mask
632
+
633
+ Examples
634
+ --------
635
+ >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
636
+ >>> index = pd.IntervalIndex([i1, i2])
637
+ >>> index.get_loc(1)
638
+ 0
639
+
640
+ You can also supply a point inside an interval.
641
+
642
+ >>> index.get_loc(1.5)
643
+ 1
644
+
645
+ If a label is in several intervals, you get the locations of all the
646
+ relevant intervals.
647
+
648
+ >>> i3 = pd.Interval(0, 2)
649
+ >>> overlapping_index = pd.IntervalIndex([i1, i2, i3])
650
+ >>> overlapping_index.get_loc(0.5)
651
+ array([ True, False, True])
652
+
653
+ Only exact matches will be returned if an interval is provided.
654
+
655
+ >>> index.get_loc(pd.Interval(0, 1))
656
+ 0
657
+ """
658
+ self._check_indexing_error(key)
659
+
660
+ if isinstance(key, Interval):
661
+ if self.closed != key.closed:
662
+ raise KeyError(key)
663
+ mask = (self.left == key.left) & (self.right == key.right)
664
+ elif is_valid_na_for_dtype(key, self.dtype):
665
+ mask = self.isna()
666
+ else:
667
+ # assume scalar
668
+ op_left = le if self.closed_left else lt
669
+ op_right = le if self.closed_right else lt
670
+ try:
671
+ mask = op_left(self.left, key) & op_right(key, self.right)
672
+ except TypeError as err:
673
+ # scalar is not comparable to II subtype --> invalid label
674
+ raise KeyError(key) from err
675
+
676
+ matches = mask.sum()
677
+ if matches == 0:
678
+ raise KeyError(key)
679
+ if matches == 1:
680
+ return mask.argmax()
681
+
682
+ res = lib.maybe_booleans_to_slice(mask.view("u1"))
683
+ if isinstance(res, slice) and res.stop is None:
684
+ # TODO: do this in maybe_booleans_to_slice?
685
+ res = slice(res.start, len(self), res.step)
686
+ return res
687
+
688
+ def _get_indexer(
689
+ self,
690
+ target: Index,
691
+ method: str | None = None,
692
+ limit: int | None = None,
693
+ tolerance: Any | None = None,
694
+ ) -> npt.NDArray[np.intp]:
695
+ if isinstance(target, IntervalIndex):
696
+ # We only get here with not self.is_overlapping
697
+ # -> at most one match per interval in target
698
+ # want exact matches -> need both left/right to match, so defer to
699
+ # left/right get_indexer, compare elementwise, equality -> match
700
+ indexer = self._get_indexer_unique_sides(target)
701
+
702
+ elif not is_object_dtype(target.dtype):
703
+ # homogeneous scalar index: use IntervalTree
704
+ # we should always have self._should_partial_index(target) here
705
+ target = self._maybe_convert_i8(target)
706
+ indexer = self._engine.get_indexer(target.values)
707
+ else:
708
+ # heterogeneous scalar index: defer elementwise to get_loc
709
+ # we should always have self._should_partial_index(target) here
710
+ return self._get_indexer_pointwise(target)[0]
711
+
712
+ return ensure_platform_int(indexer)
713
+
714
+ @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
715
+ def get_indexer_non_unique(
716
+ self, target: Index
717
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
718
+ target = ensure_index(target)
719
+
720
+ if not self._should_compare(target) and not self._should_partial_index(target):
721
+ # e.g. IntervalIndex with different closed or incompatible subtype
722
+ # -> no matches
723
+ return self._get_indexer_non_comparable(target, None, unique=False)
724
+
725
+ elif isinstance(target, IntervalIndex):
726
+ if self.left.is_unique and self.right.is_unique:
727
+ # fastpath available even if we don't have self._index_as_unique
728
+ indexer = self._get_indexer_unique_sides(target)
729
+ missing = (indexer == -1).nonzero()[0]
730
+ else:
731
+ return self._get_indexer_pointwise(target)
732
+
733
+ elif is_object_dtype(target.dtype) or not self._should_partial_index(target):
734
+ # target might contain intervals: defer elementwise to get_loc
735
+ return self._get_indexer_pointwise(target)
736
+
737
+ else:
738
+ # Note: this case behaves differently from other Index subclasses
739
+ # because IntervalIndex does partial-int indexing
740
+ target = self._maybe_convert_i8(target)
741
+ indexer, missing = self._engine.get_indexer_non_unique(target.values)
742
+
743
+ return ensure_platform_int(indexer), ensure_platform_int(missing)
744
+
745
+ def _get_indexer_unique_sides(self, target: IntervalIndex) -> npt.NDArray[np.intp]:
746
+ """
747
+ _get_indexer specialized to the case where both of our sides are unique.
748
+ """
749
+ # Caller is responsible for checking
750
+ # `self.left.is_unique and self.right.is_unique`
751
+
752
+ left_indexer = self.left.get_indexer(target.left)
753
+ right_indexer = self.right.get_indexer(target.right)
754
+ indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
755
+ return indexer
756
+
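+ # Editor's sketch of the matching rule above (values are invented):
+ #   left_indexer  = [0, 2, -1]
+ #   right_indexer = [0, 1, -1]
+ #   indexer       = [0, -1, -1]
+ # A target interval matches only where both of its endpoints resolve to
+ # the same position in self; everything else becomes -1 (no match).
+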
757
+ def _get_indexer_pointwise(
758
+ self, target: Index
759
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
760
+ """
761
+ pointwise implementation for get_indexer and get_indexer_non_unique.
762
+ """
763
+ indexer, missing = [], []
764
+ for i, key in enumerate(target):
765
+ try:
766
+ locs = self.get_loc(key)
767
+ if isinstance(locs, slice):
768
+ # Only needed for get_indexer_non_unique
769
+ locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp")
770
+ elif lib.is_integer(locs):
771
+ locs = np.array(locs, ndmin=1)
772
+ else:
773
+ # otherwise we have ndarray[bool]
774
+ locs = np.where(locs)[0]
775
+ except KeyError:
776
+ missing.append(i)
777
+ locs = np.array([-1])
778
+ except InvalidIndexError:
779
+ # i.e. non-scalar key e.g. a tuple.
780
+ # see test_append_different_columns_types_raises
781
+ missing.append(i)
782
+ locs = np.array([-1])
783
+
784
+ indexer.append(locs)
785
+
786
+ indexer = np.concatenate(indexer)
787
+ return ensure_platform_int(indexer), ensure_platform_int(missing)
788
+
789
+ @cache_readonly
790
+ def _index_as_unique(self) -> bool:
791
+ return not self.is_overlapping and self._engine._na_count < 2
792
+
793
+ _requires_unique_msg = (
794
+ "cannot handle overlapping indices; use IntervalIndex.get_indexer_non_unique"
795
+ )
796
+
797
+ def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]):
798
+ if not (key.step is None or key.step == 1):
799
+ # GH#31658 if label-based, we require step == 1,
800
+ # if positional, we disallow float start/stop
801
+ msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
802
+ if kind == "loc":
803
+ raise ValueError(msg)
804
+ if kind == "getitem":
805
+ if not is_valid_positional_slice(key):
806
+ # i.e. this cannot be interpreted as a positional slice
807
+ raise ValueError(msg)
808
+
809
+ return super()._convert_slice_indexer(key, kind)
810
+
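+ # Editor's note (behavioral sketch, not pandas source): label-based
+ # slicing such as ii.loc[0:4:2] raises, while the purely positional
+ # ii[::2] is still allowed because its bounds are valid positions.
+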
811
+ @cache_readonly
812
+ def _should_fallback_to_positional(self) -> bool:
813
+ # integer lookups in Series.__getitem__ are unambiguously
814
+ # positional in this case
815
+ # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],
816
+ # ExtensionDtype]" has no attribute "subtype"
817
+ return self.dtype.subtype.kind in "mM" # type: ignore[union-attr]
818
+
819
+ def _maybe_cast_slice_bound(self, label, side: str):
820
+ return getattr(self, side)._maybe_cast_slice_bound(label, side)
821
+
822
+ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
823
+ if not isinstance(dtype, IntervalDtype):
824
+ return False
825
+ common_subtype = find_common_type([self.dtype, dtype])
826
+ return not is_object_dtype(common_subtype)
827
+
828
+ # --------------------------------------------------------------------
829
+
830
+ @cache_readonly
831
+ def left(self) -> Index:
832
+ return Index(self._data.left, copy=False)
833
+
834
+ @cache_readonly
835
+ def right(self) -> Index:
836
+ return Index(self._data.right, copy=False)
837
+
838
+ @cache_readonly
839
+ def mid(self) -> Index:
840
+ return Index(self._data.mid, copy=False)
841
+
842
+ @property
843
+ def length(self) -> Index:
844
+ return Index(self._data.length, copy=False)
845
+
846
+ # --------------------------------------------------------------------
847
+ # Set Operations
848
+
849
+ def _intersection(self, other, sort):
850
+ """
851
+ intersection specialized to the case with matching dtypes.
852
+ """
853
+ # For IntervalIndex we also know other.closed == self.closed
854
+ if self.left.is_unique and self.right.is_unique:
855
+ taken = self._intersection_unique(other)
856
+ elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:
857
+ # Swap other/self if other is unique and self does not have
858
+ # multiple NaNs
859
+ taken = other._intersection_unique(self)
860
+ else:
861
+ # duplicates
862
+ taken = self._intersection_non_unique(other)
863
+
864
+ if sort is None:
865
+ taken = taken.sort_values()
866
+
867
+ return taken
868
+
869
+ def _intersection_unique(self, other: IntervalIndex) -> IntervalIndex:
870
+ """
871
+ Used when both the left and the right endpoints of the
872
+ IntervalIndex are unique (no duplicates on either side).
873
+ Return the intersection with another IntervalIndex.
874
+
+ Parameters
875
+ ----------
876
+ other : IntervalIndex
877
+
+ Returns
878
+ -------
879
+ IntervalIndex
880
+ """
881
+ # Note: this is much more performant than super()._intersection(other)
882
+ lindexer = self.left.get_indexer(other.left)
883
+ rindexer = self.right.get_indexer(other.right)
884
+
885
+ match = (lindexer == rindexer) & (lindexer != -1)
886
+ indexer = lindexer.take(match.nonzero()[0])
887
+ indexer = unique(indexer)
888
+
889
+ return self.take(indexer)
890
+
891
+ def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex:
892
+ """
893
+ Used when the IntervalIndex has some repeated endpoints
894
+ on either side.
895
+ Return the intersection with another IntervalIndex.
896
+
897
+ Parameters
898
+ ----------
899
+ other : IntervalIndex
900
+
901
+ Returns
902
+ -------
903
+ IntervalIndex
904
+ """
905
+ # Note: this is about 3.25x faster than super()._intersection(other)
906
+ # in IntervalIndexMethod.time_intersection_both_duplicate(1000)
907
+ mask = np.zeros(len(self), dtype=bool)
908
+
909
+ if self.hasnans and other.hasnans:
910
+ first_nan_loc = np.arange(len(self))[self.isna()][0]
911
+ mask[first_nan_loc] = True
912
+
913
+ other_tups = set(zip(other.left, other.right))
914
+ for i, tup in enumerate(zip(self.left, self.right)):
915
+ if tup in other_tups:
916
+ mask[i] = True
917
+
918
+ return self[mask]
919
+
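+ # Editor's note: the loop above reduces intersection to set membership on
+ # (left, right) tuples, so every position in self whose endpoint pair also
+ # appears in `other` is kept, duplicates included.
+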
920
+ # --------------------------------------------------------------------
921
+
922
+ def _get_engine_target(self) -> np.ndarray:
923
+ # Note: we _could_ use libjoin functions by either casting to object
924
+ # dtype or constructing tuples (faster than constructing Intervals)
925
+ # but the libjoin fastpaths are no longer fast in these cases.
926
+ raise NotImplementedError(
927
+ "IntervalIndex does not use libjoin fastpaths or pass values to "
928
+ "IndexEngine objects"
929
+ )
930
+
931
+ def _from_join_target(self, result):
932
+ raise NotImplementedError("IntervalIndex does not use libjoin fastpaths")
933
+
934
+ # TODO: arithmetic operations
935
+
936
+
937
+ def _is_valid_endpoint(endpoint) -> bool:
938
+ """
939
+ Helper for interval_range to check if start/end are valid types.
940
+ """
941
+ return any(
942
+ [
943
+ is_number(endpoint),
944
+ isinstance(endpoint, Timestamp),
945
+ isinstance(endpoint, Timedelta),
946
+ endpoint is None,
947
+ ]
948
+ )
949
+
950
+
951
+ def _is_type_compatible(a, b) -> bool:
952
+ """
953
+ Helper for interval_range to check type compat of start/end/freq.
954
+ """
955
+ is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))
956
+ is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))
957
+ return (
958
+ (is_number(a) and is_number(b))
959
+ or (is_ts_compat(a) and is_ts_compat(b))
960
+ or (is_td_compat(a) and is_td_compat(b))
961
+ or com.any_none(a, b)
962
+ )
963
+
964
+
965
+ def interval_range(
966
+ start=None,
967
+ end=None,
968
+ periods=None,
969
+ freq=None,
970
+ name: Hashable | None = None,
971
+ closed: IntervalClosedType = "right",
972
+ ) -> IntervalIndex:
973
+ """
974
+ Return a fixed frequency IntervalIndex.
975
+
976
+ Parameters
977
+ ----------
978
+ start : numeric or datetime-like, default None
979
+ Left bound for generating intervals.
980
+ end : numeric or datetime-like, default None
981
+ Right bound for generating intervals.
982
+ periods : int, default None
983
+ Number of periods to generate.
984
+ freq : numeric, str, Timedelta, datetime.timedelta, or DateOffset, default None
985
+ The length of each interval. Must be consistent with the type of start
986
+ and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
987
+ for numeric and 'D' for datetime-like.
988
+ name : str, default None
989
+ Name of the resulting IntervalIndex.
990
+ closed : {'left', 'right', 'both', 'neither'}, default 'right'
991
+ Whether the intervals are closed on the left-side, right-side, both
992
+ or neither.
993
+
994
+ Returns
995
+ -------
996
+ IntervalIndex
997
+
998
+ See Also
999
+ --------
1000
+ IntervalIndex : An Index of intervals that are all closed on the same side.
1001
+
1002
+ Notes
1003
+ -----
1004
+ Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
1005
+ exactly three must be specified. If ``freq`` is omitted, the resulting
1006
+ ``IntervalIndex`` will have ``periods`` linearly spaced elements between
1007
+ ``start`` and ``end``, inclusively.
1008
+
1009
+ To learn more about datetime-like frequency strings, please see `this link
1010
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
1011
+
1012
+ Examples
1013
+ --------
1014
+ Numeric ``start`` and ``end`` are supported.
1015
+
1016
+ >>> pd.interval_range(start=0, end=5)
1017
+ IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
1018
+ dtype='interval[int64, right]')
1019
+
1020
+ Additionally, datetime-like input is also supported.
1021
+
1022
+ >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
1023
+ ... end=pd.Timestamp('2017-01-04'))
1024
+ IntervalIndex([(2017-01-01 00:00:00, 2017-01-02 00:00:00],
1025
+ (2017-01-02 00:00:00, 2017-01-03 00:00:00],
1026
+ (2017-01-03 00:00:00, 2017-01-04 00:00:00]],
1027
+ dtype='interval[datetime64[ns], right]')
1028
+
1029
+ The ``freq`` parameter specifies the frequency between the left and right
1030
+ endpoints of the individual intervals within the ``IntervalIndex``. For
1031
+ numeric ``start`` and ``end``, the frequency must also be numeric.
1032
+
1033
+ >>> pd.interval_range(start=0, periods=4, freq=1.5)
1034
+ IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
1035
+ dtype='interval[float64, right]')
1036
+
1037
+ Similarly, for datetime-like ``start`` and ``end``, the frequency must be
1038
+ convertible to a DateOffset.
1039
+
1040
+ >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
1041
+ ... periods=3, freq='MS')
1042
+ IntervalIndex([(2017-01-01 00:00:00, 2017-02-01 00:00:00],
1043
+ (2017-02-01 00:00:00, 2017-03-01 00:00:00],
1044
+ (2017-03-01 00:00:00, 2017-04-01 00:00:00]],
1045
+ dtype='interval[datetime64[ns], right]')
1046
+
1047
+ Specify ``start``, ``end``, and ``periods``; the frequency is generated
1048
+ automatically (linearly spaced).
1049
+
1050
+ >>> pd.interval_range(start=0, end=6, periods=4)
1051
+ IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
1052
+ dtype='interval[float64, right]')
1053
+
1054
+ The ``closed`` parameter specifies which endpoints of the individual
1055
+ intervals within the ``IntervalIndex`` are closed.
1056
+
1057
+ >>> pd.interval_range(end=5, periods=4, closed='both')
1058
+ IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
1059
+ dtype='interval[int64, both]')
1060
+ """
1061
+ start = maybe_box_datetimelike(start)
1062
+ end = maybe_box_datetimelike(end)
1063
+ endpoint = start if start is not None else end
1064
+
1065
+ if freq is None and com.any_none(periods, start, end):
1066
+ freq = 1 if is_number(endpoint) else "D"
1067
+
1068
+ if com.count_not_none(start, end, periods, freq) != 3:
1069
+ raise ValueError(
1070
+ "Of the four parameters: start, end, periods, and "
1071
+ "freq, exactly three must be specified"
1072
+ )
1073
+
1074
+ if not _is_valid_endpoint(start):
1075
+ raise ValueError(f"start must be numeric or datetime-like, got {start}")
1076
+ if not _is_valid_endpoint(end):
1077
+ raise ValueError(f"end must be numeric or datetime-like, got {end}")
1078
+
1079
+ periods = validate_periods(periods)
1080
+
1081
+ if freq is not None and not is_number(freq):
1082
+ try:
1083
+ freq = to_offset(freq)
1084
+ except ValueError as err:
1085
+ raise ValueError(
1086
+ f"freq must be numeric or convertible to DateOffset, got {freq}"
1087
+ ) from err
1088
+
1089
+ # verify type compatibility
1090
+ if not all(
1091
+ [
1092
+ _is_type_compatible(start, end),
1093
+ _is_type_compatible(start, freq),
1094
+ _is_type_compatible(end, freq),
1095
+ ]
1096
+ ):
1097
+ raise TypeError("start, end, freq need to be type compatible")
1098
+
1099
+ # +1 to convert interval count to breaks count (n breaks = n-1 intervals)
1100
+ if periods is not None:
1101
+ periods += 1
1102
+
1103
+ breaks: np.ndarray | TimedeltaIndex | DatetimeIndex
1104
+
1105
+ if is_number(endpoint):
1106
+ if com.all_not_none(start, end, freq):
1107
+ # 0.1 ensures we capture end
1108
+ breaks = np.arange(start, end + (freq * 0.1), freq)
1109
+ else:
1110
+ # compute the period/start/end if unspecified (at most one)
1111
+ if periods is None:
1112
+ periods = int((end - start) // freq) + 1
1113
+ elif start is None:
1114
+ start = end - (periods - 1) * freq
1115
+ elif end is None:
1116
+ end = start + (periods - 1) * freq
1117
+
1118
+ breaks = np.linspace(start, end, periods)
1119
+ if all(is_integer(x) for x in com.not_none(start, end, freq)):
1120
+ # np.linspace always produces float output
1121
+
1122
+ # error: Argument 1 to "maybe_downcast_numeric" has incompatible type
1123
+ # "Union[ndarray[Any, Any], TimedeltaIndex, DatetimeIndex]";
1124
+ # expected "ndarray[Any, Any]" [
1125
+ breaks = maybe_downcast_numeric(
1126
+ breaks, # type: ignore[arg-type]
1127
+ np.dtype("int64"),
1128
+ )
1129
+ else:
1130
+ # delegate to the appropriate range function
1131
+ if isinstance(endpoint, Timestamp):
1132
+ breaks = date_range(start=start, end=end, periods=periods, freq=freq)
1133
+ else:
1134
+ breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq)
1135
+
1136
+ return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
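+
+
+ # Editor's usage sketch (not part of the pandas source): any three of
+ # start/end/periods/freq determine the index, e.g.
+ # >>> pd.interval_range(start=0, periods=3, freq=2)
+ # IntervalIndex([(0, 2], (2, 4], (4, 6]], dtype='interval[int64, right]')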
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/multi.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/period.py ADDED
@@ -0,0 +1,614 @@
1
+ from __future__ import annotations
2
+
3
+ from datetime import (
4
+ datetime,
5
+ timedelta,
6
+ )
7
+ from typing import TYPE_CHECKING
8
+ import warnings
9
+
10
+ import numpy as np
11
+
12
+ from pandas._libs import index as libindex
13
+ from pandas._libs.tslibs import (
14
+ BaseOffset,
15
+ NaT,
16
+ Period,
17
+ Resolution,
18
+ Tick,
19
+ )
20
+ from pandas._libs.tslibs.dtypes import OFFSET_TO_PERIOD_FREQSTR
21
+ from pandas.util._decorators import (
22
+ cache_readonly,
23
+ doc,
24
+ )
25
+ from pandas.util._exceptions import find_stack_level
26
+
27
+ from pandas.core.dtypes.common import is_integer
28
+ from pandas.core.dtypes.dtypes import PeriodDtype
29
+ from pandas.core.dtypes.generic import ABCSeries
30
+ from pandas.core.dtypes.missing import is_valid_na_for_dtype
31
+
32
+ from pandas.core.arrays.period import (
33
+ PeriodArray,
34
+ period_array,
35
+ raise_on_incompatible,
36
+ validate_dtype_freq,
37
+ )
38
+ import pandas.core.common as com
39
+ import pandas.core.indexes.base as ibase
40
+ from pandas.core.indexes.base import maybe_extract_name
41
+ from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
42
+ from pandas.core.indexes.datetimes import (
43
+ DatetimeIndex,
44
+ Index,
45
+ )
46
+ from pandas.core.indexes.extension import inherit_names
47
+
48
+ if TYPE_CHECKING:
49
+ from collections.abc import Hashable
50
+
51
+ from pandas._typing import (
52
+ Dtype,
53
+ DtypeObj,
54
+ Self,
55
+ npt,
56
+ )
57
+
58
+
59
+ _index_doc_kwargs = dict(ibase._index_doc_kwargs)
60
+ _index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})
61
+ _shared_doc_kwargs = {
62
+ "klass": "PeriodArray",
63
+ }
64
+
65
+ # --- Period index sketch
66
+
67
+
68
+ def _new_PeriodIndex(cls, **d):
69
+ # GH13277 for unpickling
70
+ values = d.pop("data")
71
+ if values.dtype == "int64":
72
+ freq = d.pop("freq", None)
73
+ dtype = PeriodDtype(freq)
74
+ values = PeriodArray(values, dtype=dtype)
75
+ return cls._simple_new(values, **d)
76
+ else:
77
+ return cls(values, **d)
78
+
79
+
80
+ @inherit_names(
81
+ ["strftime", "start_time", "end_time"] + PeriodArray._field_ops,
82
+ PeriodArray,
83
+ wrap=True,
84
+ )
85
+ @inherit_names(["is_leap_year"], PeriodArray)
86
+ class PeriodIndex(DatetimeIndexOpsMixin):
87
+ """
88
+ Immutable ndarray holding ordinal values indicating regular periods in time.
89
+
90
+ Index keys are boxed to Period objects which carry the metadata (e.g.,
91
+ frequency information).
92
+
93
+ Parameters
94
+ ----------
95
+ data : array-like (1d int np.ndarray or PeriodArray), optional
96
+ Optional period-like data to construct index with.
97
+ copy : bool
98
+ Make a copy of input ndarray.
99
+ freq : str or period object, optional
100
+ One of pandas period strings or corresponding objects.
101
+ year : int, array, or Series, default None
102
+
103
+ .. deprecated:: 2.2.0
104
+ Use PeriodIndex.from_fields instead.
105
+ month : int, array, or Series, default None
106
+
107
+ .. deprecated:: 2.2.0
108
+ Use PeriodIndex.from_fields instead.
109
+ quarter : int, array, or Series, default None
110
+
111
+ .. deprecated:: 2.2.0
112
+ Use PeriodIndex.from_fields instead.
113
+ day : int, array, or Series, default None
114
+
115
+ .. deprecated:: 2.2.0
116
+ Use PeriodIndex.from_fields instead.
117
+ hour : int, array, or Series, default None
118
+
119
+ .. deprecated:: 2.2.0
120
+ Use PeriodIndex.from_fields instead.
121
+ minute : int, array, or Series, default None
122
+
123
+ .. deprecated:: 2.2.0
124
+ Use PeriodIndex.from_fields instead.
125
+ second : int, array, or Series, default None
126
+
127
+ .. deprecated:: 2.2.0
128
+ Use PeriodIndex.from_fields instead.
129
+ dtype : str or PeriodDtype, default None
130
+
131
+ Attributes
132
+ ----------
133
+ day
134
+ dayofweek
135
+ day_of_week
136
+ dayofyear
137
+ day_of_year
138
+ days_in_month
139
+ daysinmonth
140
+ end_time
141
+ freq
142
+ freqstr
143
+ hour
144
+ is_leap_year
145
+ minute
146
+ month
147
+ quarter
148
+ qyear
149
+ second
150
+ start_time
151
+ week
152
+ weekday
153
+ weekofyear
154
+ year
155
+
156
+ Methods
157
+ -------
158
+ asfreq
159
+ strftime
160
+ to_timestamp
161
+ from_fields
162
+ from_ordinals
163
+
164
+ See Also
165
+ --------
166
+ Index : The base pandas Index type.
167
+ Period : Represents a period of time.
168
+ DatetimeIndex : Index with datetime64 data.
169
+ TimedeltaIndex : Index of timedelta64 data.
170
+ period_range : Create a fixed-frequency PeriodIndex.
171
+
172
+ Examples
173
+ --------
174
+ >>> idx = pd.PeriodIndex.from_fields(year=[2000, 2002], quarter=[1, 3])
175
+ >>> idx
176
+ PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
177
+ """
178
+
179
+ _typ = "periodindex"
180
+
181
+ _data: PeriodArray
182
+ freq: BaseOffset
183
+ dtype: PeriodDtype
184
+
185
+ _data_cls = PeriodArray
186
+ _supports_partial_string_indexing = True
187
+
188
+ @property
189
+ def _engine_type(self) -> type[libindex.PeriodEngine]:
190
+ return libindex.PeriodEngine
191
+
192
+ @cache_readonly
193
+ def _resolution_obj(self) -> Resolution:
194
+ # for compat with DatetimeIndex
195
+ return self.dtype._resolution_obj
196
+
197
+ # --------------------------------------------------------------------
198
+ # methods that dispatch to array and wrap result in Index
199
+ # These are defined here instead of via inherit_names for mypy
200
+
201
+ @doc(
202
+ PeriodArray.asfreq,
203
+ other="pandas.arrays.PeriodArray",
204
+ other_name="PeriodArray",
205
+ **_shared_doc_kwargs,
206
+ )
207
+ def asfreq(self, freq=None, how: str = "E") -> Self:
208
+ arr = self._data.asfreq(freq, how)
209
+ return type(self)._simple_new(arr, name=self.name)
210
+
211
+ @doc(PeriodArray.to_timestamp)
212
+ def to_timestamp(self, freq=None, how: str = "start") -> DatetimeIndex:
213
+ arr = self._data.to_timestamp(freq, how)
214
+ return DatetimeIndex._simple_new(arr, name=self.name)
215
+
216
+ @property
217
+ @doc(PeriodArray.hour.fget)
218
+ def hour(self) -> Index:
219
+ return Index(self._data.hour, name=self.name)
220
+
221
+ @property
222
+ @doc(PeriodArray.minute.fget)
223
+ def minute(self) -> Index:
224
+ return Index(self._data.minute, name=self.name)
225
+
226
+ @property
227
+ @doc(PeriodArray.second.fget)
228
+ def second(self) -> Index:
229
+ return Index(self._data.second, name=self.name)
230
+
231
+ # ------------------------------------------------------------------------
232
+ # Index Constructors
233
+
234
+ def __new__(
235
+ cls,
236
+ data=None,
237
+ ordinal=None,
238
+ freq=None,
239
+ dtype: Dtype | None = None,
240
+ copy: bool = False,
241
+ name: Hashable | None = None,
242
+ **fields,
243
+ ) -> Self:
244
+ valid_field_set = {
245
+ "year",
246
+ "month",
247
+ "day",
248
+ "quarter",
249
+ "hour",
250
+ "minute",
251
+ "second",
252
+ }
253
+
254
+ refs = None
255
+ if not copy and isinstance(data, (Index, ABCSeries)):
256
+ refs = data._references
257
+
258
+ if not set(fields).issubset(valid_field_set):
259
+ argument = next(iter(set(fields) - valid_field_set))
260
+ raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
261
+ elif len(fields):
262
+ # GH#55960
263
+ warnings.warn(
264
+ "Constructing PeriodIndex from fields is deprecated. Use "
265
+ "PeriodIndex.from_fields instead.",
266
+ FutureWarning,
267
+ stacklevel=find_stack_level(),
268
+ )
269
+
270
+ if ordinal is not None:
271
+ # GH#55960
272
+ warnings.warn(
273
+ "The 'ordinal' keyword in PeriodIndex is deprecated and will "
274
+ "be removed in a future version. Use PeriodIndex.from_ordinals "
275
+ "instead.",
276
+ FutureWarning,
277
+ stacklevel=find_stack_level(),
278
+ )
279
+
280
+ name = maybe_extract_name(name, data, cls)
281
+
282
+ if data is None and ordinal is None:
283
+ # range-based.
284
+ if not fields:
285
+ # test_pickle_compat_construction
286
+ cls._raise_scalar_data_error(None)
287
+ data = cls.from_fields(**fields, freq=freq)._data
288
+ copy = False
289
+
290
+ elif fields:
291
+ if data is not None:
292
+ raise ValueError("Cannot pass both data and fields")
293
+ raise ValueError("Cannot pass both ordinal and fields")
294
+
295
+ else:
296
+ freq = validate_dtype_freq(dtype, freq)
297
+
298
+ # PeriodIndex allow PeriodIndex(period_index, freq=different)
299
+ # Let's not encourage that kind of behavior in PeriodArray.
300
+
301
+ if freq and isinstance(data, cls) and data.freq != freq:
302
+ # TODO: We can do some of these with no-copy / coercion?
303
+ # e.g. D -> 2D seems to be OK
304
+ data = data.asfreq(freq)
305
+
306
+ if data is None and ordinal is not None:
307
+ ordinal = np.asarray(ordinal, dtype=np.int64)
308
+ dtype = PeriodDtype(freq)
309
+ data = PeriodArray(ordinal, dtype=dtype)
310
+ elif data is not None and ordinal is not None:
311
+ raise ValueError("Cannot pass both data and ordinal")
312
+ else:
313
+ # don't pass copy here, since we copy later.
314
+ data = period_array(data=data, freq=freq)
315
+
316
+ if copy:
317
+ data = data.copy()
318
+
319
+ return cls._simple_new(data, name=name, refs=refs)
320
+
321
+ @classmethod
322
+ def from_fields(
323
+ cls,
324
+ *,
325
+ year=None,
326
+ quarter=None,
327
+ month=None,
328
+ day=None,
329
+ hour=None,
330
+ minute=None,
331
+ second=None,
332
+ freq=None,
333
+ ) -> Self:
334
+ fields = {
335
+ "year": year,
336
+ "quarter": quarter,
337
+ "month": month,
338
+ "day": day,
339
+ "hour": hour,
340
+ "minute": minute,
341
+ "second": second,
342
+ }
343
+ fields = {key: value for key, value in fields.items() if value is not None}
344
+ arr = PeriodArray._from_fields(fields=fields, freq=freq)
345
+ return cls._simple_new(arr)
346
+
347
+ @classmethod
348
+ def from_ordinals(cls, ordinals, *, freq, name=None) -> Self:
349
+ ordinals = np.asarray(ordinals, dtype=np.int64)
350
+ dtype = PeriodDtype(freq)
351
+ data = PeriodArray._simple_new(ordinals, dtype=dtype)
352
+ return cls._simple_new(data, name=name)
353
+
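+ # Editor's sketch (not pandas source): ordinals count whole periods from
+ # the epoch period, so at yearly frequency ordinal 0 is 1970:
+ # >>> pd.PeriodIndex.from_ordinals([0, 1], freq="Y")
+ # PeriodIndex(['1970', '1971'], dtype='period[Y-DEC]')
+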
354
+ # ------------------------------------------------------------------------
355
+ # Data
356
+
357
+ @property
358
+ def values(self) -> npt.NDArray[np.object_]:
359
+ return np.asarray(self, dtype=object)
360
+
361
+ def _maybe_convert_timedelta(self, other) -> int | npt.NDArray[np.int64]:
362
+ """
363
+ Convert timedelta-like input to an integer multiple of self.freq
364
+
365
+ Parameters
366
+ ----------
367
+ other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
368
+
369
+ Returns
370
+ -------
371
+ converted : int, np.ndarray[int64]
372
+
373
+ Raises
374
+ ------
375
+ IncompatibleFrequency : if the input cannot be written as a multiple
376
+ of self.freq. Note IncompatibleFrequency subclasses ValueError.
377
+ """
378
+ if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
379
+ if isinstance(self.freq, Tick):
380
+ # _check_timedeltalike_freq_compat will raise if incompatible
381
+ delta = self._data._check_timedeltalike_freq_compat(other)
382
+ return delta
383
+ elif isinstance(other, BaseOffset):
384
+ if other.base == self.freq.base:
385
+ return other.n
386
+
387
+ raise raise_on_incompatible(self, other)
388
+ elif is_integer(other):
389
+ assert isinstance(other, int)
390
+ return other
391
+
392
+ # raise when input doesn't have freq
393
+ raise raise_on_incompatible(self, None)
394
+
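+ # Editor's note (behavioral sketch): with freq="D", Timedelta("2 days")
+ # converts to the integer 2 (two steps of the index frequency), while
+ # Timedelta("36 hours") raises IncompatibleFrequency.
+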
395
+ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
396
+ """
397
+ Can we compare values of the given dtype to our own?
398
+ """
399
+ return self.dtype == dtype
400
+
401
+ # ------------------------------------------------------------------------
402
+ # Index Methods
403
+
404
+ def asof_locs(self, where: Index, mask: npt.NDArray[np.bool_]) -> np.ndarray:
405
+ """
406
+ where : array of timestamps
407
+ mask : np.ndarray[bool]
408
+ Array of booleans where data is not NA.
409
+ """
410
+ if isinstance(where, DatetimeIndex):
411
+ where = PeriodIndex(where._values, freq=self.freq)
412
+ elif not isinstance(where, PeriodIndex):
413
+ raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
414
+
415
+ return super().asof_locs(where, mask)
416
+
417
+ @property
418
+ def is_full(self) -> bool:
419
+ """
420
+ Returns True if this PeriodIndex is range-like in that all Periods
421
+ between start and end are present, in order.
422
+ """
423
+ if len(self) == 0:
424
+ return True
425
+ if not self.is_monotonic_increasing:
426
+ raise ValueError("Index is not monotonic")
427
+ values = self.asi8
428
+ return bool(((values[1:] - values[:-1]) < 2).all())
429
+
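+ # Editor's note: for a monotonic index the ordinal diffs are non-negative,
+ # so "< 2" means every step is 0 or 1 ordinals, i.e. no period is skipped.
+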
430
+ @property
431
+ def inferred_type(self) -> str:
432
+ # b/c data is represented as ints, make sure we can't have ambiguous
433
+ # indexing
434
+ return "period"
435
+
436
+ # ------------------------------------------------------------------------
437
+ # Indexing Methods
438
+
439
+ def _convert_tolerance(self, tolerance, target):
440
+ # Returned tolerance must be in dtype/units so that
441
+ # `|self._get_engine_target() - target._engine_target()| <= tolerance`
442
+ # is meaningful. Since PeriodIndex returns int64 for engine_target,
443
+ # we may need to convert timedelta64 tolerance to int64.
444
+ tolerance = super()._convert_tolerance(tolerance, target)
445
+
446
+ if self.dtype == target.dtype:
447
+ # convert tolerance to i8
448
+ tolerance = self._maybe_convert_timedelta(tolerance)
449
+
450
+ return tolerance
451
+
452
+ def get_loc(self, key):
453
+ """
454
+ Get integer location for requested label.
455
+
456
+ Parameters
457
+ ----------
458
+ key : Period, NaT, str, or datetime
459
+ String or datetime key must be parsable as Period.
460
+
461
+ Returns
462
+ -------
463
+ loc : int or ndarray[int64]
464
+
465
+ Raises
466
+ ------
467
+ KeyError
468
+ Key is not present in the index.
469
+ TypeError
470
+ If key is listlike or otherwise not hashable.
471
+ """
472
+ orig_key = key
473
+
474
+ self._check_indexing_error(key)
475
+
476
+ if is_valid_na_for_dtype(key, self.dtype):
477
+ key = NaT
478
+
479
+ elif isinstance(key, str):
480
+ try:
481
+ parsed, reso = self._parse_with_reso(key)
482
+ except ValueError as err:
483
+ # A string with invalid format
484
+ raise KeyError(f"Cannot interpret '{key}' as period") from err
485
+
486
+ if self._can_partial_date_slice(reso):
487
+ try:
488
+ return self._partial_date_slice(reso, parsed)
489
+ except KeyError as err:
490
+ raise KeyError(key) from err
491
+
492
+ if reso == self._resolution_obj:
493
+ # the reso < self._resolution_obj case goes
494
+ # through _get_string_slice
495
+ key = self._cast_partial_indexing_scalar(parsed)
496
+ else:
497
+ raise KeyError(key)
498
+
499
+ elif isinstance(key, Period):
500
+ self._disallow_mismatched_indexing(key)
501
+
502
+ elif isinstance(key, datetime):
503
+ key = self._cast_partial_indexing_scalar(key)
504
+
505
+ else:
506
+ # in particular integer, which Period constructor would cast to string
507
+ raise KeyError(key)
508
+
509
+ try:
510
+ return Index.get_loc(self, key)
511
+ except KeyError as err:
512
+ raise KeyError(orig_key) from err
513
+
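+ # Editor's sketch (not pandas source): string keys are parsed at the
+ # index's resolution, so for a monthly index these agree:
+ # >>> idx = pd.period_range("2020-01", periods=3, freq="M")
+ # >>> idx.get_loc("2020-02") == idx.get_loc(pd.Period("2020-02", freq="M"))
+ # True
+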
514
+ def _disallow_mismatched_indexing(self, key: Period) -> None:
515
+ if key._dtype != self.dtype:
516
+ raise KeyError(key)
517
+
518
+ def _cast_partial_indexing_scalar(self, label: datetime) -> Period:
519
+ try:
520
+ period = Period(label, freq=self.freq)
521
+ except ValueError as err:
522
+ # we cannot construct the Period
523
+ raise KeyError(label) from err
524
+ return period
525
+
526
+ @doc(DatetimeIndexOpsMixin._maybe_cast_slice_bound)
527
+ def _maybe_cast_slice_bound(self, label, side: str):
528
+ if isinstance(label, datetime):
529
+ label = self._cast_partial_indexing_scalar(label)
530
+
531
+ return super()._maybe_cast_slice_bound(label, side)
532
+
533
+ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
534
+ freq = OFFSET_TO_PERIOD_FREQSTR.get(reso.attr_abbrev, reso.attr_abbrev)
535
+ iv = Period(parsed, freq=freq)
536
+ return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
537
+
538
+ @doc(DatetimeIndexOpsMixin.shift)
539
+ def shift(self, periods: int = 1, freq=None) -> Self:
540
+ if freq is not None:
541
+ raise TypeError(
542
+ f"`freq` argument is not supported for {type(self).__name__}.shift"
543
+ )
544
+ return self + periods
545
+
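+ # Editor's note: shifting moves by whole periods of the index's own
+ # frequency, e.g. a monthly index shifted by 1 maps '2020-01' -> '2020-02'.
+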
546
+
547
+ def period_range(
548
+ start=None,
549
+ end=None,
550
+ periods: int | None = None,
551
+ freq=None,
552
+ name: Hashable | None = None,
553
+ ) -> PeriodIndex:
554
+ """
555
+ Return a fixed frequency PeriodIndex.
556
+
557
+ The day (calendar) is the default frequency.
558
+
559
+ Parameters
560
+ ----------
561
+ start : str, datetime, date, pandas.Timestamp, or period-like, default None
562
+ Left bound for generating periods.
563
+ end : str, datetime, date, pandas.Timestamp, or period-like, default None
564
+ Right bound for generating periods.
565
+ periods : int, default None
566
+ Number of periods to generate.
567
+ freq : str or DateOffset, optional
568
+ Frequency alias. By default the freq is taken from `start` or `end`
569
+ if those are Period objects. Otherwise, the default is ``"D"`` for
570
+ daily frequency.
571
+ name : str, default None
572
+ Name of the resulting PeriodIndex.
573
+
574
+ Returns
575
+ -------
576
+ PeriodIndex
577
+
578
+ Notes
579
+ -----
580
+ Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
581
+ must be specified.
582
+
583
+ To learn more about the frequency strings, please see `this link
584
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
585
+
586
+ Examples
587
+ --------
588
+ >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
589
+ PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
590
+ '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
591
+ '2018-01'],
592
+ dtype='period[M]')
593
+
594
+ If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
595
+ endpoints for a ``PeriodIndex`` with frequency matching that of the
596
+ ``period_range`` constructor.
597
+
598
+ >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
599
+ ... end=pd.Period('2017Q2', freq='Q'), freq='M')
600
+ PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
601
+ dtype='period[M]')
602
+ """
603
+ if com.count_not_none(start, end, periods) != 2:
604
+ raise ValueError(
605
+ "Of the three parameters: start, end, and periods, "
606
+ "exactly two must be specified"
607
+ )
608
+ if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):
609
+ freq = "D"
610
+
611
+ data, freq = PeriodArray._generate_range(start, end, periods, freq)
612
+ dtype = PeriodDtype(freq)
613
+ data = PeriodArray(data, dtype=dtype)
614
+ return PeriodIndex(data, name=name)
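+
+
+ # Editor's usage sketch (not part of the pandas source): exactly two of
+ # start/end/periods must be given, e.g.
+ # >>> pd.period_range(start="2023-01", periods=3, freq="M")
+ # PeriodIndex(['2023-01', '2023-02', '2023-03'], dtype='period[M]')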
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/range.py ADDED
@@ -0,0 +1,1187 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import (
4
+ Hashable,
5
+ Iterator,
6
+ )
7
+ from datetime import timedelta
8
+ import operator
9
+ from sys import getsizeof
10
+ from typing import (
11
+ TYPE_CHECKING,
12
+ Any,
13
+ Callable,
14
+ Literal,
15
+ cast,
16
+ overload,
17
+ )
18
+
19
+ import numpy as np
20
+
21
+ from pandas._libs import (
22
+ index as libindex,
23
+ lib,
24
+ )
25
+ from pandas._libs.algos import unique_deltas
26
+ from pandas._libs.lib import no_default
27
+ from pandas.compat.numpy import function as nv
28
+ from pandas.util._decorators import (
29
+ cache_readonly,
30
+ deprecate_nonkeyword_arguments,
31
+ doc,
32
+ )
33
+
34
+ from pandas.core.dtypes.common import (
35
+ ensure_platform_int,
36
+ ensure_python_int,
37
+ is_float,
38
+ is_integer,
39
+ is_scalar,
40
+ is_signed_integer_dtype,
41
+ )
42
+ from pandas.core.dtypes.generic import ABCTimedeltaIndex
43
+
44
+ from pandas.core import ops
45
+ import pandas.core.common as com
46
+ from pandas.core.construction import extract_array
47
+ import pandas.core.indexes.base as ibase
48
+ from pandas.core.indexes.base import (
49
+ Index,
50
+ maybe_extract_name,
51
+ )
52
+ from pandas.core.ops.common import unpack_zerodim_and_defer
53
+
54
+ if TYPE_CHECKING:
55
+ from pandas._typing import (
56
+ Axis,
57
+ Dtype,
58
+ NaPosition,
59
+ Self,
60
+ npt,
61
+ )
62
+ _empty_range = range(0)
63
+ _dtype_int64 = np.dtype(np.int64)
64
+
65
+
66
+ class RangeIndex(Index):
67
+ """
68
+ Immutable Index implementing a monotonic integer range.
69
+
70
+ RangeIndex is a memory-saving special case of an Index limited to representing
71
+ monotonic ranges with a 64-bit dtype. Using RangeIndex may in some instances
72
+ improve computing speed.
73
+
74
+ This is the default index type used
75
+ by DataFrame and Series when no explicit index is provided by the user.
76
+
77
+ Parameters
78
+ ----------
79
+ start : int (default: 0), range, or other RangeIndex instance
80
+ If int and "stop" is not given, interpreted as "stop" instead.
81
+ stop : int (default: 0)
82
+ step : int (default: 1)
83
+ dtype : np.int64
84
+ Unused, accepted for homogeneity with other index types.
85
+ copy : bool, default False
86
+ Unused, accepted for homogeneity with other index types.
87
+ name : object, optional
88
+ Name to be stored in the index.
89
+
90
+ Attributes
91
+ ----------
92
+ start
93
+ stop
94
+ step
95
+
96
+ Methods
97
+ -------
98
+ from_range
99
+
100
+ See Also
101
+ --------
102
+ Index : The base pandas Index type.
103
+
104
+ Examples
105
+ --------
106
+ >>> list(pd.RangeIndex(5))
107
+ [0, 1, 2, 3, 4]
108
+
109
+ >>> list(pd.RangeIndex(-2, 4))
110
+ [-2, -1, 0, 1, 2, 3]
111
+
112
+ >>> list(pd.RangeIndex(0, 10, 2))
113
+ [0, 2, 4, 6, 8]
114
+
115
+ >>> list(pd.RangeIndex(2, -10, -3))
116
+ [2, -1, -4, -7]
117
+
118
+ >>> list(pd.RangeIndex(0))
119
+ []
120
+
121
+ >>> list(pd.RangeIndex(1, 0))
122
+ []
123
+ """
124
+
125
+ _typ = "rangeindex"
126
+ _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
127
+ _range: range
128
+ _values: np.ndarray
129
+
130
+ @property
131
+ def _engine_type(self) -> type[libindex.Int64Engine]:
132
+ return libindex.Int64Engine
133
+
134
+ # --------------------------------------------------------------------
135
+ # Constructors
136
+
137
+ def __new__(
138
+ cls,
139
+ start=None,
140
+ stop=None,
141
+ step=None,
142
+ dtype: Dtype | None = None,
143
+ copy: bool = False,
144
+ name: Hashable | None = None,
145
+ ) -> Self:
146
+ cls._validate_dtype(dtype)
147
+ name = maybe_extract_name(name, start, cls)
148
+
149
+ # RangeIndex
150
+ if isinstance(start, cls):
151
+ return start.copy(name=name)
152
+ elif isinstance(start, range):
153
+ return cls._simple_new(start, name=name)
154
+
155
+ # validate the arguments
156
+ if com.all_none(start, stop, step):
157
+ raise TypeError("RangeIndex(...) must be called with integers")
158
+
159
+ start = ensure_python_int(start) if start is not None else 0
160
+
161
+ if stop is None:
162
+ start, stop = 0, start
163
+ else:
164
+ stop = ensure_python_int(stop)
165
+
166
+ step = ensure_python_int(step) if step is not None else 1
167
+ if step == 0:
168
+ raise ValueError("Step must not be zero")
169
+
170
+ rng = range(start, stop, step)
171
+ return cls._simple_new(rng, name=name)
172
+
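+ # Editor's note (illustrative): a lone integer argument is interpreted as
+ # "stop", mirroring the built-in range(), so RangeIndex(5) is equivalent
+ # to RangeIndex(start=0, stop=5, step=1).
+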
173
+ @classmethod
174
+ def from_range(cls, data: range, name=None, dtype: Dtype | None = None) -> Self:
175
+ """
176
+ Create :class:`pandas.RangeIndex` from a ``range`` object.
177
+
178
+ Returns
179
+ -------
180
+ RangeIndex
181
+
182
+ Examples
183
+ --------
184
+ >>> pd.RangeIndex.from_range(range(5))
185
+ RangeIndex(start=0, stop=5, step=1)
186
+
187
+ >>> pd.RangeIndex.from_range(range(2, -10, -3))
188
+ RangeIndex(start=2, stop=-10, step=-3)
189
+ """
190
+ if not isinstance(data, range):
191
+ raise TypeError(
192
+ f"{cls.__name__}(...) must be called with object coercible to a "
193
+ f"range, {repr(data)} was passed"
194
+ )
195
+ cls._validate_dtype(dtype)
196
+ return cls._simple_new(data, name=name)
197
+
198
+ # error: Argument 1 of "_simple_new" is incompatible with supertype "Index";
199
+ # supertype defines the argument type as
200
+ # "Union[ExtensionArray, ndarray[Any, Any]]" [override]
201
+ @classmethod
202
+ def _simple_new( # type: ignore[override]
203
+ cls, values: range, name: Hashable | None = None
204
+ ) -> Self:
205
+ result = object.__new__(cls)
206
+
207
+ assert isinstance(values, range)
208
+
209
+ result._range = values
210
+ result._name = name
211
+ result._cache = {}
212
+ result._reset_identity()
213
+ result._references = None
214
+ return result
215
+
216
+ @classmethod
217
+ def _validate_dtype(cls, dtype: Dtype | None) -> None:
218
+ if dtype is None:
219
+ return
220
+
221
+ validation_func, expected = cls._dtype_validation_metadata
222
+ if not validation_func(dtype):
223
+ raise ValueError(
224
+ f"Incorrect `dtype` passed: expected {expected}, received {dtype}"
225
+ )
226
+
227
+ # --------------------------------------------------------------------
228
+
229
+ # error: Return type "Type[Index]" of "_constructor" incompatible with return
230
+ # type "Type[RangeIndex]" in supertype "Index"
231
+ @cache_readonly
232
+ def _constructor(self) -> type[Index]: # type: ignore[override]
233
+ """return the class to use for construction"""
234
+ return Index
235
+
236
+ # error: Signature of "_data" incompatible with supertype "Index"
237
+ @cache_readonly
238
+ def _data(self) -> np.ndarray: # type: ignore[override]
239
+ """
240
+ An int array that for performance reasons is created only when needed.
241
+
242
+ The constructed array is saved in ``_cache``.
243
+ """
244
+ return np.arange(self.start, self.stop, self.step, dtype=np.int64)
245
+
246
+ def _get_data_as_items(self) -> list[tuple[str, int]]:
247
+ """return a list of tuples of start, stop, step"""
248
+ rng = self._range
249
+ return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
250
+
251
+ def __reduce__(self):
252
+ d = {"name": self._name}
253
+ d.update(dict(self._get_data_as_items()))
254
+ return ibase._new_Index, (type(self), d), None
255
+
256
+ # --------------------------------------------------------------------
257
+ # Rendering Methods
258
+
259
+ def _format_attrs(self):
260
+ """
261
+ Return a list of tuples of the (attr, formatted_value)
262
+ """
263
+ attrs = cast("list[tuple[str, str | int]]", self._get_data_as_items())
264
+ if self._name is not None:
265
+ attrs.append(("name", ibase.default_pprint(self._name)))
266
+ return attrs
267
+
268
+ def _format_with_header(self, *, header: list[str], na_rep: str) -> list[str]:
269
+ # Equivalent to Index implementation, but faster
270
+ if not len(self._range):
271
+ return header
272
+ first_val_str = str(self._range[0])
273
+ last_val_str = str(self._range[-1])
274
+ max_length = max(len(first_val_str), len(last_val_str))
275
+
276
+ return header + [f"{x:<{max_length}}" for x in self._range]
277
+
278
+ # --------------------------------------------------------------------
279
+
280
+ @property
281
+ def start(self) -> int:
282
+ """
283
+ The value of the `start` parameter (``0`` if this was not supplied).
284
+
285
+ Examples
286
+ --------
287
+ >>> idx = pd.RangeIndex(5)
288
+ >>> idx.start
289
+ 0
290
+
291
+ >>> idx = pd.RangeIndex(2, -10, -3)
292
+ >>> idx.start
293
+ 2
294
+ """
295
+ # GH 25710
296
+ return self._range.start
297
+
298
+ @property
299
+ def stop(self) -> int:
300
+ """
301
+ The value of the `stop` parameter.
302
+
303
+ Examples
304
+ --------
305
+ >>> idx = pd.RangeIndex(5)
306
+ >>> idx.stop
307
+ 5
308
+
309
+ >>> idx = pd.RangeIndex(2, -10, -3)
310
+ >>> idx.stop
311
+ -10
312
+ """
313
+ return self._range.stop
314
+
315
+ @property
316
+ def step(self) -> int:
317
+ """
318
+ The value of the `step` parameter (``1`` if this was not supplied).
319
+
320
+ Examples
321
+ --------
322
+ >>> idx = pd.RangeIndex(5)
323
+ >>> idx.step
324
+ 1
325
+
326
+ >>> idx = pd.RangeIndex(2, -10, -3)
327
+ >>> idx.step
328
+ -3
329
+
330
+ Even if :class:`pandas.RangeIndex` is empty, ``step`` is still ``1`` if
331
+ not supplied.
332
+
333
+ >>> idx = pd.RangeIndex(1, 0)
334
+ >>> idx.step
335
+ 1
336
+ """
337
+ # GH 25710
338
+ return self._range.step
339
+
340
+ @cache_readonly
341
+ def nbytes(self) -> int:
342
+ """
343
+ Return the number of bytes in the underlying data.
344
+ """
345
+ rng = self._range
346
+ return getsizeof(rng) + sum(
347
+ getsizeof(getattr(rng, attr_name))
348
+ for attr_name in ["start", "stop", "step"]
349
+ )
350
+
351
+ def memory_usage(self, deep: bool = False) -> int:
352
+ """
353
+ Memory usage of the index's values.
354
+
355
+ Parameters
356
+ ----------
357
+ deep : bool
358
+ Introspect the data deeply, interrogate
359
+ `object` dtypes for system-level memory consumption
360
+
361
+ Returns
362
+ -------
363
+ bytes used
364
+
365
+ Notes
366
+ -----
367
+ Memory usage does not include memory consumed by elements that
368
+ are not components of the array if deep=False
369
+
370
+ See Also
371
+ --------
372
+ numpy.ndarray.nbytes
373
+ """
374
+ return self.nbytes
375
+
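Since ``nbytes`` counts only the ``range`` object and its three integer attributes, the footprint is constant regardless of length. A quick illustrative check (exact byte counts are platform-dependent, but the relationships hold):

>>> import numpy as np
>>> import pandas as pd
>>> pd.RangeIndex(10).memory_usage() == pd.RangeIndex(1_000_000).memory_usage()
True
>>> pd.RangeIndex(1_000_000).memory_usage() < pd.Index(np.arange(1_000_000)).memory_usage()
True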
376
+ @property
377
+ def dtype(self) -> np.dtype:
378
+ return _dtype_int64
379
+
380
+ @property
381
+ def is_unique(self) -> bool:
382
+ """return if the index has unique values"""
383
+ return True
384
+
385
+ @cache_readonly
386
+ def is_monotonic_increasing(self) -> bool:
387
+ return self._range.step > 0 or len(self) <= 1
388
+
389
+ @cache_readonly
390
+ def is_monotonic_decreasing(self) -> bool:
391
+ return self._range.step < 0 or len(self) <= 1
392
+
393
+ def __contains__(self, key: Any) -> bool:
394
+ hash(key)
395
+ try:
396
+ key = ensure_python_int(key)
397
+ except TypeError:
398
+ return False
399
+ return key in self._range
400
+
401
+ @property
402
+ def inferred_type(self) -> str:
403
+ return "integer"
404
+
405
+ # --------------------------------------------------------------------
406
+ # Indexing Methods
407
+
408
+ @doc(Index.get_loc)
409
+ def get_loc(self, key) -> int:
410
+ if is_integer(key) or (is_float(key) and key.is_integer()):
411
+ new_key = int(key)
412
+ try:
413
+ return self._range.index(new_key)
414
+ except ValueError as err:
415
+ raise KeyError(key) from err
416
+ if isinstance(key, Hashable):
417
+ raise KeyError(key)
418
+ self._check_indexing_error(key)
419
+ raise KeyError(key)
420
+
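Because ``range.index`` locates a value arithmetically, ``get_loc`` is O(1) with no hash table involved. Illustrative:

>>> import pandas as pd
>>> idx = pd.RangeIndex(0, 100, 10)
>>> idx.get_loc(30)
3
>>> idx.get_loc(7)
Traceback (most recent call last):
    ...
KeyError: 7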
421
+ def _get_indexer(
422
+ self,
423
+ target: Index,
424
+ method: str | None = None,
425
+ limit: int | None = None,
426
+ tolerance=None,
427
+ ) -> npt.NDArray[np.intp]:
428
+ if com.any_not_none(method, tolerance, limit):
429
+ return super()._get_indexer(
430
+ target, method=method, tolerance=tolerance, limit=limit
431
+ )
432
+
433
+ if self.step > 0:
434
+ start, stop, step = self.start, self.stop, self.step
435
+ else:
436
+ # GH 28678: work on reversed range for simplicity
437
+ reverse = self._range[::-1]
438
+ start, stop, step = reverse.start, reverse.stop, reverse.step
439
+
440
+ target_array = np.asarray(target)
441
+ locs = target_array - start
442
+ valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
443
+ locs[~valid] = -1
444
+ locs[valid] = locs[valid] / step
445
+
446
+ if step != self.step:
447
+ # We reversed this range: transform to original locs
448
+ locs[valid] = len(self) - 1 - locs[valid]
449
+ return ensure_platform_int(locs)
450
+
451
+ @cache_readonly
452
+ def _should_fallback_to_positional(self) -> bool:
453
+ """
454
+ Should an integer key be treated as positional?
455
+ """
456
+ return False
457
+
458
+ # --------------------------------------------------------------------
459
+
460
+ def tolist(self) -> list[int]:
461
+ return list(self._range)
462
+
463
+ @doc(Index.__iter__)
464
+ def __iter__(self) -> Iterator[int]:
465
+ yield from self._range
466
+
467
+ @doc(Index._shallow_copy)
468
+ def _shallow_copy(self, values, name: Hashable = no_default):
469
+ name = self._name if name is no_default else name
470
+
471
+ if values.dtype.kind == "f":
472
+ return Index(values, name=name, dtype=np.float64)
473
+ # GH 46675 & 43885: If values is equally spaced, return a
474
+ # more memory-compact RangeIndex instead of Index with 64-bit dtype
475
+ unique_diffs = unique_deltas(values)
476
+ if len(unique_diffs) == 1 and unique_diffs[0] != 0:
477
+ diff = unique_diffs[0]
478
+ new_range = range(values[0], values[-1] + diff, diff)
479
+ return type(self)._simple_new(new_range, name=name)
480
+ else:
481
+ return self._constructor._simple_new(values, name=name)
482
+
483
+ def _view(self) -> Self:
484
+ result = type(self)._simple_new(self._range, name=self._name)
485
+ result._cache = self._cache
486
+ return result
487
+
488
+ @doc(Index.copy)
489
+ def copy(self, name: Hashable | None = None, deep: bool = False) -> Self:
490
+ name = self._validate_names(name=name, deep=deep)[0]
491
+ new_index = self._rename(name=name)
492
+ return new_index
493
+
494
+ def _minmax(self, meth: str):
495
+ no_steps = len(self) - 1
496
+ if no_steps == -1:
497
+ return np.nan
498
+ elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
499
+ return self.start
500
+
501
+ return self.start + self.step * no_steps
502
+
503
+ def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
504
+ """The minimum value of the RangeIndex"""
505
+ nv.validate_minmax_axis(axis)
506
+ nv.validate_min(args, kwargs)
507
+ return self._minmax("min")
508
+
509
+ def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
510
+ """The maximum value of the RangeIndex"""
511
+ nv.validate_minmax_axis(axis)
512
+ nv.validate_max(args, kwargs)
513
+ return self._minmax("max")
514
+
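``_minmax`` derives both endpoints as ``start + step * (len - 1)`` arithmetic, so no array is materialized. Illustrative:

>>> import pandas as pd
>>> idx = pd.RangeIndex(2, -10, -3)  # values: 2, -1, -4, -7
>>> idx.min(), idx.max()
(-7, 2)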
515
+ def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
516
+ """
517
+ Returns the indices that would sort the index and its
518
+ underlying data.
519
+
520
+ Returns
521
+ -------
522
+ np.ndarray[np.intp]
523
+
524
+ See Also
525
+ --------
526
+ numpy.ndarray.argsort
527
+ """
528
+ ascending = kwargs.pop("ascending", True) # EA compat
529
+ kwargs.pop("kind", None) # e.g. "mergesort" is irrelevant
530
+ nv.validate_argsort(args, kwargs)
531
+
532
+ if self._range.step > 0:
533
+ result = np.arange(len(self), dtype=np.intp)
534
+ else:
535
+ result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)
536
+
537
+ if not ascending:
538
+ result = result[::-1]
539
+ return result
540
+
541
+ def factorize(
542
+ self,
543
+ sort: bool = False,
544
+ use_na_sentinel: bool = True,
545
+ ) -> tuple[npt.NDArray[np.intp], RangeIndex]:
546
+ codes = np.arange(len(self), dtype=np.intp)
547
+ uniques = self
548
+ if sort and self.step < 0:
549
+ codes = codes[::-1]
550
+ uniques = uniques[::-1]
551
+ return codes, uniques
552
+
553
+ def equals(self, other: object) -> bool:
554
+ """
555
+ Determines if two Index objects contain the same elements.
556
+ """
557
+ if isinstance(other, RangeIndex):
558
+ return self._range == other._range
559
+ return super().equals(other)
560
+
561
+ # error: Signature of "sort_values" incompatible with supertype "Index"
562
+ @overload # type: ignore[override]
563
+ def sort_values(
564
+ self,
565
+ *,
566
+ return_indexer: Literal[False] = ...,
567
+ ascending: bool = ...,
568
+ na_position: NaPosition = ...,
569
+ key: Callable | None = ...,
570
+ ) -> Self:
571
+ ...
572
+
573
+ @overload
574
+ def sort_values(
575
+ self,
576
+ *,
577
+ return_indexer: Literal[True],
578
+ ascending: bool = ...,
579
+ na_position: NaPosition = ...,
580
+ key: Callable | None = ...,
581
+ ) -> tuple[Self, np.ndarray | RangeIndex]:
582
+ ...
583
+
584
+ @overload
585
+ def sort_values(
586
+ self,
587
+ *,
588
+ return_indexer: bool = ...,
589
+ ascending: bool = ...,
590
+ na_position: NaPosition = ...,
591
+ key: Callable | None = ...,
592
+ ) -> Self | tuple[Self, np.ndarray | RangeIndex]:
593
+ ...
594
+
595
+ @deprecate_nonkeyword_arguments(
596
+ version="3.0", allowed_args=["self"], name="sort_values"
597
+ )
598
+ def sort_values(
599
+ self,
600
+ return_indexer: bool = False,
601
+ ascending: bool = True,
602
+ na_position: NaPosition = "last",
603
+ key: Callable | None = None,
604
+ ) -> Self | tuple[Self, np.ndarray | RangeIndex]:
605
+ if key is not None:
606
+ return super().sort_values(
607
+ return_indexer=return_indexer,
608
+ ascending=ascending,
609
+ na_position=na_position,
610
+ key=key,
611
+ )
612
+ else:
613
+ sorted_index = self
614
+ inverse_indexer = False
615
+ if ascending:
616
+ if self.step < 0:
617
+ sorted_index = self[::-1]
618
+ inverse_indexer = True
619
+ else:
620
+ if self.step > 0:
621
+ sorted_index = self[::-1]
622
+ inverse_indexer = True
623
+
624
+ if return_indexer:
625
+ if inverse_indexer:
626
+ rng = range(len(self) - 1, -1, -1)
627
+ else:
628
+ rng = range(len(self))
629
+ return sorted_index, RangeIndex(rng)
630
+ else:
631
+ return sorted_index
632
+
633
+ # --------------------------------------------------------------------
634
+ # Set Operations
635
+
636
+ def _intersection(self, other: Index, sort: bool = False):
637
+ # caller is responsible for checking self and other are both non-empty
638
+
639
+ if not isinstance(other, RangeIndex):
640
+ return super()._intersection(other, sort=sort)
641
+
642
+ first = self._range[::-1] if self.step < 0 else self._range
643
+ second = other._range[::-1] if other.step < 0 else other._range
644
+
645
+ # check whether intervals intersect
646
+ # deals with in- and decreasing ranges
647
+ int_low = max(first.start, second.start)
648
+ int_high = min(first.stop, second.stop)
649
+ if int_high <= int_low:
650
+ return self._simple_new(_empty_range)
651
+
652
+ # Method hint: linear Diophantine equation
653
+ # solve intersection problem
654
+ # performance hint: for identical step sizes, could use
655
+ # cheaper alternative
656
+ gcd, s, _ = self._extended_gcd(first.step, second.step)
657
+
658
+ # check whether element sets intersect
659
+ if (first.start - second.start) % gcd:
660
+ return self._simple_new(_empty_range)
661
+
662
+ # calculate parameters for the RangeIndex describing the
663
+ # intersection disregarding the lower bounds
664
+ tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
665
+ new_step = first.step * second.step // gcd
666
+ new_range = range(tmp_start, int_high, new_step)
667
+ new_index = self._simple_new(new_range)
668
+
669
+ # adjust index to limiting interval
670
+ new_start = new_index._min_fitting_element(int_low)
671
+ new_range = range(new_start, new_index.stop, new_index.step)
672
+ new_index = self._simple_new(new_range)
673
+
674
+ if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
675
+ new_index = new_index[::-1]
676
+
677
+ if sort is None:
678
+ new_index = new_index.sort_values()
679
+
680
+ return new_index
681
+
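For example, with coprime steps the Diophantine solver lands on step ``lcm(step1, step2)`` and the result stays a ``RangeIndex``. Illustrative:

>>> import pandas as pd
>>> result = pd.RangeIndex(0, 20, 2).intersection(pd.RangeIndex(0, 20, 3))
>>> isinstance(result, pd.RangeIndex), list(result)
(True, [0, 6, 12, 18])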
682
+ def _min_fitting_element(self, lower_limit: int) -> int:
683
+ """Returns the smallest element greater than or equal to the limit"""
684
+ no_steps = -(-(lower_limit - self.start) // abs(self.step))
685
+ return self.start + abs(self.step) * no_steps
686
+
687
+ def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
688
+ """
689
+ Extended Euclidean algorithm to solve Bezout's identity:
690
+ a*x + b*y = gcd(a, b)
691
+ Finds one particular solution for x, y: s, t
692
+ Returns: gcd, s, t
693
+ """
694
+ s, old_s = 0, 1
695
+ t, old_t = 1, 0
696
+ r, old_r = b, a
697
+ while r:
698
+ quotient = old_r // r
699
+ old_r, r = r, old_r - quotient * r
700
+ old_s, s = s, old_s - quotient * s
701
+ old_t, t = t, old_t - quotient * t
702
+ return old_r, old_s, old_t
703
+
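The same routine, copied standalone to experiment with: for ``a=2, b=3`` it returns ``(1, -1, 1)``, since ``2*(-1) + 3*1 == 1``.

>>> def extended_gcd(a: int, b: int) -> tuple[int, int, int]:
...     # identical logic to RangeIndex._extended_gcd above
...     s, old_s = 0, 1
...     t, old_t = 1, 0
...     r, old_r = b, a
...     while r:
...         quotient = old_r // r
...         old_r, r = r, old_r - quotient * r
...         old_s, s = s, old_s - quotient * s
...         old_t, t = t, old_t - quotient * t
...     return old_r, old_s, old_t
>>> extended_gcd(2, 3)  # (gcd, s, t)
(1, -1, 1)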
704
+ def _range_in_self(self, other: range) -> bool:
705
+ """Check if other range is contained in self"""
706
+ # https://stackoverflow.com/a/32481015
707
+ if not other:
708
+ return True
709
+ if not self._range:
710
+ return False
711
+ if len(other) > 1 and other.step % self._range.step:
712
+ return False
713
+ return other.start in self._range and other[-1] in self._range
714
+
715
+ def _union(self, other: Index, sort: bool | None):
716
+ """
717
+ Form the union of two Index objects and sorts if possible
718
+
719
+ Parameters
720
+ ----------
721
+ other : Index or array-like
722
+
723
+ sort : bool or None, default None
724
+ Whether to sort (monotonically increasing) the resulting index.
725
+ ``sort=None|True`` returns a ``RangeIndex`` if possible or a sorted
726
+ ``Index`` with an int64 dtype if not.
727
+ ``sort=False`` can return a ``RangeIndex`` if self is monotonically
728
+ increasing and other is fully contained in self. Otherwise, returns
729
+ an unsorted ``Index`` with an int64 dtype.
730
+
731
+ Returns
732
+ -------
733
+ union : Index
734
+ """
735
+ if isinstance(other, RangeIndex):
736
+ if sort in (None, True) or (
737
+ sort is False and self.step > 0 and self._range_in_self(other._range)
738
+ ):
739
+ # GH 47557: Can still return a RangeIndex
740
+ # if other range in self and sort=False
741
+ start_s, step_s = self.start, self.step
742
+ end_s = self.start + self.step * (len(self) - 1)
743
+ start_o, step_o = other.start, other.step
744
+ end_o = other.start + other.step * (len(other) - 1)
745
+ if self.step < 0:
746
+ start_s, step_s, end_s = end_s, -step_s, start_s
747
+ if other.step < 0:
748
+ start_o, step_o, end_o = end_o, -step_o, start_o
749
+ if len(self) == 1 and len(other) == 1:
750
+ step_s = step_o = abs(self.start - other.start)
751
+ elif len(self) == 1:
752
+ step_s = step_o
753
+ elif len(other) == 1:
754
+ step_o = step_s
755
+ start_r = min(start_s, start_o)
756
+ end_r = max(end_s, end_o)
757
+ if step_o == step_s:
758
+ if (
759
+ (start_s - start_o) % step_s == 0
760
+ and (start_s - end_o) <= step_s
761
+ and (start_o - end_s) <= step_s
762
+ ):
763
+ return type(self)(start_r, end_r + step_s, step_s)
764
+ if (
765
+ (step_s % 2 == 0)
766
+ and (abs(start_s - start_o) == step_s / 2)
767
+ and (abs(end_s - end_o) == step_s / 2)
768
+ ):
769
+ # e.g. range(0, 10, 2) and range(1, 11, 2)
770
+ # but not range(0, 20, 4) and range(1, 21, 4) GH#44019
771
+ return type(self)(start_r, end_r + step_s / 2, step_s / 2)
772
+
773
+ elif step_o % step_s == 0:
774
+ if (
775
+ (start_o - start_s) % step_s == 0
776
+ and (start_o + step_s >= start_s)
777
+ and (end_o - step_s <= end_s)
778
+ ):
779
+ return type(self)(start_r, end_r + step_s, step_s)
780
+ elif step_s % step_o == 0:
781
+ if (
782
+ (start_s - start_o) % step_o == 0
783
+ and (start_s + step_o >= start_o)
784
+ and (end_s - step_o <= end_o)
785
+ ):
786
+ return type(self)(start_r, end_r + step_o, step_o)
787
+
788
+ return super()._union(other, sort=sort)
789
+
790
+ def _difference(self, other, sort=None):
791
+ # optimized set operation if we have another RangeIndex
792
+ self._validate_sort_keyword(sort)
793
+ self._assert_can_do_setop(other)
794
+ other, result_name = self._convert_can_do_setop(other)
795
+
796
+ if not isinstance(other, RangeIndex):
797
+ return super()._difference(other, sort=sort)
798
+
799
+ if sort is not False and self.step < 0:
800
+ return self[::-1]._difference(other)
801
+
802
+ res_name = ops.get_op_result_name(self, other)
803
+
804
+ first = self._range[::-1] if self.step < 0 else self._range
805
+ overlap = self.intersection(other)
806
+ if overlap.step < 0:
807
+ overlap = overlap[::-1]
808
+
809
+ if len(overlap) == 0:
810
+ return self.rename(name=res_name)
811
+ if len(overlap) == len(self):
812
+ return self[:0].rename(res_name)
813
+
814
+ # overlap.step will always be a multiple of self.step (see _intersection)
815
+
816
+ if len(overlap) == 1:
817
+ if overlap[0] == self[0]:
818
+ return self[1:]
819
+
820
+ elif overlap[0] == self[-1]:
821
+ return self[:-1]
822
+
823
+ elif len(self) == 3 and overlap[0] == self[1]:
824
+ return self[::2]
825
+
826
+ else:
827
+ return super()._difference(other, sort=sort)
828
+
829
+ elif len(overlap) == 2 and overlap[0] == first[0] and overlap[-1] == first[-1]:
830
+ # e.g. range(-8, 20, 7) and range(13, -9, -3)
831
+ return self[1:-1]
832
+
833
+ if overlap.step == first.step:
834
+ if overlap[0] == first.start:
835
+ # The difference is everything after the intersection
836
+ new_rng = range(overlap[-1] + first.step, first.stop, first.step)
837
+ elif overlap[-1] == first[-1]:
838
+ # The difference is everything before the intersection
839
+ new_rng = range(first.start, overlap[0], first.step)
840
+ elif overlap._range == first[1:-1]:
841
+ # e.g. range(4) and range(1, 3)
842
+ step = len(first) - 1
843
+ new_rng = first[::step]
844
+ else:
845
+ # The difference is not range-like
846
+ # e.g. range(1, 10, 1) and range(3, 7, 1)
847
+ return super()._difference(other, sort=sort)
848
+
849
+ else:
850
+ # We must have len(self) > 1, bc we ruled out above
851
+ # len(overlap) == 0 and len(overlap) == len(self)
852
+ assert len(self) > 1
853
+
854
+ if overlap.step == first.step * 2:
855
+ if overlap[0] == first[0] and overlap[-1] in (first[-1], first[-2]):
856
+ # e.g. range(1, 10, 1) and range(1, 10, 2)
857
+ new_rng = first[1::2]
858
+
859
+ elif overlap[0] == first[1] and overlap[-1] in (first[-1], first[-2]):
860
+ # e.g. range(1, 10, 1) and range(2, 10, 2)
861
+ new_rng = first[::2]
862
+
863
+ else:
864
+ # We can get here with e.g. range(20) and range(0, 10, 2)
865
+ return super()._difference(other, sort=sort)
866
+
867
+ else:
868
+ # e.g. range(10) and range(0, 10, 3)
869
+ return super()._difference(other, sort=sort)
870
+
871
+ new_index = type(self)._simple_new(new_rng, name=res_name)
872
+ if first is not self._range:
873
+ new_index = new_index[::-1]
874
+
875
+ return new_index
876
+
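When the overlap is flush with one end of ``self``, the leftover is contiguous and the result stays a ``RangeIndex``. Illustrative:

>>> import pandas as pd
>>> diff = pd.RangeIndex(10).difference(pd.RangeIndex(5))
>>> isinstance(diff, pd.RangeIndex), list(diff)
(True, [5, 6, 7, 8, 9])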
877
+ def symmetric_difference(
878
+ self, other, result_name: Hashable | None = None, sort=None
879
+ ):
880
+ if not isinstance(other, RangeIndex) or sort is not None:
881
+ return super().symmetric_difference(other, result_name, sort)
882
+
883
+ left = self.difference(other)
884
+ right = other.difference(self)
885
+ result = left.union(right)
886
+
887
+ if result_name is not None:
888
+ result = result.rename(result_name)
889
+ return result
890
+
891
+ # --------------------------------------------------------------------
892
+
893
+ # error: Return type "Index" of "delete" incompatible with return type
894
+ # "RangeIndex" in supertype "Index"
895
+ def delete(self, loc) -> Index: # type: ignore[override]
896
+ # In some cases we can retain RangeIndex, see also
897
+ # DatetimeTimedeltaMixin._get_delete_freq
898
+ if is_integer(loc):
899
+ if loc in (0, -len(self)):
900
+ return self[1:]
901
+ if loc in (-1, len(self) - 1):
902
+ return self[:-1]
903
+ if len(self) == 3 and loc in (1, -2):
904
+ return self[::2]
905
+
906
+ elif lib.is_list_like(loc):
907
+ slc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self))
908
+
909
+ if isinstance(slc, slice):
910
+ # defer to RangeIndex._difference, which is optimized to return
911
+ # a RangeIndex whenever possible
912
+ other = self[slc]
913
+ return self.difference(other, sort=False)
914
+
915
+ return super().delete(loc)
916
+
917
+ def insert(self, loc: int, item) -> Index:
918
+ if len(self) and (is_integer(item) or is_float(item)):
919
+ # We can retain RangeIndex if inserting at the beginning or end,
920
+ # or right in the middle.
921
+ rng = self._range
922
+ if loc == 0 and item == self[0] - self.step:
923
+ new_rng = range(rng.start - rng.step, rng.stop, rng.step)
924
+ return type(self)._simple_new(new_rng, name=self._name)
925
+
926
+ elif loc == len(self) and item == self[-1] + self.step:
927
+ new_rng = range(rng.start, rng.stop + rng.step, rng.step)
928
+ return type(self)._simple_new(new_rng, name=self._name)
929
+
930
+ elif len(self) == 2 and item == self[0] + self.step / 2:
931
+ # e.g. inserting 1 into [0, 2]
932
+ step = int(self.step / 2)
933
+ new_rng = range(self.start, self.stop, step)
934
+ return type(self)._simple_new(new_rng, name=self._name)
935
+
936
+ return super().insert(loc, item)
937
+
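So extending by exactly one ``step`` at either end keeps the ``RangeIndex``; anything else falls back to a materialized ``Index``. Illustrative:

>>> import pandas as pd
>>> idx = pd.RangeIndex(1, 4)  # values: 1, 2, 3
>>> idx.insert(0, 0)
RangeIndex(start=0, stop=4, step=1)
>>> idx.insert(1, 10)
Index([1, 10, 2, 3], dtype='int64')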
938
+ def _concat(self, indexes: list[Index], name: Hashable) -> Index:
939
+ """
940
+ Overriding parent method for the case of all RangeIndex instances.
941
+
942
+ When all members of "indexes" are of type RangeIndex: result will be
943
+ RangeIndex if possible, Index with an int64 dtype otherwise. E.g.:
944
+ indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
945
+ indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Index([0,1,2,4,5], dtype='int64')
946
+ """
947
+ if not all(isinstance(x, RangeIndex) for x in indexes):
948
+ return super()._concat(indexes, name)
949
+
950
+ elif len(indexes) == 1:
951
+ return indexes[0]
952
+
953
+ rng_indexes = cast(list[RangeIndex], indexes)
954
+
955
+ start = step = next_ = None
956
+
957
+ # Filter the empty indexes
958
+ non_empty_indexes = [obj for obj in rng_indexes if len(obj)]
959
+
960
+ for obj in non_empty_indexes:
961
+ rng = obj._range
962
+
963
+ if start is None:
964
+ # This is set by the first non-empty index
965
+ start = rng.start
966
+ if step is None and len(rng) > 1:
967
+ step = rng.step
968
+ elif step is None:
969
+ # First non-empty index had only one element
970
+ if rng.start == start:
971
+ values = np.concatenate([x._values for x in rng_indexes])
972
+ result = self._constructor(values)
973
+ return result.rename(name)
974
+
975
+ step = rng.start - start
976
+
977
+ non_consecutive = (step != rng.step and len(rng) > 1) or (
978
+ next_ is not None and rng.start != next_
979
+ )
980
+ if non_consecutive:
981
+ result = self._constructor(
982
+ np.concatenate([x._values for x in rng_indexes])
983
+ )
984
+ return result.rename(name)
985
+
986
+ if step is not None:
987
+ next_ = rng[-1] + step
988
+
989
+ if non_empty_indexes:
990
+ # Get the stop value from "next" or alternatively
991
+ # from the last non-empty index
992
+ stop = non_empty_indexes[-1].stop if next_ is None else next_
993
+ return RangeIndex(start, stop, step).rename(name)
994
+
995
+ # Here all "indexes" had 0 length, i.e. were empty.
996
+ # In this case return an empty range index.
997
+ return RangeIndex(0, 0).rename(name)
998
+
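The two docstring cases above, driven through ``Index.append`` (which routes into ``_concat``). Illustrative:

>>> import pandas as pd
>>> pd.RangeIndex(3).append(pd.RangeIndex(3, 6))
RangeIndex(start=0, stop=6, step=1)
>>> pd.RangeIndex(3).append(pd.RangeIndex(4, 6))
Index([0, 1, 2, 4, 5], dtype='int64')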
999
+ def __len__(self) -> int:
1000
+ """
1001
+ return the length of the RangeIndex
1002
+ """
1003
+ return len(self._range)
1004
+
1005
+ @property
1006
+ def size(self) -> int:
1007
+ return len(self)
1008
+
1009
+ def __getitem__(self, key):
1010
+ """
1011
+ Conserve RangeIndex type for scalar and slice keys.
1012
+ """
1013
+ if isinstance(key, slice):
1014
+ return self._getitem_slice(key)
1015
+ elif is_integer(key):
1016
+ new_key = int(key)
1017
+ try:
1018
+ return self._range[new_key]
1019
+ except IndexError as err:
1020
+ raise IndexError(
1021
+ f"index {key} is out of bounds for axis 0 with size {len(self)}"
1022
+ ) from err
1023
+ elif is_scalar(key):
1024
+ raise IndexError(
1025
+ "only integers, slices (`:`), "
1026
+ "ellipsis (`...`), numpy.newaxis (`None`) "
1027
+ "and integer or boolean "
1028
+ "arrays are valid indices"
1029
+ )
1030
+ return super().__getitem__(key)
1031
+
1032
+ def _getitem_slice(self, slobj: slice) -> Self:
1033
+ """
1034
+ Fastpath for __getitem__ when we know we have a slice.
1035
+ """
1036
+ res = self._range[slobj]
1037
+ return type(self)._simple_new(res, name=self._name)
1038
+
1039
+ @unpack_zerodim_and_defer("__floordiv__")
1040
+ def __floordiv__(self, other):
1041
+ if is_integer(other) and other != 0:
1042
+ if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
1043
+ start = self.start // other
1044
+ step = self.step // other
1045
+ stop = start + len(self) * step
1046
+ new_range = range(start, stop, step or 1)
1047
+ return self._simple_new(new_range, name=self._name)
1048
+ if len(self) == 1:
1049
+ start = self.start // other
1050
+ new_range = range(start, start + 1, 1)
1051
+ return self._simple_new(new_range, name=self._name)
1052
+
1053
+ return super().__floordiv__(other)
1054
+
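When both ``start`` and ``step`` divide evenly, the quotient is again an arithmetic progression. Illustrative:

>>> import pandas as pd
>>> pd.RangeIndex(0, 20, 4) // 2
RangeIndex(start=0, stop=10, step=2)
>>> pd.RangeIndex(1, 20, 4) // 2  # start not divisible: falls back
Index([0, 2, 4, 6, 8], dtype='int64')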
1055
+ # --------------------------------------------------------------------
1056
+ # Reductions
1057
+
1058
+ def all(self, *args, **kwargs) -> bool:
1059
+ return 0 not in self._range
1060
+
1061
+ def any(self, *args, **kwargs) -> bool:
1062
+ return any(self._range)
1063
+
1064
+ # --------------------------------------------------------------------
1065
+
1066
+ def _cmp_method(self, other, op):
1067
+ if isinstance(other, RangeIndex) and self._range == other._range:
1068
+ # Both are immutable so if ._range attr. are equal, shortcut is possible
1069
+ return super()._cmp_method(self, op)
1070
+ return super()._cmp_method(other, op)
1071
+
1072
+ def _arith_method(self, other, op):
1073
+ """
1074
+ Parameters
1075
+ ----------
1076
+ other : Any
1077
+ op : callable that accepts 2 params
1078
+ perform the binary op
1079
+ """
1080
+
1081
+ if isinstance(other, ABCTimedeltaIndex):
1082
+ # Defer to TimedeltaIndex implementation
1083
+ return NotImplemented
1084
+ elif isinstance(other, (timedelta, np.timedelta64)):
1085
+ # GH#19333 is_integer evaluated True on timedelta64,
1086
+ # so we need to catch these explicitly
1087
+ return super()._arith_method(other, op)
1088
+ elif lib.is_np_dtype(getattr(other, "dtype", None), "m"):
1089
+ # Must be an np.ndarray; GH#22390
1090
+ return super()._arith_method(other, op)
1091
+
1092
+ if op in [
1093
+ operator.pow,
1094
+ ops.rpow,
1095
+ operator.mod,
1096
+ ops.rmod,
1097
+ operator.floordiv,
1098
+ ops.rfloordiv,
1099
+ divmod,
1100
+ ops.rdivmod,
1101
+ ]:
1102
+ return super()._arith_method(other, op)
1103
+
1104
+ step: Callable | None = None
1105
+ if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
1106
+ step = op
1107
+
1108
+ # TODO: if other is a RangeIndex we may have more efficient options
1109
+ right = extract_array(other, extract_numpy=True, extract_range=True)
1110
+ left = self
1111
+
1112
+ try:
1113
+ # apply if we have an override
1114
+ if step:
1115
+ with np.errstate(all="ignore"):
1116
+ rstep = step(left.step, right)
1117
+
1118
+ # we don't have a representable op
1119
+ # so return a base index
1120
+ if not is_integer(rstep) or not rstep:
1121
+ raise ValueError
1122
+
1123
+ # GH#53255
1124
+ else:
1125
+ rstep = -left.step if op == ops.rsub else left.step
1126
+
1127
+ with np.errstate(all="ignore"):
1128
+ rstart = op(left.start, right)
1129
+ rstop = op(left.stop, right)
1130
+
1131
+ res_name = ops.get_op_result_name(self, other)
1132
+ result = type(self)(rstart, rstop, rstep, name=res_name)
1133
+
1134
+ # for compat with numpy / Index with int64 dtype
1135
+ # even if we can represent as a RangeIndex, return
1136
+ # as a float64 Index if we have float-like descriptors
1137
+ if not all(is_integer(x) for x in [rstart, rstop, rstep]):
1138
+ result = result.astype("float64")
1139
+
1140
+ return result
1141
+
1142
+ except (ValueError, TypeError, ZeroDivisionError):
1143
+ # test_arithmetic_explicit_conversions
1144
+ return super()._arith_method(other, op)
1145
+
1146
+ # error: Return type "Index" of "take" incompatible with return type
1147
+ # "RangeIndex" in supertype "Index"
1148
+ def take( # type: ignore[override]
1149
+ self,
1150
+ indices,
1151
+ axis: Axis = 0,
1152
+ allow_fill: bool = True,
1153
+ fill_value=None,
1154
+ **kwargs,
1155
+ ) -> Index:
1156
+ if kwargs:
1157
+ nv.validate_take((), kwargs)
1158
+ if is_scalar(indices):
1159
+ raise TypeError("Expected indices to be array-like")
1160
+ indices = ensure_platform_int(indices)
1161
+
1162
+ # raise an exception if allow_fill is True and fill_value is not None
1163
+ self._maybe_disallow_fill(allow_fill, fill_value, indices)
1164
+
1165
+ if len(indices) == 0:
1166
+ taken = np.array([], dtype=self.dtype)
1167
+ else:
1168
+ ind_max = indices.max()
1169
+ if ind_max >= len(self):
1170
+ raise IndexError(
1171
+ f"index {ind_max} is out of bounds for axis 0 with size {len(self)}"
1172
+ )
1173
+ ind_min = indices.min()
1174
+ if ind_min < -len(self):
1175
+ raise IndexError(
1176
+ f"index {ind_min} is out of bounds for axis 0 with size {len(self)}"
1177
+ )
1178
+ taken = indices.astype(self.dtype, casting="safe")
1179
+ if ind_min < 0:
1180
+ taken %= len(self)
1181
+ if self.step != 1:
1182
+ taken *= self.step
1183
+ if self.start != 0:
1184
+ taken += self.start
1185
+
1186
+ # _constructor so RangeIndex -> Index with an int64 dtype
1187
+ return self._constructor._simple_new(taken, name=self.name)
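After the bounds checks, the gather reduces to ``indices * step + start`` with negative indices wrapped first. Illustrative:

>>> import pandas as pd
>>> pd.RangeIndex(0, 100, 10).take([1, 3, -1])
Index([10, 30, 90], dtype='int64')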
env-llmeval/lib/python3.10/site-packages/pandas/core/indexes/timedeltas.py ADDED
@@ -0,0 +1,356 @@
1
+ """ implement the TimedeltaIndex """
2
+ from __future__ import annotations
3
+
4
+ from typing import TYPE_CHECKING
5
+ import warnings
6
+
7
+ from pandas._libs import (
8
+ index as libindex,
9
+ lib,
10
+ )
11
+ from pandas._libs.tslibs import (
12
+ Resolution,
13
+ Timedelta,
14
+ to_offset,
15
+ )
16
+ from pandas._libs.tslibs.timedeltas import disallow_ambiguous_unit
17
+ from pandas.util._exceptions import find_stack_level
18
+
19
+ from pandas.core.dtypes.common import (
20
+ is_scalar,
21
+ pandas_dtype,
22
+ )
23
+ from pandas.core.dtypes.generic import ABCSeries
24
+
25
+ from pandas.core.arrays.timedeltas import TimedeltaArray
26
+ import pandas.core.common as com
27
+ from pandas.core.indexes.base import (
28
+ Index,
29
+ maybe_extract_name,
30
+ )
31
+ from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
32
+ from pandas.core.indexes.extension import inherit_names
33
+
34
+ if TYPE_CHECKING:
35
+ from pandas._typing import DtypeObj
36
+
37
+
38
+ @inherit_names(
39
+ ["__neg__", "__pos__", "__abs__", "total_seconds", "round", "floor", "ceil"]
40
+ + TimedeltaArray._field_ops,
41
+ TimedeltaArray,
42
+ wrap=True,
43
+ )
44
+ @inherit_names(
45
+ [
46
+ "components",
47
+ "to_pytimedelta",
48
+ "sum",
49
+ "std",
50
+ "median",
51
+ ],
52
+ TimedeltaArray,
53
+ )
54
+ class TimedeltaIndex(DatetimeTimedeltaMixin):
55
+ """
56
+ Immutable Index of timedelta64 data.
57
+
58
+ Represented internally as int64, and scalars returned Timedelta objects.
59
+
60
+ Parameters
61
+ ----------
62
+ data : array-like (1-dimensional), optional
63
+ Optional timedelta-like data to construct index with.
64
+ unit : {'D', 'h', 'm', 's', 'ms', 'us', 'ns'}, optional
65
+ The unit of ``data``.
66
+
67
+ .. deprecated:: 2.2.0
68
+ Use ``pd.to_timedelta`` instead.
69
+
70
+ freq : str or pandas offset object, optional
71
+ One of pandas date offset strings or corresponding objects. The string
72
+ ``'infer'`` can be passed in order to set the frequency of the index as
73
+ the inferred frequency upon creation.
74
+ dtype : numpy.dtype or str, default None
75
+ Valid ``numpy`` dtypes are ``timedelta64[ns]``, ``timedelta64[us]``,
76
+ ``timedelta64[ms]``, and ``timedelta64[s]``.
77
+ copy : bool
78
+ Make a copy of input array.
79
+ name : object
80
+ Name to be stored in the index.
81
+
82
+ Attributes
83
+ ----------
84
+ days
85
+ seconds
86
+ microseconds
87
+ nanoseconds
88
+ components
89
+ inferred_freq
90
+
91
+ Methods
92
+ -------
93
+ to_pytimedelta
94
+ to_series
95
+ round
96
+ floor
97
+ ceil
98
+ to_frame
99
+ mean
100
+
101
+ See Also
102
+ --------
103
+ Index : The base pandas Index type.
104
+ Timedelta : Represents a duration between two dates or times.
105
+ DatetimeIndex : Index of datetime64 data.
106
+ PeriodIndex : Index of Period data.
107
+ timedelta_range : Create a fixed-frequency TimedeltaIndex.
108
+
109
+ Notes
110
+ -----
111
+ To learn more about the frequency strings, please see `this link
112
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
113
+
114
+ Examples
115
+ --------
116
+ >>> pd.TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'])
117
+ TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
118
+ dtype='timedelta64[ns]', freq=None)
119
+
120
+ We can also let pandas infer the frequency when possible.
121
+
122
+ >>> pd.TimedeltaIndex(np.arange(5) * 24 * 3600 * 1e9, freq='infer')
123
+ TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
124
+ dtype='timedelta64[ns]', freq='D')
125
+ """
126
+
127
+ _typ = "timedeltaindex"
128
+
129
+ _data_cls = TimedeltaArray
130
+
131
+ @property
132
+ def _engine_type(self) -> type[libindex.TimedeltaEngine]:
133
+ return libindex.TimedeltaEngine
134
+
135
+ _data: TimedeltaArray
136
+
137
+ # Use base class method instead of DatetimeTimedeltaMixin._get_string_slice
138
+ _get_string_slice = Index._get_string_slice
139
+
140
+ # error: Signature of "_resolution_obj" incompatible with supertype
141
+ # "DatetimeIndexOpsMixin"
142
+ @property
143
+ def _resolution_obj(self) -> Resolution | None: # type: ignore[override]
144
+ return self._data._resolution_obj
145
+
146
+ # -------------------------------------------------------------------
147
+ # Constructors
148
+
149
+ def __new__(
150
+ cls,
151
+ data=None,
152
+ unit=lib.no_default,
153
+ freq=lib.no_default,
154
+ closed=lib.no_default,
155
+ dtype=None,
156
+ copy: bool = False,
157
+ name=None,
158
+ ):
159
+ if closed is not lib.no_default:
160
+ # GH#52628
161
+ warnings.warn(
162
+ f"The 'closed' keyword in {cls.__name__} construction is "
163
+ "deprecated and will be removed in a future version.",
164
+ FutureWarning,
165
+ stacklevel=find_stack_level(),
166
+ )
167
+
168
+ if unit is not lib.no_default:
169
+ # GH#55499
170
+ warnings.warn(
171
+ f"The 'unit' keyword in {cls.__name__} construction is "
172
+ "deprecated and will be removed in a future version. "
173
+ "Use pd.to_timedelta instead.",
174
+ FutureWarning,
175
+ stacklevel=find_stack_level(),
176
+ )
177
+ else:
178
+ unit = None
179
+
180
+ name = maybe_extract_name(name, data, cls)
181
+
182
+ if is_scalar(data):
183
+ cls._raise_scalar_data_error(data)
184
+
185
+ disallow_ambiguous_unit(unit)
186
+ if dtype is not None:
187
+ dtype = pandas_dtype(dtype)
188
+
189
+ if (
190
+ isinstance(data, TimedeltaArray)
191
+ and freq is lib.no_default
192
+ and (dtype is None or dtype == data.dtype)
193
+ ):
194
+ if copy:
195
+ data = data.copy()
196
+ return cls._simple_new(data, name=name)
197
+
198
+ if (
199
+ isinstance(data, TimedeltaIndex)
200
+ and freq is lib.no_default
201
+ and name is None
202
+ and (dtype is None or dtype == data.dtype)
203
+ ):
204
+ if copy:
205
+ return data.copy()
206
+ else:
207
+ return data._view()
208
+
209
+ # - Cases checked above all return/raise before reaching here - #
210
+
211
+ tdarr = TimedeltaArray._from_sequence_not_strict(
212
+ data, freq=freq, unit=unit, dtype=dtype, copy=copy
213
+ )
214
+ refs = None
215
+ if not copy and isinstance(data, (ABCSeries, Index)):
216
+ refs = data._references
217
+
218
+ return cls._simple_new(tdarr, name=name, refs=refs)
219
+
220
+ # -------------------------------------------------------------------
221
+
222
+ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
223
+ """
224
+ Can we compare values of the given dtype to our own?
225
+ """
226
+ return lib.is_np_dtype(dtype, "m") # aka self._data._is_recognized_dtype
227
+
228
+ # -------------------------------------------------------------------
229
+ # Indexing Methods
230
+
231
+ def get_loc(self, key):
232
+ """
233
+ Get integer location for requested label
234
+
235
+ Returns
236
+ -------
237
+ loc : int, slice, or ndarray[int]
238
+ """
239
+ self._check_indexing_error(key)
240
+
241
+ try:
242
+ key = self._data._validate_scalar(key, unbox=False)
243
+ except TypeError as err:
244
+ raise KeyError(key) from err
245
+
246
+ return Index.get_loc(self, key)
247
+
248
+ def _parse_with_reso(self, label: str):
249
+ # the "with_reso" is a no-op for TimedeltaIndex
250
+ parsed = Timedelta(label)
251
+ return parsed, None
252
+
253
+ def _parsed_string_to_bounds(self, reso, parsed: Timedelta):
254
+ # reso is unused, included to match signature of DTI/PI
255
+ lbound = parsed.round(parsed.resolution_string)
256
+ rbound = lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns")
257
+ return lbound, rbound
258
+
259
+ # -------------------------------------------------------------------
260
+
261
+ @property
262
+ def inferred_type(self) -> str:
263
+ return "timedelta64"
264
+
265
+
266
+ def timedelta_range(
267
+ start=None,
268
+ end=None,
269
+ periods: int | None = None,
270
+ freq=None,
271
+ name=None,
272
+ closed=None,
273
+ *,
274
+ unit: str | None = None,
275
+ ) -> TimedeltaIndex:
276
+ """
277
+ Return a fixed frequency TimedeltaIndex with day as the default.
278
+
279
+ Parameters
280
+ ----------
281
+ start : str or timedelta-like, default None
282
+ Left bound for generating timedeltas.
283
+ end : str or timedelta-like, default None
284
+ Right bound for generating timedeltas.
285
+ periods : int, default None
286
+ Number of periods to generate.
287
+ freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D'
288
+ Frequency strings can have multiples, e.g. '5h'.
289
+ name : str, default None
290
+ Name of the resulting TimedeltaIndex.
291
+ closed : str, default None
292
+ Make the interval closed with respect to the given frequency to
293
+ the 'left', 'right', or both sides (None).
294
+ unit : str, default None
295
+ Specify the desired resolution of the result.
296
+
297
+ .. versionadded:: 2.0.0
298
+
299
+ Returns
300
+ -------
301
+ TimedeltaIndex
302
+
303
+ Notes
304
+ -----
305
+ Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
306
+ exactly three must be specified. If ``freq`` is omitted, the resulting
307
+ ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
308
+ ``start`` and ``end`` (closed on both sides).
309
+
310
+ To learn more about the frequency strings, please see `this link
311
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
312
+
313
+ Examples
314
+ --------
315
+ >>> pd.timedelta_range(start='1 day', periods=4)
316
+ TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
317
+ dtype='timedelta64[ns]', freq='D')
318
+
319
+ The ``closed`` parameter specifies which endpoint is included. The default
320
+ behavior is to include both endpoints.
321
+
322
+ >>> pd.timedelta_range(start='1 day', periods=4, closed='right')
323
+ TimedeltaIndex(['2 days', '3 days', '4 days'],
324
+ dtype='timedelta64[ns]', freq='D')
325
+
326
+ The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
327
+ Only fixed frequencies can be passed, non-fixed frequencies such as
328
+ 'M' (month end) will raise.
329
+
330
+ >>> pd.timedelta_range(start='1 day', end='2 days', freq='6h')
331
+ TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
332
+ '1 days 18:00:00', '2 days 00:00:00'],
333
+ dtype='timedelta64[ns]', freq='6h')
334
+
335
+ Specify ``start``, ``end``, and ``periods``; the frequency is generated
336
+ automatically (linearly spaced).
337
+
338
+ >>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
339
+ TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
340
+ '5 days 00:00:00'],
341
+ dtype='timedelta64[ns]', freq=None)
342
+
343
+ **Specify a unit**
344
+
345
+ >>> pd.timedelta_range("1 Day", periods=3, freq="100000D", unit="s")
346
+ TimedeltaIndex(['1 days', '100001 days', '200001 days'],
347
+ dtype='timedelta64[s]', freq='100000D')
348
+ """
349
+ if freq is None and com.any_none(periods, start, end):
350
+ freq = "D"
351
+
352
+ freq = to_offset(freq)
353
+ tdarr = TimedeltaArray._generate_range(
354
+ start, end, periods, freq, closed=closed, unit=unit
355
+ )
356
+ return TimedeltaIndex._simple_new(tdarr, name=name)
env-llmeval/lib/python3.10/site-packages/pandas/core/methods/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/core/methods/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/methods/__pycache__/describe.cpython-310.pyc ADDED
Binary file (10.8 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/methods/__pycache__/selectn.cpython-310.pyc ADDED
Binary file (6 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/methods/__pycache__/to_dict.cpython-310.pyc ADDED
Binary file (8 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/core/methods/describe.py ADDED
@@ -0,0 +1,416 @@
1
+ """
2
+ Module responsible for execution of NDFrame.describe() method.
3
+
4
+ Method NDFrame.describe() delegates actual execution to function describe_ndframe().
5
+ """
6
+ from __future__ import annotations
7
+
8
+ from abc import (
9
+ ABC,
10
+ abstractmethod,
11
+ )
12
+ from typing import (
13
+ TYPE_CHECKING,
14
+ Callable,
15
+ cast,
16
+ )
17
+
18
+ import numpy as np
19
+
20
+ from pandas._libs.tslibs import Timestamp
21
+ from pandas._typing import (
22
+ DtypeObj,
23
+ NDFrameT,
24
+ npt,
25
+ )
26
+ from pandas.util._validators import validate_percentile
27
+
28
+ from pandas.core.dtypes.common import (
29
+ is_bool_dtype,
30
+ is_numeric_dtype,
31
+ )
32
+ from pandas.core.dtypes.dtypes import (
33
+ ArrowDtype,
34
+ DatetimeTZDtype,
35
+ ExtensionDtype,
36
+ )
37
+
38
+ from pandas.core.arrays.floating import Float64Dtype
39
+ from pandas.core.reshape.concat import concat
40
+
41
+ from pandas.io.formats.format import format_percentiles
42
+
43
+ if TYPE_CHECKING:
44
+ from collections.abc import (
45
+ Hashable,
46
+ Sequence,
47
+ )
48
+
49
+ from pandas import (
50
+ DataFrame,
51
+ Series,
52
+ )
53
+
54
+
55
+ def describe_ndframe(
56
+ *,
57
+ obj: NDFrameT,
58
+ include: str | Sequence[str] | None,
59
+ exclude: str | Sequence[str] | None,
60
+ percentiles: Sequence[float] | np.ndarray | None,
61
+ ) -> NDFrameT:
62
+ """Describe series or dataframe.
63
+
64
+ Called from pandas.core.generic.NDFrame.describe()
65
+
66
+ Parameters
67
+ ----------
68
+ obj: DataFrame or Series
69
+ Either dataframe or series to be described.
70
+ include : 'all', list-like of dtypes or None (default), optional
71
+ A white list of data types to include in the result. Ignored for ``Series``.
72
+ exclude : list-like of dtypes or None (default), optional
73
+ A black list of data types to omit from the result. Ignored for ``Series``.
74
+ percentiles : list-like of numbers, optional
75
+ The percentiles to include in the output. All should fall between 0 and 1.
76
+ The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and
77
+ 75th percentiles.
78
+
79
+ Returns
80
+ -------
81
+ Dataframe or series description.
82
+ """
83
+ percentiles = _refine_percentiles(percentiles)
84
+
85
+ describer: NDFrameDescriberAbstract
86
+ if obj.ndim == 1:
87
+ describer = SeriesDescriber(
88
+ obj=cast("Series", obj),
89
+ )
90
+ else:
91
+ describer = DataFrameDescriber(
92
+ obj=cast("DataFrame", obj),
93
+ include=include,
94
+ exclude=exclude,
95
+ )
96
+
97
+ result = describer.describe(percentiles=percentiles)
98
+ return cast(NDFrameT, result)
99
+
100
+
101
+ class NDFrameDescriberAbstract(ABC):
102
+ """Abstract class for describing dataframe or series.
103
+
104
+ Parameters
105
+ ----------
106
+ obj : Series or DataFrame
107
+ Object to be described.
108
+ """
109
+
110
+ def __init__(self, obj: DataFrame | Series) -> None:
111
+ self.obj = obj
112
+
113
+ @abstractmethod
114
+ def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame | Series:
115
+ """Do describe either series or dataframe.
116
+
117
+ Parameters
118
+ ----------
119
+ percentiles : list-like of numbers
120
+ The percentiles to include in the output.
121
+ """
122
+
123
+
124
+ class SeriesDescriber(NDFrameDescriberAbstract):
125
+ """Class responsible for creating series description."""
126
+
127
+ obj: Series
128
+
129
+ def describe(self, percentiles: Sequence[float] | np.ndarray) -> Series:
130
+ describe_func = select_describe_func(
131
+ self.obj,
132
+ )
133
+ return describe_func(self.obj, percentiles)
134
+
135
+
136
+ class DataFrameDescriber(NDFrameDescriberAbstract):
137
+ """Class responsible for creating dataobj description.
138
+
139
+ Parameters
140
+ ----------
141
+ obj : DataFrame
142
+ DataFrame to be described.
143
+ include : 'all', list-like of dtypes or None
144
+ A white list of data types to include in the result.
145
+ exclude : list-like of dtypes or None
146
+ A black list of data types to omit from the result.
147
+ """
148
+
149
+ obj: DataFrame
150
+
151
+ def __init__(
152
+ self,
153
+ obj: DataFrame,
154
+ *,
155
+ include: str | Sequence[str] | None,
156
+ exclude: str | Sequence[str] | None,
157
+ ) -> None:
158
+ self.include = include
159
+ self.exclude = exclude
160
+
161
+ if obj.ndim == 2 and obj.columns.size == 0:
162
+ raise ValueError("Cannot describe a DataFrame without columns")
163
+
164
+ super().__init__(obj)
165
+
166
+ def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame:
167
+ data = self._select_data()
168
+
169
+ ldesc: list[Series] = []
170
+ for _, series in data.items():
171
+ describe_func = select_describe_func(series)
172
+ ldesc.append(describe_func(series, percentiles))
173
+
174
+ col_names = reorder_columns(ldesc)
175
+ d = concat(
176
+ [x.reindex(col_names, copy=False) for x in ldesc],
177
+ axis=1,
178
+ sort=False,
179
+ )
180
+ d.columns = data.columns.copy()
181
+ return d
182
+
183
+ def _select_data(self) -> DataFrame:
184
+ """Select columns to be described."""
185
+ if (self.include is None) and (self.exclude is None):
186
+ # when some numerics are found, keep only numerics
187
+ default_include: list[npt.DTypeLike] = [np.number, "datetime"]
188
+ data = self.obj.select_dtypes(include=default_include)
189
+ if len(data.columns) == 0:
190
+ data = self.obj
191
+ elif self.include == "all":
192
+ if self.exclude is not None:
193
+ msg = "exclude must be None when include is 'all'"
194
+ raise ValueError(msg)
195
+ data = self.obj
196
+ else:
197
+ data = self.obj.select_dtypes(
198
+ include=self.include,
199
+ exclude=self.exclude,
200
+ )
201
+ return data
202
+
203
+
204
+ def reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]:
205
+ """Set a convenient order for rows for display."""
206
+ names: list[Hashable] = []
207
+ seen_names: set[Hashable] = set()
208
+ ldesc_indexes = sorted((x.index for x in ldesc), key=len)
209
+ for idxnames in ldesc_indexes:
210
+ for name in idxnames:
211
+ if name not in seen_names:
212
+ seen_names.add(name)
213
+ names.append(name)
214
+ return names
215
+
216
+
217
+ def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series:
218
+ """Describe series containing numerical data.
219
+
220
+ Parameters
221
+ ----------
222
+ series : Series
223
+ Series to be described.
224
+ percentiles : list-like of numbers
225
+ The percentiles to include in the output.
226
+ """
227
+ from pandas import Series
228
+
229
+ formatted_percentiles = format_percentiles(percentiles)
230
+
231
+ stat_index = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
232
+ d = (
233
+ [series.count(), series.mean(), series.std(), series.min()]
234
+ + series.quantile(percentiles).tolist()
235
+ + [series.max()]
236
+ )
237
+ # GH#48340 - always return float on non-complex numeric data
238
+ dtype: DtypeObj | None
239
+ if isinstance(series.dtype, ExtensionDtype):
240
+ if isinstance(series.dtype, ArrowDtype):
241
+ if series.dtype.kind == "m":
242
+ # GH53001: describe timedeltas with object dtype
243
+ dtype = None
244
+ else:
245
+ import pyarrow as pa
246
+
247
+ dtype = ArrowDtype(pa.float64())
248
+ else:
249
+ dtype = Float64Dtype()
250
+ elif series.dtype.kind in "iufb":
251
+ # i.e. numeric but exclude complex dtype
252
+ dtype = np.dtype("float")
253
+ else:
254
+ dtype = None
255
+ return Series(d, index=stat_index, name=series.name, dtype=dtype)
256
+
257
+
258
+ def describe_categorical_1d(
259
+ data: Series,
260
+ percentiles_ignored: Sequence[float],
261
+ ) -> Series:
+     """Describe series containing categorical data.
+
+     Parameters
+     ----------
+     data : Series
+         Series to be described.
+     percentiles_ignored : list-like of numbers
+         Ignored, but in place to unify interface.
+     """
+     names = ["count", "unique", "top", "freq"]
+     objcounts = data.value_counts()
+     count_unique = len(objcounts[objcounts != 0])
+     if count_unique > 0:
+         top, freq = objcounts.index[0], objcounts.iloc[0]
+         dtype = None
+     else:
+         # If the Series is empty, set 'top' and 'freq' to NaN
+         # to maintain output shape consistency
+         top, freq = np.nan, np.nan
+         dtype = "object"
+
+     result = [data.count(), count_unique, top, freq]
+
+     from pandas import Series
+
+     return Series(result, index=names, name=data.name, dtype=dtype)
+
+
+ def describe_timestamp_as_categorical_1d(
+     data: Series,
+     percentiles_ignored: Sequence[float],
+ ) -> Series:
+     """Describe series containing timestamp data treated as categorical.
+
+     Parameters
+     ----------
+     data : Series
+         Series to be described.
+     percentiles_ignored : list-like of numbers
+         Ignored, but in place to unify interface.
+     """
+     names = ["count", "unique"]
+     objcounts = data.value_counts()
+     count_unique = len(objcounts[objcounts != 0])
+     result: list[float | Timestamp] = [data.count(), count_unique]
+     dtype = None
+     if count_unique > 0:
+         top, freq = objcounts.index[0], objcounts.iloc[0]
+         tz = data.dt.tz
+         asint = data.dropna().values.view("i8")
+         top = Timestamp(top)
+         if top.tzinfo is not None and tz is not None:
+             # Don't tz_localize(None) if key is already tz-aware
+             top = top.tz_convert(tz)
+         else:
+             top = top.tz_localize(tz)
+         names += ["top", "freq", "first", "last"]
+         result += [
+             top,
+             freq,
+             Timestamp(asint.min(), tz=tz),
+             Timestamp(asint.max(), tz=tz),
+         ]
+     else:
+         # If the Series is empty, set 'top' and 'freq' to NaN
+         # to maintain output shape consistency
+         names += ["top", "freq"]
+         result += [np.nan, np.nan]
+         dtype = "object"
+
+     from pandas import Series
+
+     return Series(result, index=names, name=data.name, dtype=dtype)
+
+
+ def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series:
+     """Describe series containing datetime64 dtype.
+
+     Parameters
+     ----------
+     data : Series
+         Series to be described.
+     percentiles : list-like of numbers
+         The percentiles to include in the output.
+     """
+     # GH-30164
+     from pandas import Series
+
+     formatted_percentiles = format_percentiles(percentiles)
+
+     stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
+     d = (
+         [data.count(), data.mean(), data.min()]
+         + data.quantile(percentiles).tolist()
+         + [data.max()]
+     )
+     return Series(d, index=stat_index, name=data.name)
+
+
+ def select_describe_func(
+     data: Series,
+ ) -> Callable:
+     """Select proper function for describing series based on data type.
+
+     Parameters
+     ----------
+     data : Series
+         Series to be described.
+     """
+     if is_bool_dtype(data.dtype):
+         return describe_categorical_1d
+     elif is_numeric_dtype(data):
+         return describe_numeric_1d
+     elif data.dtype.kind == "M" or isinstance(data.dtype, DatetimeTZDtype):
+         return describe_timestamp_1d
+     elif data.dtype.kind == "m":
+         return describe_numeric_1d
+     else:
+         return describe_categorical_1d
+
+
+ def _refine_percentiles(
+     percentiles: Sequence[float] | np.ndarray | None,
+ ) -> npt.NDArray[np.float64]:
+     """
+     Ensure that percentiles are unique and sorted.
+
+     Parameters
+     ----------
+     percentiles : list-like of numbers, optional
+         The percentiles to include in the output.
+     """
+     if percentiles is None:
+         return np.array([0.25, 0.5, 0.75])
+
+     # explicit conversion of `percentiles` to list
+     percentiles = list(percentiles)
+
+     # get them all to be in [0, 1]
+     validate_percentile(percentiles)
+
+     # median should always be included
+     if 0.5 not in percentiles:
+         percentiles.append(0.5)
+
+     percentiles = np.asarray(percentiles)
+
+     # sort and check for duplicates
+     unique_pcts = np.unique(percentiles)
+     assert percentiles is not None
+     if len(unique_pcts) < len(percentiles):
+         raise ValueError("percentiles cannot contain duplicates")
+
+     return unique_pcts
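
A minimal usage sketch (editorial note, not part of the upstream diff) of how these helpers surface through the public Series.describe API, assuming pandas >= 2.0:

    import pandas as pd

    # Object/categorical data is routed to describe_categorical_1d:
    # count / unique / top / freq.
    s = pd.Series(["a", "a", "b", None])
    print(s.describe())  # count=3, unique=2, top='a', freq=2

    # Datetime data is routed to describe_timestamp_1d:
    # count / mean / min / percentiles / max.  _refine_percentiles
    # always adds the median, so 50% appears even when not requested.
    ts = pd.Series(pd.to_datetime(["2021-01-01", "2021-01-03"]))
    print(ts.describe(percentiles=[0.25]))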
env-llmeval/lib/python3.10/site-packages/pandas/core/methods/selectn.py ADDED
@@ -0,0 +1,269 @@
+ """
+ Implementation of nlargest and nsmallest.
+ """
+
+ from __future__ import annotations
+
+ from collections.abc import (
+     Hashable,
+     Sequence,
+ )
+ from typing import (
+     TYPE_CHECKING,
+     cast,
+     final,
+ )
+
+ import numpy as np
+
+ from pandas._libs import algos as libalgos
+
+ from pandas.core.dtypes.common import (
+     is_bool_dtype,
+     is_complex_dtype,
+     is_integer_dtype,
+     is_list_like,
+     is_numeric_dtype,
+     needs_i8_conversion,
+ )
+ from pandas.core.dtypes.dtypes import BaseMaskedDtype
+
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         DtypeObj,
+         IndexLabel,
+     )
+
+     from pandas import (
+         DataFrame,
+         Series,
+     )
+
+
+ class SelectN:
+     def __init__(self, obj, n: int, keep: str) -> None:
+         self.obj = obj
+         self.n = n
+         self.keep = keep
+
+         if self.keep not in ("first", "last", "all"):
+             raise ValueError('keep must be either "first", "last" or "all"')
+
+     def compute(self, method: str) -> DataFrame | Series:
+         raise NotImplementedError
+
+     @final
+     def nlargest(self):
+         return self.compute("nlargest")
+
+     @final
+     def nsmallest(self):
+         return self.compute("nsmallest")
+
+     @final
+     @staticmethod
+     def is_valid_dtype_n_method(dtype: DtypeObj) -> bool:
+         """
+         Helper function to determine if dtype is valid for
+         nsmallest/nlargest methods
+         """
+         if is_numeric_dtype(dtype):
+             return not is_complex_dtype(dtype)
+         return needs_i8_conversion(dtype)
+
+
+ class SelectNSeries(SelectN):
+     """
+     Implement n largest/smallest for Series
+
+     Parameters
+     ----------
+     obj : Series
+     n : int
+     keep : {'first', 'last'}, default 'first'
+
+     Returns
+     -------
+     nordered : Series
+     """
+
+     def compute(self, method: str) -> Series:
+         from pandas.core.reshape.concat import concat
+
+         n = self.n
+         dtype = self.obj.dtype
+         if not self.is_valid_dtype_n_method(dtype):
+             raise TypeError(f"Cannot use method '{method}' with dtype {dtype}")
+
+         if n <= 0:
+             return self.obj[[]]
+
+         dropped = self.obj.dropna()
+         nan_index = self.obj.drop(dropped.index)
+
+         # slow method
+         if n >= len(self.obj):
+             ascending = method == "nsmallest"
+             return self.obj.sort_values(ascending=ascending).head(n)
+
+         # fast method
+         new_dtype = dropped.dtype
+
+         # Similar to algorithms._ensure_data
+         arr = dropped._values
+         if needs_i8_conversion(arr.dtype):
+             arr = arr.view("i8")
+         elif isinstance(arr.dtype, BaseMaskedDtype):
+             arr = arr._data
+         else:
+             arr = np.asarray(arr)
+             if arr.dtype.kind == "b":
+                 arr = arr.view(np.uint8)
+
+         if method == "nlargest":
+             arr = -arr
+             if is_integer_dtype(new_dtype):
+                 # GH 21426: ensure reverse ordering at boundaries
+                 arr -= 1
+
+             elif is_bool_dtype(new_dtype):
+                 # GH 26154: ensure False is smaller than True
+                 arr = 1 - (-arr)
+
+         if self.keep == "last":
+             arr = arr[::-1]
+
+         nbase = n
+         narr = len(arr)
+         n = min(n, narr)
+
+         # arr passed into kth_smallest must be contiguous. We copy
+         # here because kth_smallest will modify its input
+         # avoid OOB access with kth_smallest_c when n <= 0
+         if len(arr) > 0:
+             kth_val = libalgos.kth_smallest(arr.copy(order="C"), n - 1)
+         else:
+             kth_val = np.nan
+         (ns,) = np.nonzero(arr <= kth_val)
+         inds = ns[arr[ns].argsort(kind="mergesort")]
+
+         if self.keep != "all":
+             inds = inds[:n]
+             findex = nbase
+         else:
+             if len(inds) < nbase <= len(nan_index) + len(inds):
+                 findex = len(nan_index) + len(inds)
+             else:
+                 findex = len(inds)
+
+         if self.keep == "last":
+             # reverse indices
+             inds = narr - 1 - inds
+
+         return concat([dropped.iloc[inds], nan_index]).iloc[:findex]
+
+
+ class SelectNFrame(SelectN):
+     """
+     Implement n largest/smallest for DataFrame
+
+     Parameters
+     ----------
+     obj : DataFrame
+     n : int
+     keep : {'first', 'last'}, default 'first'
+     columns : list or str
+
+     Returns
+     -------
+     nordered : DataFrame
+     """
+
+     def __init__(self, obj: DataFrame, n: int, keep: str, columns: IndexLabel) -> None:
+         super().__init__(obj, n, keep)
+         if not is_list_like(columns) or isinstance(columns, tuple):
+             columns = [columns]
+
+         columns = cast(Sequence[Hashable], columns)
+         columns = list(columns)
+         self.columns = columns
+
+     def compute(self, method: str) -> DataFrame:
+         from pandas.core.api import Index
+
+         n = self.n
+         frame = self.obj
+         columns = self.columns
+
+         for column in columns:
+             dtype = frame[column].dtype
+             if not self.is_valid_dtype_n_method(dtype):
+                 raise TypeError(
+                     f"Column {repr(column)} has dtype {dtype}, "
+                     f"cannot use method {repr(method)} with this dtype"
+                 )
+
+         def get_indexer(current_indexer, other_indexer):
+             """
+             Helper function to concat `current_indexer` and `other_indexer`
+             depending on `method`
+             """
+             if method == "nsmallest":
+                 return current_indexer.append(other_indexer)
+             else:
+                 return other_indexer.append(current_indexer)
+
+         # Below we save and reset the index in case index contains duplicates
+         original_index = frame.index
+         cur_frame = frame = frame.reset_index(drop=True)
+         cur_n = n
+         indexer = Index([], dtype=np.int64)
+
+         for i, column in enumerate(columns):
+             # For each column we apply method to cur_frame[column].
+             # If it's the last column or if we have the number of
+             # results desired we are done.
+             # Otherwise there are duplicates of the largest/smallest
+             # value and we need to look at the rest of the columns
+             # to determine which of the rows with the largest/smallest
+             # value in the column to keep.
+             series = cur_frame[column]
+             is_last_column = len(columns) - 1 == i
+             values = getattr(series, method)(
+                 cur_n, keep=self.keep if is_last_column else "all"
+             )
+
+             if is_last_column or len(values) <= cur_n:
+                 indexer = get_indexer(indexer, values.index)
+                 break
+
+             # Now find all values which are equal to
+             # the (nsmallest: largest)/(nlargest: smallest)
+             # from our series.
+             border_value = values == values[values.index[-1]]
+
+             # Some of these values are among the top-n
+             # some aren't.
+             unsafe_values = values[border_value]
+
+             # These values are definitely among the top-n
+             safe_values = values[~border_value]
+             indexer = get_indexer(indexer, safe_values.index)
+
+             # Go on and separate the unsafe_values on the remaining
+             # columns.
+             cur_frame = cur_frame.loc[unsafe_values.index]
+             cur_n = n - len(indexer)
+
+         frame = frame.take(indexer)
+
+         # Restore the index on frame
+         frame.index = original_index.take(indexer)
+
+         # If there is only one column, the frame is already sorted.
+         if len(columns) == 1:
+             return frame
+
+         ascending = method == "nsmallest"
+
+         return frame.sort_values(columns, ascending=ascending, kind="mergesort")
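
For orientation (editorial note, not part of the upstream diff), a short sketch of the behavior implemented above, assuming pandas >= 2.0:

    import pandas as pd

    s = pd.Series([10, 3, 3, 7, 3])
    print(s.nlargest(2))  # 10 and 7 (SelectNSeries fast path)
    # keep="all" retains every row tied with the boundary value,
    # so the result may be longer than n (here: all three 3s).
    print(s.nsmallest(2, keep="all"))

    df = pd.DataFrame({"a": [1, 1, 2], "b": [3, 2, 1]})
    # Ties in 'a' are resolved by the next listed column, mirroring
    # the per-column loop in SelectNFrame.compute.
    print(df.nsmallest(2, columns=["a", "b"]))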
env-llmeval/lib/python3.10/site-packages/pandas/core/methods/to_dict.py ADDED
@@ -0,0 +1,272 @@
+ from __future__ import annotations
+
+ from typing import (
+     TYPE_CHECKING,
+     Literal,
+     overload,
+ )
+ import warnings
+
+ import numpy as np
+
+ from pandas._libs import (
+     lib,
+     missing as libmissing,
+ )
+ from pandas.util._exceptions import find_stack_level
+
+ from pandas.core.dtypes.cast import maybe_box_native
+ from pandas.core.dtypes.dtypes import (
+     BaseMaskedDtype,
+     ExtensionDtype,
+ )
+
+ from pandas.core import common as com
+
+ if TYPE_CHECKING:
+     from pandas._typing import MutableMappingT
+
+     from pandas import DataFrame
+
+
+ @overload
+ def to_dict(
+     df: DataFrame,
+     orient: Literal["dict", "list", "series", "split", "tight", "index"] = ...,
+     *,
+     into: type[MutableMappingT] | MutableMappingT,
+     index: bool = ...,
+ ) -> MutableMappingT:
+     ...
+
+
+ @overload
+ def to_dict(
+     df: DataFrame,
+     orient: Literal["records"],
+     *,
+     into: type[MutableMappingT] | MutableMappingT,
+     index: bool = ...,
+ ) -> list[MutableMappingT]:
+     ...
+
+
+ @overload
+ def to_dict(
+     df: DataFrame,
+     orient: Literal["dict", "list", "series", "split", "tight", "index"] = ...,
+     *,
+     into: type[dict] = ...,
+     index: bool = ...,
+ ) -> dict:
+     ...
+
+
+ @overload
+ def to_dict(
+     df: DataFrame,
+     orient: Literal["records"],
+     *,
+     into: type[dict] = ...,
+     index: bool = ...,
+ ) -> list[dict]:
+     ...
+
+
+ # error: Incompatible default for argument "into" (default has type "type[dict
+ # [Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT")
+ def to_dict(
+     df: DataFrame,
+     orient: Literal[
+         "dict", "list", "series", "split", "tight", "records", "index"
+     ] = "dict",
+     *,
+     into: type[MutableMappingT] | MutableMappingT = dict,  # type: ignore[assignment]
+     index: bool = True,
+ ) -> MutableMappingT | list[MutableMappingT]:
+     """
+     Convert the DataFrame to a dictionary.
+
+     The type of the key-value pairs can be customized with the parameters
+     (see below).
+
+     Parameters
+     ----------
+     orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'}
+         Determines the type of the values of the dictionary.
+
+         - 'dict' (default) : dict like {column -> {index -> value}}
+         - 'list' : dict like {column -> [values]}
+         - 'series' : dict like {column -> Series(values)}
+         - 'split' : dict like
+           {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
+         - 'tight' : dict like
+           {'index' -> [index], 'columns' -> [columns], 'data' -> [values],
+           'index_names' -> [index.names], 'column_names' -> [column.names]}
+         - 'records' : list like
+           [{column -> value}, ... , {column -> value}]
+         - 'index' : dict like {index -> {column -> value}}
+
+         .. versionadded:: 1.4.0
+             'tight' as an allowed value for the ``orient`` argument
+
+     into : class, default dict
+         The collections.abc.MutableMapping subclass used for all Mappings
+         in the return value. Can be the actual class or an empty
+         instance of the mapping type you want. If you want a
+         collections.defaultdict, you must pass it initialized.
+
+     index : bool, default True
+         Whether to include the index item (and index_names item if `orient`
+         is 'tight') in the returned dictionary. Can only be ``False``
+         when `orient` is 'split' or 'tight'.
+
+         .. versionadded:: 2.0.0
+
+     Returns
+     -------
+     dict, list or collections.abc.Mapping
+         Return a collections.abc.MutableMapping object representing the
+         DataFrame. The resulting transformation depends on the `orient` parameter.
+     """
+     if not df.columns.is_unique:
+         warnings.warn(
+             "DataFrame columns are not unique, some columns will be omitted.",
+             UserWarning,
+             stacklevel=find_stack_level(),
+         )
+     # GH16122
+     into_c = com.standardize_mapping(into)
+
+     # error: Incompatible types in assignment (expression has type "str",
+     # variable has type "Literal['dict', 'list', 'series', 'split', 'tight',
+     # 'records', 'index']")
+     orient = orient.lower()  # type: ignore[assignment]
+
+     if not index and orient not in ["split", "tight"]:
+         raise ValueError(
+             "'index=False' is only valid when 'orient' is 'split' or 'tight'"
+         )
+
+     if orient == "series":
+         # GH46470 Return quickly if orient series to avoid creating dtype objects
+         return into_c((k, v) for k, v in df.items())
+
+     box_native_indices = [
+         i
+         for i, col_dtype in enumerate(df.dtypes.values)
+         if col_dtype == np.dtype(object) or isinstance(col_dtype, ExtensionDtype)
+     ]
+     box_na_values = [
+         lib.no_default if not isinstance(col_dtype, BaseMaskedDtype) else libmissing.NA
+         for i, col_dtype in enumerate(df.dtypes.values)
+     ]
+     are_all_object_dtype_cols = len(box_native_indices) == len(df.dtypes)
+
+     if orient == "dict":
+         return into_c((k, v.to_dict(into=into)) for k, v in df.items())
+
+     elif orient == "list":
+         object_dtype_indices_as_set: set[int] = set(box_native_indices)
+         return into_c(
+             (
+                 k,
+                 list(map(maybe_box_native, v.to_numpy(na_value=box_na_values[i])))
+                 if i in object_dtype_indices_as_set
+                 else list(map(maybe_box_native, v.to_numpy())),
+             )
+             for i, (k, v) in enumerate(df.items())
+         )
+
+     elif orient == "split":
+         data = df._create_data_for_split_and_tight_to_dict(
+             are_all_object_dtype_cols, box_native_indices
+         )
+
+         return into_c(
+             ((("index", df.index.tolist()),) if index else ())
+             + (
+                 ("columns", df.columns.tolist()),
+                 ("data", data),
+             )
+         )
+
+     elif orient == "tight":
+         data = df._create_data_for_split_and_tight_to_dict(
+             are_all_object_dtype_cols, box_native_indices
+         )
+
+         return into_c(
+             ((("index", df.index.tolist()),) if index else ())
+             + (
+                 ("columns", df.columns.tolist()),
+                 (
+                     "data",
+                     [
+                         list(map(maybe_box_native, t))
+                         for t in df.itertuples(index=False, name=None)
+                     ],
+                 ),
+             )
+             + ((("index_names", list(df.index.names)),) if index else ())
+             + (("column_names", list(df.columns.names)),)
+         )
+
+     elif orient == "records":
+         columns = df.columns.tolist()
+         if are_all_object_dtype_cols:
+             rows = (
+                 dict(zip(columns, row)) for row in df.itertuples(index=False, name=None)
+             )
+             return [
+                 into_c((k, maybe_box_native(v)) for k, v in row.items()) for row in rows
+             ]
+         else:
+             data = [
+                 into_c(zip(columns, t)) for t in df.itertuples(index=False, name=None)
+             ]
+             if box_native_indices:
+                 object_dtype_indices_as_set = set(box_native_indices)
+                 object_dtype_cols = {
+                     col
+                     for i, col in enumerate(df.columns)
+                     if i in object_dtype_indices_as_set
+                 }
+                 for row in data:
+                     for col in object_dtype_cols:
+                         row[col] = maybe_box_native(row[col])
+             return data
+
+     elif orient == "index":
+         if not df.index.is_unique:
+             raise ValueError("DataFrame index must be unique for orient='index'.")
+         columns = df.columns.tolist()
+         if are_all_object_dtype_cols:
+             return into_c(
+                 (t[0], dict(zip(df.columns, map(maybe_box_native, t[1:]))))
+                 for t in df.itertuples(name=None)
+             )
+         elif box_native_indices:
+             object_dtype_indices_as_set = set(box_native_indices)
+             is_object_dtype_by_index = [
+                 i in object_dtype_indices_as_set for i in range(len(df.columns))
+             ]
+             return into_c(
+                 (
+                     t[0],
+                     {
+                         columns[i]: maybe_box_native(v)
+                         if is_object_dtype_by_index[i]
+                         else v
+                         for i, v in enumerate(t[1:])
+                     },
+                 )
+                 for t in df.itertuples(name=None)
+             )
+         else:
+             return into_c(
+                 (t[0], dict(zip(df.columns, t[1:]))) for t in df.itertuples(name=None)
+             )
+
+     else:
+         raise ValueError(f"orient '{orient}' not understood")
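
A brief usage sketch of the orientations handled above (editorial note, not part of the upstream diff; pandas >= 2.0 assumed, since index=False requires it):

    from collections import OrderedDict

    import pandas as pd

    df = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]}, index=["r1", "r2"])

    print(df.to_dict())                  # {'x': {'r1': 1, 'r2': 2}, 'y': {'r1': 'a', 'r2': 'b'}}
    print(df.to_dict(orient="records"))  # [{'x': 1, 'y': 'a'}, {'x': 2, 'y': 'b'}]
    # index=False is only accepted for the 'split' and 'tight' orients.
    print(df.to_dict(orient="split", index=False))
    # `into` controls the mapping type used throughout the result.
    print(df.to_dict(into=OrderedDict))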
env-llmeval/lib/python3.10/site-packages/pandas/core/reshape/__init__.py ADDED
File without changes