index | package | name | docstring | code | signature
---|---|---|---|---|---|
65,379 |
pandas.core.indexes.base
|
__nonzero__
| null |
@final
def __nonzero__(self) -> NoReturn:
    raise ValueError(
        f"The truth value of a {type(self).__name__} is ambiguous. "
        "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
    )
|
(self) -> NoReturn
|
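A minimal usage sketch of the behavior above (assuming only that pandas is importable as `pd`): truth-testing any Index dispatches to `__nonzero__` and raises the ValueError constructed there.

```python
import pandas as pd

idx = pd.Index([1, 2, 3])
try:
    bool(idx)  # truth-testing dispatches to Index.__nonzero__
except ValueError as err:
    print(err)  # "The truth value of a Index is ambiguous. Use a.empty, ..."
```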
65,380 |
pandas.core.indexes.category
|
__contains__
|
Return a boolean indicating whether the provided key is in the index.
Parameters
----------
key : label
The key to check if it is present in the index.
Returns
-------
bool
Whether the key is in the index.
Raises
------
TypeError
If the key is not hashable.
See Also
--------
Index.isin : Returns an ndarray of boolean dtype indicating whether the
list-like key is in the index.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> 2 in idx
True
>>> 6 in idx
False
|
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
    # if key is a NaN, check if any NaN is in self.
    if is_valid_na_for_dtype(key, self.categories.dtype):
        return self.hasnans
    return contains(self, key, container=self._engine)
|
(self, key: Any) -> bool
|
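A short sketch of the NaN branch above, with illustrative values: a NaN key is answered via `hasnans` rather than a lookup in the categories.

```python
import numpy as np
import pandas as pd

ci = pd.CategoricalIndex(["a", "b", np.nan])
print("a" in ci)     # True: regular engine lookup
print(np.nan in ci)  # True: answered via ci.hasnans, not the categories
print("z" in ci)     # False
```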
65,381 |
pandas.core.indexes.base
|
__copy__
| null |
@final
def __copy__(self, **kwargs) -> Self:
    return self.copy(**kwargs)
|
(self, **kwargs) -> Self
|
65,382 |
pandas.core.indexes.base
|
__deepcopy__
|
Parameters
----------
memo, default None
Standard signature. Unused
|
@final
def __deepcopy__(self, memo=None) -> Self:
    """
    Parameters
    ----------
    memo, default None
        Standard signature. Unused
    """
    return self.copy(deep=True)
|
(self, memo=None) -> Self
|
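Both hooks delegate to `Index.copy`, so the stdlib `copy` module behaves as expected; a minimal sketch:

```python
import copy

import pandas as pd

idx = pd.Index([1, 2, 3], name="x")
shallow = copy.copy(idx)   # routed through __copy__ -> idx.copy()
deep = copy.deepcopy(idx)  # routed through __deepcopy__ -> idx.copy(deep=True)
print(shallow.equals(idx), deep.equals(idx))  # True True
```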
65,388 |
pandas.core.indexes.base
|
__getitem__
|
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
|
def __getitem__(self, key):
    """
    Override numpy.ndarray's __getitem__ method to work as desired.
    This function adds lists and Series as valid boolean indexers
    (ndarrays only supports ndarray with dtype=bool).
    If resulting ndim != 1, plain ndarray is returned instead of
    corresponding `Index` subclass.
    """
    getitem = self._data.__getitem__

    if is_integer(key) or is_float(key):
        # GH#44051 exclude bool, which would return a 2d ndarray
        key = com.cast_scalar_indexer(key)
        return getitem(key)

    if isinstance(key, slice):
        # This case is separated from the conditional above to avoid
        # pessimizing the com.is_bool_indexer and ndim checks.
        return self._getitem_slice(key)

    if com.is_bool_indexer(key):
        # if we have list[bools, length=1e5] then doing this check+convert
        # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__
        # time below from 3.8 ms to 496 µs
        # if we already have ndarray[bool], the overhead is 1.4 µs or .25%
        if isinstance(getattr(key, "dtype", None), ExtensionDtype):
            key = key.to_numpy(dtype=bool, na_value=False)
        else:
            key = np.asarray(key, dtype=bool)
        if not isinstance(self.dtype, ExtensionDtype):
            if len(key) == 0 and len(key) != len(self):
                warnings.warn(
                    "Using a boolean indexer with length 0 on an Index with "
                    "length greater than 0 is deprecated and will raise in a "
                    "future version.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )

    result = getitem(key)
    # Because we ruled out integer above, we always get an arraylike here
    if result.ndim > 1:
        disallow_ndim_indexing(result)

    # NB: Using _constructor._simple_new would break if MultiIndex
    # didn't override __getitem__
    return self._constructor._simple_new(result, name=self._name)
|
(self, key)
|
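A sketch of the three paths above (scalar, slice fastpath, boolean list), with illustrative values:

```python
import pandas as pd

idx = pd.Index([10, 20, 30])
print(idx[0])                    # 10: scalar path via cast_scalar_indexer
print(idx[::-1])                 # Index([30, 20, 10], ...): slice fastpath
print(idx[[True, False, True]])  # Index([10, 30], ...): list accepted as bool indexer
```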
65,390 |
pandas.core.indexes.base
|
__iadd__
| null |
def __iadd__(self, other):
    # alias for __add__
    return self + other
|
(self, other)
|
65,391 |
pandas.core.indexes.base
|
__invert__
| null |
def __invert__(self) -> Index:
    # GH#8875
    return self._unary_method(operator.inv)
|
(self) -> pandas.core.indexes.base.Index
|
65,392 |
pandas.core.base
|
__iter__
|
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
iterator
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> for x in s:
... print(x)
1
2
3
|
def __iter__(self) -> Iterator:
    """
    Return an iterator of the values.
    These are each a scalar type, which is a Python scalar
    (for str, int, float) or a pandas scalar
    (for Timestamp/Timedelta/Interval/Period)
    Returns
    -------
    iterator
    Examples
    --------
    >>> s = pd.Series([1, 2, 3])
    >>> for x in s:
    ...     print(x)
    1
    2
    3
    """
    # We are explicitly making element iterators.
    if not isinstance(self._values, np.ndarray):
        # Check type instead of dtype to catch DTA/TDA
        return iter(self._values)
    else:
        return map(self._values.item, range(self._values.size))
|
(self) -> 'Iterator'
|
65,394 |
pandas.core.indexes.base
|
__len__
|
Return the length of the Index.
|
def __len__(self) -> int:
    """
    Return the length of the Index.
    """
    return len(self._data)
|
(self) -> int
|
65,399 |
pandas.core.indexes.base
|
__neg__
| null |
def __neg__(self) -> Index:
    return self._unary_method(operator.neg)
|
(self) -> pandas.core.indexes.base.Index
|
65,400 |
pandas.core.indexes.category
|
__new__
| null |
def __new__(
    cls,
    data=None,
    categories=None,
    ordered=None,
    dtype: Dtype | None = None,
    copy: bool = False,
    name: Hashable | None = None,
) -> Self:
    name = maybe_extract_name(name, data, cls)

    if is_scalar(data):
        # GH#38944 include None here, which pre-2.0 subbed in []
        cls._raise_scalar_data_error(data)

    data = Categorical(
        data, categories=categories, ordered=ordered, dtype=dtype, copy=copy
    )
    return cls._simple_new(data, name=name)
|
(cls, data=None, categories=None, ordered=None, dtype: 'Dtype | None' = None, copy: 'bool' = False, name: 'Hashable | None' = None) -> 'Self'
|
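A construction sketch for the constructor above; scalar data hits `_raise_scalar_data_error`. Values are illustrative:

```python
import pandas as pd

ci = pd.CategoricalIndex(["a", "b", "a"], categories=["a", "b", "c"], ordered=True)
print(ci.categories)  # Index(['a', 'b', 'c'], dtype='object')
print(ci.ordered)     # True
try:
    pd.CategoricalIndex("a")  # scalar data is rejected
except TypeError as err:
    print(err)
```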
65,403 |
pandas.core.indexes.base
|
__pos__
| null |
def __pos__(self) -> Index:
    return self._unary_method(operator.pos)
|
(self) -> pandas.core.indexes.base.Index
|
65,408 |
pandas.core.indexes.base
|
__reduce__
| null |
def __reduce__(self):
    d = {"data": self._data, "name": self.name}
    return _new_Index, (type(self), d), None
|
(self)
|
65,409 |
pandas.core.indexes.base
|
__repr__
|
Return a string representation for this object.
|
@final
def __repr__(self) -> str_t:
    """
    Return a string representation for this object.
    """
    klass_name = type(self).__name__
    data = self._format_data()
    attrs = self._format_attrs()
    attrs_str = [f"{k}={v}" for k, v in attrs]
    prepr = ", ".join(attrs_str)
    return f"{klass_name}({data}{prepr})"
|
(self) -> str
|
65,418 |
pandas.core.indexes.base
|
__setitem__
| null |
@final
def __setitem__(self, key, value) -> None:
    raise TypeError("Index does not support mutable operations")
|
(self, key, value) -> NoneType
|
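An immutability sketch for the method above:

```python
import pandas as pd

idx = pd.Index([1, 2, 3])
try:
    idx[0] = 99
except TypeError as err:
    print(err)  # "Index does not support mutable operations"
```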
65,423 |
pandas.core.indexes.base
|
_arith_method
| null |
def _arith_method(self, other, op):
    if (
        isinstance(other, Index)
        and is_object_dtype(other.dtype)
        and type(other) is not Index
    ):
        # We return NotImplemented for object-dtype index *subclasses* so they have
        # a chance to implement ops before we unwrap them.
        # See https://github.com/pandas-dev/pandas/issues/31109
        return NotImplemented

    return super()._arith_method(other, op)
|
(self, other, op)
|
65,424 |
pandas.core.indexes.base
|
_assert_can_do_setop
| null |
@final
def _assert_can_do_setop(self, other) -> bool:
    if not is_list_like(other):
        raise TypeError("Input must be Index or array-like")
    return True
|
(self, other) -> bool
|
65,425 |
pandas.core.indexes.base
|
_can_hold_identifiers_and_holds_name
|
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
|
@final
def _can_hold_identifiers_and_holds_name(self, name) -> bool:
    """
    Faster check for ``name in self`` when we know `name` is a Python
    identifier (e.g. in NDFrame.__getattr__, which hits this to support
    . key lookup). For indexes that can't hold identifiers (everything
    but object & categorical) we just return False.
    https://github.com/pandas-dev/pandas/issues/19764
    """
    if (
        is_object_dtype(self.dtype)
        or is_string_dtype(self.dtype)
        or isinstance(self.dtype, CategoricalDtype)
    ):
        return name in self
    return False
|
(self, name) -> bool
|
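This private check is what keeps `df.<column>` attribute access cheap; an indirect sketch (the method itself is internal, so it is exercised via `__getattr__`):

```python
import pandas as pd

df = pd.DataFrame({"price": [1.0, 2.0]})
# NDFrame.__getattr__ consults _can_hold_identifiers_and_holds_name on the
# object-dtype columns Index before falling back to normal attribute lookup.
print(df.price.sum())  # 3.0
```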
65,426 |
pandas.core.indexes.base
|
_check_indexing_error
| null |
def _check_indexing_error(self, key):
    if not is_scalar(key):
        # if key is not a scalar, directly raise an error (the code below
        # would convert to numpy arrays and raise later any way) - GH29926
        raise InvalidIndexError(key)
|
(self, key)
|
65,427 |
pandas.core.indexes.base
|
_check_indexing_method
|
Raise if we have a get_indexer `method` that is not supported or valid.
|
@final
def _check_indexing_method(
    self,
    method: str_t | None,
    limit: int | None = None,
    tolerance=None,
) -> None:
    """
    Raise if we have a get_indexer `method` that is not supported or valid.
    """
    if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]:
        # in practice the clean_reindex_fill_method call would raise
        # before we get here
        raise ValueError("Invalid fill method")  # pragma: no cover

    if self._is_multi:
        if method == "nearest":
            raise NotImplementedError(
                "method='nearest' not implemented yet "
                "for MultiIndex; see GitHub issue 9365"
            )
        if method in ("pad", "backfill"):
            if tolerance is not None:
                raise NotImplementedError(
                    "tolerance not implemented yet for MultiIndex"
                )

    if isinstance(self.dtype, (IntervalDtype, CategoricalDtype)):
        # GH#37871 for now this is only for IntervalIndex and CategoricalIndex
        if method is not None:
            raise NotImplementedError(
                f"method {method} not yet implemented for {type(self).__name__}"
            )

    if method is None:
        if tolerance is not None:
            raise ValueError(
                "tolerance argument only valid if doing pad, "
                "backfill or nearest reindexing"
            )
        if limit is not None:
            raise ValueError(
                "limit argument only valid if doing pad, "
                "backfill or nearest reindexing"
            )
|
(self, method: str | None, limit: Optional[int] = None, tolerance=None) -> NoneType
|
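A sketch of the categorical branch above: fill methods are rejected for a CategoricalIndex (illustrative values):

```python
import pandas as pd

ci = pd.CategoricalIndex(["a", "b"])
try:
    ci.get_indexer(["a"], method="pad")
except NotImplementedError as err:
    print(err)  # "method pad not yet implemented for CategoricalIndex"
```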
65,428 |
pandas.core.indexes.base
|
_cleanup
| null |
@final
def _cleanup(self) -> None:
    self._engine.clear_mapping()
|
(self) -> NoneType
|
65,429 |
pandas.core.indexes.base
|
_cmp_method
|
Wrapper used to dispatch comparison operations.
|
def _cmp_method(self, other, op):
    """
    Wrapper used to dispatch comparison operations.
    """
    if self.is_(other):
        # fastpath
        if op in {operator.eq, operator.le, operator.ge}:
            arr = np.ones(len(self), dtype=bool)
            if self._can_hold_na and not isinstance(self, ABCMultiIndex):
                # TODO: should set MultiIndex._can_hold_na = False?
                arr[self.isna()] = False
            return arr
        elif op is operator.ne:
            arr = np.zeros(len(self), dtype=bool)
            if self._can_hold_na and not isinstance(self, ABCMultiIndex):
                arr[self.isna()] = True
            return arr

    if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len(
        self
    ) != len(other):
        raise ValueError("Lengths must match to compare")

    if not isinstance(other, ABCMultiIndex):
        other = extract_array(other, extract_numpy=True)
    else:
        other = np.asarray(other)

    if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray):
        # e.g. PeriodArray, Categorical
        result = op(self._values, other)
    elif isinstance(self._values, ExtensionArray):
        result = op(self._values, other)
    elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex):
        # don't pass MultiIndex
        result = ops.comp_method_OBJECT_ARRAY(op, self._values, other)
    else:
        result = ops.comparison_op(self._values, other, op)
    return result
|
(self, other, op)
|
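A sketch of the identity fastpath and the length check above (illustrative values): comparing an index with itself forces NaN slots to False even for `==`.

```python
import numpy as np
import pandas as pd

idx = pd.Index([1.0, np.nan, 3.0])
print(idx == idx)  # [ True False  True]: NaN positions forced False on the fastpath
try:
    idx == pd.Index([1.0, 2.0])
except ValueError as err:
    print(err)  # "Lengths must match to compare"
```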
65,430 |
pandas.core.indexes.category
|
_concat
| null |
def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
    # if calling index is category, don't check dtype of others
    try:
        cat = Categorical._concat_same_type(
            [self._is_dtype_compat(c) for c in to_concat]
        )
    except TypeError:
        # not all to_concat elements are among our categories (or NA)
        res = concat_compat([x._values for x in to_concat])
        return Index(res, name=name)
    else:
        return type(self)._simple_new(cat, name=name)
|
(self, to_concat: 'list[Index]', name: 'Hashable') -> 'Index'
|
65,431 |
pandas.core.indexes.base
|
_construct_result
| null |
@final
def _construct_result(self, result, name):
    if isinstance(result, tuple):
        return (
            Index(result[0], name=name, dtype=result[0].dtype),
            Index(result[1], name=name, dtype=result[1].dtype),
        )
    return Index(result, name=name, dtype=result.dtype)
|
(self, result, name)
|
65,432 |
pandas.core.indexes.base
|
_convert_can_do_setop
| null |
def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]:
    if not isinstance(other, Index):
        other = Index(other, name=self.name)
        result_name = self.name
    else:
        result_name = get_op_result_name(self, other)
    return other, result_name
|
(self, other) -> 'tuple[Index, Hashable]'
|
65,433 |
pandas.core.indexes.base
|
_convert_slice_indexer
|
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'loc', 'getitem'}
|
def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]):
    """
    Convert a slice indexer.
    By definition, these are labels unless 'iloc' is passed in.
    Floats are not allowed as the start, step, or stop of the slice.
    Parameters
    ----------
    key : label of the slice bound
    kind : {'loc', 'getitem'}
    """
    # potentially cast the bounds to integers
    start, stop, step = key.start, key.stop, key.step

    # figure out if this is a positional indexer
    is_index_slice = is_valid_positional_slice(key)

    # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able
    # to simplify this.
    if lib.is_np_dtype(self.dtype, "f"):
        # We always treat __getitem__ slicing as label-based
        # translate to locations
        if kind == "getitem" and is_index_slice and not start == stop and step != 0:
            # exclude step=0 from the warning because it will raise anyway
            # start/stop both None e.g. [:] or [::-1] won't change.
            # exclude start==stop since it will be empty either way, or
            # will be [:] or [::-1] which won't change
            warnings.warn(
                # GH#49612
                "The behavior of obj[i:j] with a float-dtype index is "
                "deprecated. In a future version, this will be treated as "
                "positional instead of label-based. For label-based slicing, "
                "use obj.loc[i:j] instead",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        return self.slice_indexer(start, stop, step)

    if kind == "getitem":
        # called from the getitem slicers, validate that we are in fact integers
        if is_index_slice:
            # In this case the _validate_indexer checks below are redundant
            return key
        elif self.dtype.kind in "iu":
            # Note: these checks are redundant if we know is_index_slice
            self._validate_indexer("slice", key.start, "getitem")
            self._validate_indexer("slice", key.stop, "getitem")
            self._validate_indexer("slice", key.step, "getitem")
            return key

    # convert the slice to an indexer here; checking that the user didn't
    # pass a positional slice to loc
    is_positional = is_index_slice and self._should_fallback_to_positional

    # if we are mixed and have integers
    if is_positional:
        try:
            # Validate start & stop
            if start is not None:
                self.get_loc(start)
            if stop is not None:
                self.get_loc(stop)
            is_positional = False
        except KeyError:
            pass

    if com.is_null_slice(key):
        # It doesn't matter if we are positional or label based
        indexer = key
    elif is_positional:
        if kind == "loc":
            # GH#16121, GH#24612, GH#31810
            raise TypeError(
                "Slicing a positional slice with .loc is not allowed, "
                "Use .loc with labels or .iloc with positions instead.",
            )
        indexer = key
    else:
        indexer = self.slice_indexer(start, stop, step)

    return indexer
|
(self, key: slice, kind: Literal['loc', 'getitem'])
|
65,434 |
pandas.core.indexes.base
|
_convert_tolerance
| null |
def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray:
    # override this method on subclasses
    tolerance = np.asarray(tolerance)
    if target.size != tolerance.size and tolerance.size > 1:
        raise ValueError("list-like tolerance size must match target index size")
    elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number):
        if tolerance.ndim > 0:
            raise ValueError(
                f"tolerance argument for {type(self).__name__} with dtype "
                f"{self.dtype} must contain numeric elements if it is list type"
            )
        raise ValueError(
            f"tolerance argument for {type(self).__name__} with dtype {self.dtype} "
            f"must be numeric if it is a scalar: {repr(tolerance)}"
        )
    return tolerance
|
(self, tolerance, target: numpy.ndarray | pandas.core.indexes.base.Index) -> numpy.ndarray
|
65,435 |
pandas.core.indexes.base
|
_difference
| null |
def _difference(self, other, sort):
    # overridden by RangeIndex
    this = self
    if isinstance(self, ABCCategoricalIndex) and self.hasnans and other.hasnans:
        this = this.dropna()
    other = other.unique()
    the_diff = this[other.get_indexer_for(this) == -1]
    the_diff = the_diff if this.is_unique else the_diff.unique()
    the_diff = _maybe_try_sort(the_diff, sort)
    return the_diff
|
(self, other, sort)
|
65,436 |
pandas.core.indexes.base
|
_difference_compat
| null |
@final
def _difference_compat(
    self, target: Index, indexer: npt.NDArray[np.intp]
) -> ArrayLike:
    # Compatibility for PeriodArray, for which __sub__ returns an ndarray[object]
    # of DateOffset objects, which do not support __abs__ (and would be slow
    # if they did)
    if isinstance(self.dtype, PeriodDtype):
        # Note: we only get here with matching dtypes
        own_values = cast("PeriodArray", self._data)._ndarray
        target_values = cast("PeriodArray", target._data)._ndarray
        diff = own_values[indexer] - target_values
    else:
        # error: Unsupported left operand type for - ("ExtensionArray")
        diff = self._values[indexer] - target._values  # type: ignore[operator]
    return abs(diff)
|
(self, target: 'Index', indexer: 'npt.NDArray[np.intp]') -> 'ArrayLike'
|
65,439 |
pandas.core.indexes.base
|
_drop_level_numbers
|
Drop MultiIndex levels by level _number_, not name.
|
@final
def _drop_level_numbers(self, levnums: list[int]):
    """
    Drop MultiIndex levels by level _number_, not name.
    """
    if not levnums and not isinstance(self, ABCMultiIndex):
        return self
    if len(levnums) >= self.nlevels:
        raise ValueError(
            f"Cannot remove {len(levnums)} levels from an index with "
            f"{self.nlevels} levels: at least one level must be left."
        )
    # The two checks above guarantee that here self is a MultiIndex
    self = cast("MultiIndex", self)

    new_levels = list(self.levels)
    new_codes = list(self.codes)
    new_names = list(self.names)

    for i in levnums:
        new_levels.pop(i)
        new_codes.pop(i)
        new_names.pop(i)

    if len(new_levels) == 1:
        lev = new_levels[0]

        if len(lev) == 0:
            # If lev is empty, lev.take will fail GH#42055
            if len(new_codes[0]) == 0:
                # GH#45230 preserve RangeIndex here
                # see test_reset_index_empty_rangeindex
                result = lev[:0]
            else:
                res_values = algos.take(lev._values, new_codes[0], allow_fill=True)
                # _constructor instead of type(lev) for RangeIndex compat GH#35230
                result = lev._constructor._simple_new(res_values, name=new_names[0])
        else:
            # set nan if needed
            mask = new_codes[0] == -1
            result = new_levels[0].take(new_codes[0])
            if mask.any():
                result = result.putmask(mask, np.nan)

            result._name = new_names[0]

        return result
    else:
        from pandas.core.indexes.multi import MultiIndex

        return MultiIndex(
            levels=new_levels,
            codes=new_codes,
            names=new_names,
            verify_integrity=False,
        )
|
(self, levnums: list[int])
|
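A sketch of the single-remaining-level branch above; the method is private, and the values here are illustrative:

```python
import pandas as pd

mi = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["letter", "number"])
# dropping level 0 leaves one level, which is returned as a flat Index
print(mi._drop_level_numbers([0]))  # Index([1, 2], dtype='int64', name='number')
```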
65,440 |
pandas.core.indexes.base
|
_dti_setop_align_tzs
|
With mismatched timezones, cast both to UTC.
|
@final
def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]:
    """
    With mismatched timezones, cast both to UTC.
    """
    # Caller is responsible for checking
    # `self.dtype != other.dtype`
    if (
        isinstance(self, ABCDatetimeIndex)
        and isinstance(other, ABCDatetimeIndex)
        and self.tz is not None
        and other.tz is not None
    ):
        # GH#39328, GH#45357
        left = self.tz_convert("UTC")
        right = other.tz_convert("UTC")
        return left, right
    return self, other
|
(self, other: pandas.core.indexes.base.Index, setop: str) -> tuple[pandas.core.indexes.base.Index, pandas.core.indexes.base.Index]
|
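An indirect sketch of the alignment above via a public set op (illustrative dates and zones):

```python
import pandas as pd

left = pd.date_range("2024-01-01", periods=2, tz="US/Eastern")
right = pd.date_range("2024-01-01", periods=2, tz="Europe/Berlin")
# mismatched zones are aligned to UTC before the set op (GH#39328)
print(left.union(right).tz)  # UTC
```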
65,441 |
pandas.core.base
|
_duplicated
| null |
@final
def _duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
    arr = self._values
    if isinstance(arr, ExtensionArray):
        return arr.duplicated(keep=keep)
    return algorithms.duplicated(arr, keep=keep)
|
(self, keep: 'DropKeep' = 'first') -> 'npt.NDArray[np.bool_]'
|
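A sketch via the public `Index.duplicated`, which routes through the helper above:

```python
import pandas as pd

idx = pd.Index(["a", "b", "a"])
print(idx.duplicated(keep="first"))  # [False False  True]
print(idx.duplicated(keep=False))    # [ True False  True]
```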
65,442 |
pandas.core.indexes.base
|
_filter_indexer_tolerance
| null |
@final
def _filter_indexer_tolerance(
    self,
    target: Index,
    indexer: npt.NDArray[np.intp],
    tolerance,
) -> npt.NDArray[np.intp]:
    distance = self._difference_compat(target, indexer)
    return np.where(distance <= tolerance, indexer, -1)
|
(self, target: 'Index', indexer: 'npt.NDArray[np.intp]', tolerance) -> 'npt.NDArray[np.intp]'
|
65,443 |
pandas.core.indexes.base
|
_find_common_type_compat
|
Implementation of find_common_type that adjusts for Index-specific
special cases.
|
@final
def _find_common_type_compat(self, target) -> DtypeObj:
    """
    Implementation of find_common_type that adjusts for Index-specific
    special cases.
    """
    target_dtype, _ = infer_dtype_from(target)

    # special case: if one dtype is uint64 and the other a signed int, return object
    # See https://github.com/pandas-dev/pandas/issues/26778 for discussion
    # Now it's:
    # * float | [u]int -> float
    # * uint64 | signed int -> object
    # We may change union(float | [u]int) to go to object.
    if self.dtype == "uint64" or target_dtype == "uint64":
        if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype(
            target_dtype
        ):
            return _dtype_obj

    dtype = find_result_type(self.dtype, target)
    dtype = common_dtype_categorical_compat([self, target], dtype)
    return dtype
|
(self, target) -> Union[numpy.dtype, pandas.core.dtypes.base.ExtensionDtype]
|
65,444 |
pandas.core.indexes.category
|
_format_attrs
|
Return a list of tuples of the (attr,formatted_value)
|
def _format_attrs(self):
    """
    Return a list of tuples of the (attr, formatted_value)
    """
    attrs: list[tuple[str, str | int | bool | None]]
    attrs = [
        (
            "categories",
            f"[{', '.join(self._data._repr_categories())}]",
        ),
        ("ordered", self.ordered),
    ]
    extra = super()._format_attrs()
    return attrs + extra
|
(self)
|
65,445 |
pandas.core.indexes.base
|
_format_data
|
Return the formatted data as a unicode string.
|
@final
def _format_data(self, name=None) -> str_t:
    """
    Return the formatted data as a unicode string.
    """
    # do we want to justify (only do so for non-objects)
    is_justify = True

    if self.inferred_type == "string":
        is_justify = False
    elif isinstance(self.dtype, CategoricalDtype):
        self = cast("CategoricalIndex", self)
        if is_object_dtype(self.categories.dtype):
            is_justify = False
    elif isinstance(self, ABCRangeIndex):
        # We will do the relevant formatting via attrs
        return ""

    return format_object_summary(
        self,
        self._formatter_func,
        is_justify=is_justify,
        name=name,
        line_break_each_value=self._is_multi,
    )
|
(self, name=None) -> str
|
65,446 |
pandas.core.indexes.base
|
_format_duplicate_message
|
Construct the DataFrame for a DuplicateLabelError.
This returns a DataFrame indicating the labels and positions
of duplicates in an index. This should only be called when it's
already known that duplicates are present.
Examples
--------
>>> idx = pd.Index(['a', 'b', 'a'])
>>> idx._format_duplicate_message()
    positions
label
a      [0, 2]
|
@final
def _format_duplicate_message(self) -> DataFrame:
    """
    Construct the DataFrame for a DuplicateLabelError.
    This returns a DataFrame indicating the labels and positions
    of duplicates in an index. This should only be called when it's
    already known that duplicates are present.
    Examples
    --------
    >>> idx = pd.Index(['a', 'b', 'a'])
    >>> idx._format_duplicate_message()
        positions
    label
    a      [0, 2]
    """
    from pandas import Series

    duplicates = self[self.duplicated(keep="first")].unique()
    assert len(duplicates)

    out = (
        Series(np.arange(len(self)), copy=False)
        .groupby(self, observed=False)
        .agg(list)[duplicates]
    )
    if self._is_multi:
        # test_format_duplicate_labels_message_multi
        # error: "Type[Index]" has no attribute "from_tuples"  [attr-defined]
        out.index = type(self).from_tuples(out.index)  # type: ignore[attr-defined]

    if self.nlevels == 1:
        out = out.rename_axis("label")
    return out.to_frame(name="positions")
|
(self) -> 'DataFrame'
|
65,447 |
pandas.core.indexes.base
|
_format_flat
|
Render a string representation of the Index.
|
@final
def _format_flat(
    self,
    *,
    include_name: bool,
    formatter: Callable | None = None,
) -> list[str_t]:
    """
    Render a string representation of the Index.
    """
    header = []
    if include_name:
        header.append(
            pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
            if self.name is not None
            else ""
        )

    if formatter is not None:
        return header + list(self.map(formatter))

    return self._format_with_header(header=header, na_rep=self._default_na_rep)
|
(self, *, include_name: bool, formatter: Optional[Callable] = None) -> list[str]
|
65,448 |
pandas.core.indexes.base
|
_format_with_header
| null |
def _format_with_header(self, *, header: list[str_t], na_rep: str_t) -> list[str_t]:
    from pandas.io.formats.format import format_array

    values = self._values

    if (
        is_object_dtype(values.dtype)
        or is_string_dtype(values.dtype)
        or isinstance(self.dtype, (IntervalDtype, CategoricalDtype))
    ):
        # TODO: why do we need different justify for these cases?
        justify = "all"
    else:
        justify = "left"
    # passing leading_space=False breaks test_format_missing,
    # test_index_repr_in_frame_with_nan, but would otherwise make
    # trim_front unnecessary
    formatted = format_array(values, None, justify=justify)
    result = trim_front(formatted)
    return header + result
|
(self, *, header: list[str], na_rep: str) -> list[str]
|
65,449 |
pandas.core.indexes.extension
|
_from_join_target
| null |
def _from_join_target(self, result: np.ndarray) -> ArrayLike:
    assert result.dtype == self._data._ndarray.dtype
    return self._data._from_backing_data(result)
|
(self, result: 'np.ndarray') -> 'ArrayLike'
|
65,450 |
pandas.core.indexes.base
|
_get_default_index_names
|
Get names of index.
Parameters
----------
names : int, str or 1-dimensional list, default None
Index names to set.
default : str
Default name of index.
Raises
------
TypeError
if names not str or list-like
|
def _get_default_index_names(
    self, names: Hashable | Sequence[Hashable] | None = None, default=None
) -> list[Hashable]:
    """
    Get names of index.
    Parameters
    ----------
    names : int, str or 1-dimensional list, default None
        Index names to set.
    default : str
        Default name of index.
    Raises
    ------
    TypeError
        if names not str or list-like
    """
    from pandas.core.indexes.multi import MultiIndex

    if names is not None:
        if isinstance(names, (int, str)):
            names = [names]

    if not isinstance(names, list) and names is not None:
        raise ValueError("Index names must be str or 1-dimensional list")

    if not names:
        if isinstance(self, MultiIndex):
            names = com.fill_missing_names(self.names)
        else:
            names = [default] if self.name is None else [self.name]

    return names
|
(self, names: 'Hashable | Sequence[Hashable] | None' = None, default=None) -> 'list[Hashable]'
|
65,451 |
pandas.core.indexes.extension
|
_get_engine_target
| null |
def _get_engine_target(self) -> np.ndarray:
    return self._data._ndarray
|
(self) -> 'np.ndarray'
|
65,452 |
pandas.core.indexes.base
|
_get_fill_indexer
| null |
@final
def _get_fill_indexer(
    self, target: Index, method: str_t, limit: int | None = None, tolerance=None
) -> npt.NDArray[np.intp]:
    if self._is_multi:
        if not (self.is_monotonic_increasing or self.is_monotonic_decreasing):
            raise ValueError("index must be monotonic increasing or decreasing")
        encoded = self.append(target)._engine.values  # type: ignore[union-attr]
        self_encoded = Index(encoded[: len(self)])
        target_encoded = Index(encoded[len(self) :])
        return self_encoded._get_fill_indexer(
            target_encoded, method, limit, tolerance
        )

    if self.is_monotonic_increasing and target.is_monotonic_increasing:
        target_values = target._get_engine_target()
        own_values = self._get_engine_target()
        if not isinstance(target_values, np.ndarray) or not isinstance(
            own_values, np.ndarray
        ):
            raise NotImplementedError

        if method == "pad":
            indexer = libalgos.pad(own_values, target_values, limit=limit)
        else:
            # i.e. "backfill"
            indexer = libalgos.backfill(own_values, target_values, limit=limit)
    else:
        indexer = self._get_fill_indexer_searchsorted(target, method, limit)
    if tolerance is not None and len(self):
        indexer = self._filter_indexer_tolerance(target, indexer, tolerance)
    return indexer
|
(self, target: 'Index', method: 'str_t', limit: 'int | None' = None, tolerance=None) -> 'npt.NDArray[np.intp]'
|
65,453 |
pandas.core.indexes.base
|
_get_fill_indexer_searchsorted
|
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets.
|
@final
def _get_fill_indexer_searchsorted(
    self, target: Index, method: str_t, limit: int | None = None
) -> npt.NDArray[np.intp]:
    """
    Fallback pad/backfill get_indexer that works for monotonic decreasing
    indexes and non-monotonic targets.
    """
    if limit is not None:
        raise ValueError(
            f"limit argument for {repr(method)} method only well-defined "
            "if index and target are monotonic"
        )

    side: Literal["left", "right"] = "left" if method == "pad" else "right"

    # find exact matches first (this simplifies the algorithm)
    indexer = self.get_indexer(target)
    nonexact = indexer == -1
    indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)
    if side == "left":
        # searchsorted returns "indices into a sorted array such that,
        # if the corresponding elements in v were inserted before the
        # indices, the order of a would be preserved".
        # Thus, we need to subtract 1 to find values to the left.
        indexer[nonexact] -= 1
        # This also mapped not found values (values of 0 from
        # np.searchsorted) to -1, which conveniently is also our
        # sentinel for missing values
    else:
        # Mark indices to the right of the largest value as not found
        indexer[indexer == len(self)] = -1
    return indexer
|
(self, target: 'Index', method: 'str_t', limit: 'int | None' = None) -> 'npt.NDArray[np.intp]'
|
65,454 |
pandas.core.indexes.base
|
_get_indexer
| null |
def _get_indexer(
    self,
    target: Index,
    method: str_t | None = None,
    limit: int | None = None,
    tolerance=None,
) -> npt.NDArray[np.intp]:
    if tolerance is not None:
        tolerance = self._convert_tolerance(tolerance, target)

    if method in ["pad", "backfill"]:
        indexer = self._get_fill_indexer(target, method, limit, tolerance)
    elif method == "nearest":
        indexer = self._get_nearest_indexer(target, limit, tolerance)
    else:
        if target._is_multi and self._is_multi:
            engine = self._engine
            # error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]"
            # has no attribute "_extract_level_codes"
            tgt_values = engine._extract_level_codes(  # type: ignore[union-attr]
                target
            )
        else:
            tgt_values = target._get_engine_target()

        indexer = self._engine.get_indexer(tgt_values)

    return ensure_platform_int(indexer)
|
(self, target: 'Index', method: 'str_t | None' = None, limit: 'int | None' = None, tolerance=None) -> 'npt.NDArray[np.intp]'
|
65,455 |
pandas.core.indexes.base
|
_get_indexer_non_comparable
|
Called from get_indexer or get_indexer_non_unique when the target
is of a non-comparable dtype.
For get_indexer lookups with method=None, get_indexer is an _equality_
check, so non-comparable dtypes mean we will always have no matches.
For get_indexer lookups with a method, get_indexer is an _inequality_
check, so non-comparable dtypes mean we will always raise TypeError.
Parameters
----------
target : Index
method : str or None
unique : bool, default True
* True if called from get_indexer.
* False if called from get_indexer_non_unique.
Raises
------
TypeError
If doing an inequality check, i.e. method is not None.
|
@final
def _get_indexer_non_comparable(
    self, target: Index, method, unique: bool = True
) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
    """
    Called from get_indexer or get_indexer_non_unique when the target
    is of a non-comparable dtype.
    For get_indexer lookups with method=None, get_indexer is an _equality_
    check, so non-comparable dtypes mean we will always have no matches.
    For get_indexer lookups with a method, get_indexer is an _inequality_
    check, so non-comparable dtypes mean we will always raise TypeError.
    Parameters
    ----------
    target : Index
    method : str or None
    unique : bool, default True
        * True if called from get_indexer.
        * False if called from get_indexer_non_unique.
    Raises
    ------
    TypeError
        If doing an inequality check, i.e. method is not None.
    """
    if method is not None:
        other_dtype = _unpack_nested_dtype(target)
        raise TypeError(f"Cannot compare dtypes {self.dtype} and {other_dtype}")

    no_matches = -1 * np.ones(target.shape, dtype=np.intp)
    if unique:
        # This is for get_indexer
        return no_matches
    else:
        # This is for get_indexer_non_unique
        missing = np.arange(len(target), dtype=np.intp)
        return no_matches, missing
|
(self, target: 'Index', method, unique: 'bool' = True) -> 'npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]'
|
65,456 |
pandas.core.indexes.base
|
_get_indexer_strict
|
Analogue to get_indexer that raises if any elements are missing.
|
def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]:
    """
    Analogue to get_indexer that raises if any elements are missing.
    """
    keyarr = key
    if not isinstance(keyarr, Index):
        keyarr = com.asarray_tuplesafe(keyarr)

    if self._index_as_unique:
        indexer = self.get_indexer_for(keyarr)
        keyarr = self.reindex(keyarr)[0]
    else:
        keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr)

    self._raise_if_missing(keyarr, indexer, axis_name)

    keyarr = self.take(indexer)
    if isinstance(key, Index):
        # GH 42790 - Preserve name from an Index
        keyarr.name = key.name
    if lib.is_np_dtype(keyarr.dtype, "mM") or isinstance(
        keyarr.dtype, DatetimeTZDtype
    ):
        # DTI/TDI.take can infer a freq in some cases when we dont want one
        if isinstance(key, list) or (
            isinstance(key, type(self))
            # "Index" has no attribute "freq"
            and key.freq is None  # type: ignore[attr-defined]
        ):
            keyarr = keyarr._with_freq(None)

    return keyarr, indexer
|
(self, key, axis_name: str) -> tuple[pandas.core.indexes.base.Index, numpy.ndarray]
|
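An indirect sketch of the strict path above: `.loc` list lookups route through it, so missing labels raise via `_raise_if_missing` (illustrative values):

```python
import pandas as pd

s = pd.Series([1, 2], index=["a", "b"])
try:
    s.loc[["a", "z"]]  # strict lookup: any missing label raises
except KeyError as err:
    print(err)  # "['z'] not in index"
```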
65,457 |
pandas.core.indexes.base
|
_get_join_target
|
Get the ndarray or ExtensionArray that we can pass to the join
functions.
|
@final
def _get_join_target(self) -> np.ndarray:
    """
    Get the ndarray or ExtensionArray that we can pass to the join
    functions.
    """
    if isinstance(self._values, BaseMaskedArray):
        # This is only used if our array is monotonic, so no NAs present
        return self._values._data
    elif isinstance(self._values, ArrowExtensionArray):
        # This is only used if our array is monotonic, so no missing values
        # present
        return self._values.to_numpy()

    # TODO: exclude ABCRangeIndex case here as it copies
    target = self._get_engine_target()
    if not isinstance(target, np.ndarray):
        raise ValueError("_can_use_libjoin should return False.")
    return target
|
(self) -> numpy.ndarray
|
65,458 |
pandas.core.indexes.base
|
_get_level_names
|
Return a name or list of names with None replaced by the level number.
|
@final
def _get_level_names(self) -> Hashable | Sequence[Hashable]:
    """
    Return a name or list of names with None replaced by the level number.
    """
    if self._is_multi:
        return [
            level if name is None else name for level, name in enumerate(self.names)
        ]
    else:
        return 0 if self.name is None else self.name
|
(self) -> 'Hashable | Sequence[Hashable]'
|
65,459 |
pandas.core.indexes.base
|
_get_level_number
| null |
def _get_level_number(self, level) -> int:
    self._validate_index_level(level)
    return 0
|
(self, level) -> int
|
65,460 |
pandas.core.indexes.base
|
_get_level_values
|
Return an Index of values for requested level.
This is primarily useful to get an individual level of values from a
MultiIndex, but is provided on Index as well for compatibility.
Parameters
----------
level : int or str
It is either the integer position or the name of the level.
Returns
-------
Index
Calling object, as there is only one level in the Index.
See Also
--------
MultiIndex.get_level_values : Get values for a level of a MultiIndex.
Notes
-----
For Index, level should be 0, since there are no multiple levels.
Examples
--------
>>> idx = pd.Index(list('abc'))
>>> idx
Index(['a', 'b', 'c'], dtype='object')
Get level values by supplying `level` as integer:
>>> idx.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object')
|
def _get_level_values(self, level) -> Index:
    """
    Return an Index of values for requested level.
    This is primarily useful to get an individual level of values from a
    MultiIndex, but is provided on Index as well for compatibility.
    Parameters
    ----------
    level : int or str
        It is either the integer position or the name of the level.
    Returns
    -------
    Index
        Calling object, as there is only one level in the Index.
    See Also
    --------
    MultiIndex.get_level_values : Get values for a level of a MultiIndex.
    Notes
    -----
    For Index, level should be 0, since there are no multiple levels.
    Examples
    --------
    >>> idx = pd.Index(list('abc'))
    >>> idx
    Index(['a', 'b', 'c'], dtype='object')
    Get level values by supplying `level` as integer:
    >>> idx.get_level_values(0)
    Index(['a', 'b', 'c'], dtype='object')
    """
    self._validate_index_level(level)
    return self
|
(self, level) -> pandas.core.indexes.base.Index
|
65,461 |
pandas.core.indexes.base
|
_get_names
| null |
def _get_names(self) -> FrozenList:
    return FrozenList((self.name,))
|
(self) -> pandas.core.indexes.frozen.FrozenList
|
65,462 |
pandas.core.indexes.base
|
_get_nearest_indexer
|
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
|
@final
def _get_nearest_indexer(
    self, target: Index, limit: int | None, tolerance
) -> npt.NDArray[np.intp]:
    """
    Get the indexer for the nearest index labels; requires an index with
    values that can be subtracted from each other (e.g., not strings or
    tuples).
    """
    if not len(self):
        return self._get_fill_indexer(target, "pad")

    left_indexer = self.get_indexer(target, "pad", limit=limit)
    right_indexer = self.get_indexer(target, "backfill", limit=limit)

    left_distances = self._difference_compat(target, left_indexer)
    right_distances = self._difference_compat(target, right_indexer)

    op = operator.lt if self.is_monotonic_increasing else operator.le
    indexer = np.where(
        # error: Argument 1&2 has incompatible type "Union[ExtensionArray,
        # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE,
        # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]"
        op(left_distances, right_distances)  # type: ignore[arg-type]
        | (right_indexer == -1),
        left_indexer,
        right_indexer,
    )
    if tolerance is not None:
        indexer = self._filter_indexer_tolerance(target, indexer, tolerance)
    return indexer
|
(self, target: 'Index', limit: 'int | None', tolerance) -> 'npt.NDArray[np.intp]'
|
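A sketch via the public `get_indexer(..., method="nearest")`, which is served by the helper above (illustrative values):

```python
import pandas as pd

idx = pd.Index([0, 10, 20])
# each target is matched to whichever of its pad/backfill candidates is closer
print(idx.get_indexer([3, 18], method="nearest"))  # [0 2]
```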
65,463 |
pandas.core.indexes.base
|
_get_reconciled_name_object
|
If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self.
|
def _get_reconciled_name_object(self, other):
    """
    If the result of a set operation will be self,
    return self, unless the name changes, in which
    case make a shallow copy of self.
    """
    name = get_op_result_name(self, other)
    if self.name is not name:
        return self.rename(name)
    return self
|
(self, other)
|
65,464 |
pandas.core.indexes.base
|
_get_string_slice
| null |
def _get_string_slice(self, key: str_t):
    # this is for partial string indexing,
    # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
    raise NotImplementedError
|
(self, key: str)
|
65,465 |
pandas.core.indexes.base
|
_get_values_for_csv
| null |
def _get_values_for_csv(
    self,
    *,
    na_rep: str_t = "",
    decimal: str_t = ".",
    float_format=None,
    date_format=None,
    quoting=None,
) -> npt.NDArray[np.object_]:
    return get_values_for_csv(
        self._values,
        na_rep=na_rep,
        decimal=decimal,
        float_format=float_format,
        date_format=date_format,
        quoting=quoting,
    )
|
(self, *, na_rep: 'str_t' = '', decimal: 'str_t' = '.', float_format=None, date_format=None, quoting=None) -> 'npt.NDArray[np.object_]'
|
65,466 |
pandas.core.indexes.base
|
_getitem_slice
|
Fastpath for __getitem__ when we know we have a slice.
|
def _getitem_slice(self, slobj: slice) -> Self:
    """
    Fastpath for __getitem__ when we know we have a slice.
    """
    res = self._data[slobj]
    result = type(self)._simple_new(res, name=self._name, refs=self._references)
    if "_engine" in self._cache:
        reverse = slobj.step is not None and slobj.step < 0
        result._engine._update_from_sliced(self._engine, reverse=reverse)  # type: ignore[union-attr]
    return result
|
(self, slobj: slice) -> Self
|
65,467 |
pandas.core.indexes.base
|
_holds_integer
|
Whether the type is an integer type.
|
@final
def _holds_integer(self) -> bool:
    """
    Whether the type is an integer type.
    """
    return self.inferred_type in ["integer", "mixed-integer"]
|
(self) -> bool
|
65,468 |
pandas.core.indexes.base
|
_inner_indexer
| null |
@final
def _inner_indexer(
    self, other: Self
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
    # Caller is responsible for ensuring other.dtype == self.dtype
    sv = self._get_join_target()
    ov = other._get_join_target()
    joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov)
    joined = self._from_join_target(joined_ndarray)
    return joined, lidx, ridx
|
(self, other: 'Self') -> 'tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]'
|
65,469 |
pandas.core.indexes.base
|
_intersection
|
intersection specialized to the case with matching dtypes.
|
def _intersection(self, other: Index, sort: bool = False):
    """
    intersection specialized to the case with matching dtypes.
    """
    if (
        self.is_monotonic_increasing
        and other.is_monotonic_increasing
        and self._can_use_libjoin
        and other._can_use_libjoin
    ):
        try:
            res_indexer, indexer, _ = self._inner_indexer(other)
        except TypeError:
            # non-comparable; should only be for object dtype
            pass
        else:
            # TODO: algos.unique1d should preserve DTA/TDA
            if is_numeric_dtype(self.dtype):
                # This is faster, because Index.unique() checks for uniqueness
                # before calculating the unique values.
                res = algos.unique1d(res_indexer)
            else:
                result = self.take(indexer)
                res = result.drop_duplicates()
            return ensure_wrapped_if_datetimelike(res)

    res_values = self._intersection_via_get_indexer(other, sort=sort)
    res_values = _maybe_try_sort(res_values, sort)
    return res_values
|
(self, other: pandas.core.indexes.base.Index, sort: bool = False)
|
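A sketch of the fallback path above with non-monotonic inputs (illustrative values); with `sort=False`, the default, the result keeps self's order per the `np.sort(taker)` note in `_intersection_via_get_indexer`:

```python
import pandas as pd

a = pd.Index([3, 1, 2])
b = pd.Index([2, 3, 4])
# non-monotonic inputs skip the libjoin fastpath above
print(a.intersection(b))  # Index([3, 2], dtype='int64'): order follows self
```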
65,470 |
pandas.core.indexes.base
|
_intersection_via_get_indexer
|
Find the intersection of two Indexes using get_indexer.
Returns
-------
np.ndarray or ExtensionArray or MultiIndex
The returned array will be unique.
|
@final
def _intersection_via_get_indexer(
    self, other: Index | MultiIndex, sort
) -> ArrayLike | MultiIndex:
    """
    Find the intersection of two Indexes using get_indexer.
    Returns
    -------
    np.ndarray or ExtensionArray or MultiIndex
        The returned array will be unique.
    """
    left_unique = self.unique()
    right_unique = other.unique()

    # even though we are unique, we need get_indexer_for for IntervalIndex
    indexer = left_unique.get_indexer_for(right_unique)

    mask = indexer != -1

    taker = indexer.take(mask.nonzero()[0])
    if sort is False:
        # sort bc we want the elements in the same order they are in self
        # unnecessary in the case with sort=None bc we will sort later
        taker = np.sort(taker)

    result: MultiIndex | ExtensionArray | np.ndarray
    if isinstance(left_unique, ABCMultiIndex):
        result = left_unique.take(taker)
    else:
        result = left_unique.take(taker)._values
    return result
|
(self, other: 'Index | MultiIndex', sort) -> 'ArrayLike | MultiIndex'
|
65,471 |
pandas.core.indexes.category
|
_is_comparable_dtype
| null |
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
    return self.categories._is_comparable_dtype(dtype)
|
(self, dtype: 'DtypeObj') -> 'bool'
|
65,472 |
pandas.core.indexes.category
|
_is_dtype_compat
|
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Parameters
----------
other : Index
Returns
-------
Categorical
Raises
------
TypeError if the dtypes are not compatible
|
def _is_dtype_compat(self, other: Index) -> Categorical:
    """
    *this is an internal non-public method*
    provide a comparison between the dtype of self and other (coercing if
    needed)
    Parameters
    ----------
    other : Index
    Returns
    -------
    Categorical
    Raises
    ------
    TypeError if the dtypes are not compatible
    """
    if isinstance(other.dtype, CategoricalDtype):
        cat = extract_array(other)
        cat = cast(Categorical, cat)
        if not cat._categories_match_up_to_permutation(self._values):
            raise TypeError(
                "categories must match existing categories when appending"
            )
    elif other._is_multi:
        # preempt raising NotImplementedError in isna call
        raise TypeError("MultiIndex is not dtype-compatible with CategoricalIndex")
    else:
        values = other

        cat = Categorical(other, dtype=self.dtype)
        other = CategoricalIndex(cat)
        if not other.isin(values).all():
            raise TypeError(
                "cannot append a non-category item to a CategoricalIndex"
            )
        cat = other._values

        if not ((cat == values) | (isna(cat) & isna(values))).all():
            # GH#37667 see test_equals_non_category
            raise TypeError(
                "categories must match existing categories when appending"
            )

    return cat
|
(self, other: pandas.core.indexes.base.Index) -> pandas.core.arrays.categorical.Categorical
|
65,473 |
pandas.core.indexes.base
|
_is_memory_usage_qualified
|
Return a boolean if we need a qualified .info display.
|
def _is_memory_usage_qualified(self) -> bool:
    """
    Return a boolean if we need a qualified .info display.
    """
    return is_object_dtype(self.dtype)
|
(self) -> bool
|
65,474 |
pandas.core.indexes.base
|
_join_empty
| null |
@final
def _join_empty(
    self, other: Index, how: JoinHow, sort: bool
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
    assert len(self) == 0 or len(other) == 0
    _validate_join_method(how)

    lidx: np.ndarray | None
    ridx: np.ndarray | None

    if len(other):
        how = cast(JoinHow, {"left": "right", "right": "left"}.get(how, how))
        join_index, ridx, lidx = other._join_empty(self, how, sort)
    elif how in ["left", "outer"]:
        if sort and not self.is_monotonic_increasing:
            lidx = self.argsort()
            join_index = self.take(lidx)
        else:
            lidx = None
            join_index = self._view()
        ridx = np.broadcast_to(np.intp(-1), len(join_index))
    else:
        join_index = other._view()
        lidx = np.array([], dtype=np.intp)
        ridx = None
    return join_index, lidx, ridx
|
(self, other: 'Index', how: 'JoinHow', sort: 'bool') -> 'tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]'
|
65,475 |
pandas.core.indexes.base
|
_join_level
|
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex.
If ```keep_order == True```, the order of the data indexed by the
MultiIndex will not be changed; otherwise, it will tie out
with `other`.
|
@final
def _join_level(
    self, other: Index, level, how: JoinHow = "left", keep_order: bool = True
) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
    """
    The join method *only* affects the level of the resulting
    MultiIndex. Otherwise it just exactly aligns the Index data to the
    labels of the level in the MultiIndex.
    If ``keep_order == True``, the order of the data indexed by the
    MultiIndex will not be changed; otherwise, it will tie out
    with `other`.
    """
    from pandas.core.indexes.multi import MultiIndex

    def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]:
        """
        Returns sorter for the inner most level while preserving the
        order of higher levels.
        Parameters
        ----------
        labels : list[np.ndarray]
            Each ndarray has signed integer dtype, not necessarily identical.
        Returns
        -------
        np.ndarray[np.intp]
        """
        if labels[0].size == 0:
            return np.empty(0, dtype=np.intp)

        if len(labels) == 1:
            return get_group_index_sorter(ensure_platform_int(labels[0]))

        # find indexers of beginning of each set of
        # same-key labels w.r.t all but last level
        tic = labels[0][:-1] != labels[0][1:]
        for lab in labels[1:-1]:
            tic |= lab[:-1] != lab[1:]

        starts = np.hstack(([True], tic, [True])).nonzero()[0]
        lab = ensure_int64(labels[-1])
        return lib.get_level_sorter(lab, ensure_platform_int(starts))

    if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
        raise TypeError("Join on level between two MultiIndex objects is ambiguous")

    left, right = self, other
    flip_order = not isinstance(self, MultiIndex)
    if flip_order:
        left, right = right, left
        flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
        how = flip.get(how, how)

    assert isinstance(left, MultiIndex)

    level = left._get_level_number(level)
    old_level = left.levels[level]

    if not right.is_unique:
        raise NotImplementedError(
            "Index._join_level on non-unique index is not implemented"
        )

    new_level, left_lev_indexer, right_lev_indexer = old_level.join(
        right, how=how, return_indexers=True
    )

    if left_lev_indexer is None:
        if keep_order or len(left) == 0:
            left_indexer = None
            join_index = left
        else:  # sort the leaves
            left_indexer = _get_leaf_sorter(left.codes[: level + 1])
            join_index = left[left_indexer]
    else:
        left_lev_indexer = ensure_platform_int(left_lev_indexer)
        rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level))
        old_codes = left.codes[level]

        taker = old_codes[old_codes != -1]
        new_lev_codes = rev_indexer.take(taker)

        new_codes = list(left.codes)
        new_codes[level] = new_lev_codes

        new_levels = list(left.levels)
        new_levels[level] = new_level

        if keep_order:  # just drop missing values. o.w. keep order
            left_indexer = np.arange(len(left), dtype=np.intp)
            left_indexer = cast(np.ndarray, left_indexer)
            mask = new_lev_codes != -1
            if not mask.all():
                new_codes = [lab[mask] for lab in new_codes]
                left_indexer = left_indexer[mask]
        else:  # tie out the order with other
            if level == 0:  # outer most level, take the fast route
                max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max()
                ngroups = 1 + max_new_lev
                left_indexer, counts = libalgos.groupsort_indexer(
                    new_lev_codes, ngroups
                )
                # missing values are placed first; drop them!
                left_indexer = left_indexer[counts[0] :]
                new_codes = [lab[left_indexer] for lab in new_codes]
            else:  # sort the leaves
                mask = new_lev_codes != -1
                mask_all = mask.all()
                if not mask_all:
                    new_codes = [lab[mask] for lab in new_codes]

                left_indexer = _get_leaf_sorter(new_codes[: level + 1])
                new_codes = [lab[left_indexer] for lab in new_codes]

                # left_indexers are w.r.t masked frame.
                # reverse to original frame!
                if not mask_all:
                    left_indexer = mask.nonzero()[0][left_indexer]

        join_index = MultiIndex(
            levels=new_levels,
            codes=new_codes,
            names=left.names,
            verify_integrity=False,
        )

    if right_lev_indexer is not None:
        right_indexer = right_lev_indexer.take(join_index.codes[level])
    else:
        right_indexer = join_index.codes[level]

    if flip_order:
        left_indexer, right_indexer = right_indexer, left_indexer

    left_indexer = (
        None if left_indexer is None else ensure_platform_int(left_indexer)
    )
    right_indexer = (
        None if right_indexer is None else ensure_platform_int(right_indexer)
    )
    return join_index, left_indexer, right_indexer
|
(self, other: 'Index', level, how: 'JoinHow' = 'left', keep_order: 'bool' = True) -> 'tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]'
|
65,476 |
pandas.core.indexes.base
|
_join_monotonic
| null |
@final
def _join_monotonic(
    self, other: Index, how: JoinHow = "left"
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
    # We only get here with matching dtypes and both monotonic increasing
    assert other.dtype == self.dtype
    assert self._can_use_libjoin and other._can_use_libjoin

    if self.equals(other):
        # This is a convenient place for this check, but its correctness
        # does not depend on monotonicity, so it could go earlier
        # in the calling method.
        ret_index = other if how == "right" else self
        return ret_index, None, None

    ridx: npt.NDArray[np.intp] | None
    lidx: npt.NDArray[np.intp] | None

    if self.is_unique and other.is_unique:
        # We can perform much better than the general case
        if how == "left":
            join_index = self
            lidx = None
            ridx = self._left_indexer_unique(other)
        elif how == "right":
            join_index = other
            lidx = other._left_indexer_unique(self)
            ridx = None
        elif how == "inner":
            join_array, lidx, ridx = self._inner_indexer(other)
            join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
        elif how == "outer":
            join_array, lidx, ridx = self._outer_indexer(other)
            join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
    else:
        if how == "left":
            join_array, lidx, ridx = self._left_indexer(other)
        elif how == "right":
            join_array, ridx, lidx = other._left_indexer(self)
        elif how == "inner":
            join_array, lidx, ridx = self._inner_indexer(other)
        elif how == "outer":
            join_array, lidx, ridx = self._outer_indexer(other)

        assert lidx is not None
        assert ridx is not None

        join_index = self._wrap_joined_index(join_array, other, lidx, ridx)

    lidx = None if lidx is None else ensure_platform_int(lidx)
    ridx = None if ridx is None else ensure_platform_int(ridx)
    return join_index, lidx, ridx
|
(self, other: 'Index', how: 'JoinHow' = 'left') -> 'tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]'
|
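A sketch via the public `Index.join`: unique, monotonic inputs hit the fast unique-join branches above (illustrative values):

```python
import pandas as pd

left = pd.Index([1, 2, 3])
right = pd.Index([2, 3, 4])
joined, lidx, ridx = left.join(right, how="inner", return_indexers=True)
print(joined)      # Index([2, 3], dtype='int64')
print(lidx, ridx)  # [1 2] [0 1]
```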
65,477 |
pandas.core.indexes.base
|
_join_multi
| null |
@final
def _join_multi(self, other: Index, how: JoinHow):
    from pandas.core.indexes.multi import MultiIndex
    from pandas.core.reshape.merge import restore_dropped_levels_multijoin

    # figure out join names
    self_names_list = list(com.not_none(*self.names))
    other_names_list = list(com.not_none(*other.names))
    self_names_order = self_names_list.index
    other_names_order = other_names_list.index
    self_names = set(self_names_list)
    other_names = set(other_names_list)
    overlap = self_names & other_names

    # need at least 1 in common
    if not overlap:
        raise ValueError("cannot join with no overlapping index names")

    if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
        # Drop the non-matching levels from left and right respectively
        ldrop_names = sorted(self_names - overlap, key=self_names_order)
        rdrop_names = sorted(other_names - overlap, key=other_names_order)

        # if only the order differs
        if not len(ldrop_names + rdrop_names):
            self_jnlevels = self
            other_jnlevels = other.reorder_levels(self.names)
        else:
            self_jnlevels = self.droplevel(ldrop_names)
            other_jnlevels = other.droplevel(rdrop_names)

        # Join left and right
        # Join on same leveled multi-index frames is supported
        join_idx, lidx, ridx = self_jnlevels.join(
            other_jnlevels, how=how, return_indexers=True
        )

        # Restore the dropped levels
        # Returned index level order is
        # common levels, ldrop_names, rdrop_names
        dropped_names = ldrop_names + rdrop_names

        # error: Argument 5/6 to "restore_dropped_levels_multijoin" has
        # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any
        # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]"
        levels, codes, names = restore_dropped_levels_multijoin(
            self,
            other,
            dropped_names,
            join_idx,
            lidx,  # type: ignore[arg-type]
            ridx,  # type: ignore[arg-type]
        )

        # Re-create the multi-index
        multi_join_idx = MultiIndex(
            levels=levels, codes=codes, names=names, verify_integrity=False
        )

        multi_join_idx = multi_join_idx.remove_unused_levels()

        # maintain the order of the index levels
        if how == "right":
            level_order = other_names_list + ldrop_names
        else:
            level_order = self_names_list + rdrop_names
        multi_join_idx = multi_join_idx.reorder_levels(level_order)

        return multi_join_idx, lidx, ridx

    jl = next(iter(overlap))

    # Case where only one index is multi
    # make the indices into mi's that match
    flip_order = False
    if isinstance(self, MultiIndex):
        self, other = other, self
        flip_order = True
        # flip if join method is right or left
        flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
        how = flip.get(how, how)

    level = other.names.index(jl)
    result = self._join_level(other, level, how=how)

    if flip_order:
        return result[0], result[2], result[1]
    return result
|
(self, other: pandas.core.indexes.base.Index, how: Literal['left', 'right', 'inner', 'outer'])
|
65,478 |
pandas.core.indexes.base
|
_join_non_unique
| null |
@final
def _join_non_unique(
    self, other: Index, how: JoinHow = "left", sort: bool = False
) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
    from pandas.core.reshape.merge import get_join_indexers_non_unique

    # We only get here if dtypes match
    assert self.dtype == other.dtype

    left_idx, right_idx = get_join_indexers_non_unique(
        self._values, other._values, how=how, sort=sort
    )
    mask = left_idx == -1

    join_idx = self.take(left_idx)
    right = other.take(right_idx)
    join_index = join_idx.putmask(mask, right)
    if isinstance(join_index, ABCMultiIndex) and how == "outer":
        # test_join_index_levels
        join_index = join_index._sort_levels_monotonic()
    return join_index, left_idx, right_idx
|
(self, other: 'Index', how: 'JoinHow' = 'left', sort: 'bool' = False) -> 'tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]'
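A sketch of the non-unique join path via the public API (assumes pandas 2.x; the indexer values in the comments are what a left join should produce here):
import pandas as pd
left = pd.Index(["a", "a", "b"])
right = pd.Index(["a", "c"])
# Duplicates on either side route Index.join through _join_non_unique.
joined, lidx, ridx = left.join(right, how="left", return_indexers=True)
print(joined)      # Index(['a', 'a', 'b'], dtype='object')
print(lidx, ridx)  # positions into left/right; -1 marks "no match"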
|
65,479 |
pandas.core.indexes.base
|
_join_via_get_indexer
| null |
@final
def _join_via_get_indexer(
self, other: Index, how: JoinHow, sort: bool
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
# Fallback if we do not have any fastpaths available based on
# uniqueness/monotonicity
# Note: at this point we have checked matching dtypes
if how == "left":
join_index = self.sort_values() if sort else self
elif how == "right":
join_index = other.sort_values() if sort else other
elif how == "inner":
join_index = self.intersection(other, sort=sort)
elif how == "outer":
try:
join_index = self.union(other, sort=sort)
except TypeError:
join_index = self.union(other)
try:
join_index = _maybe_try_sort(join_index, sort)
except TypeError:
pass
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer_for(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer_for(join_index)
return join_index, lindexer, rindexer
|
(self, other: 'Index', how: 'JoinHow', sort: 'bool') -> 'tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]'
|
65,480 |
pandas.core.indexes.base
|
_left_indexer
| null |
@final
def _left_indexer(
self, other: Self
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov)
joined = self._from_join_target(joined_ndarray)
return joined, lidx, ridx
|
(self, other: 'Self') -> 'tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]'
|
65,481 |
pandas.core.indexes.base
|
_left_indexer_unique
| null |
@final
def _left_indexer_unique(self, other: Self) -> npt.NDArray[np.intp]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
# similar but not identical to ov.searchsorted(sv)
return libjoin.left_join_indexer_unique(sv, ov)
|
(self, other: 'Self') -> 'npt.NDArray[np.intp]'
|
65,482 |
pandas.core.indexes.base
|
_logical_method
| null |
@final
def _logical_method(self, other, op):
res_name = ops.get_op_result_name(self, other)
lvalues = self._values
rvalues = extract_array(other, extract_numpy=True, extract_range=True)
res_values = ops.logical_op(lvalues, rvalues, op)
return self._construct_result(res_values, name=res_name)
|
(self, other, op)
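This backs the element-wise logical operators on boolean Indexes; a short sketch:
import pandas as pd
a = pd.Index([True, True, False])
b = pd.Index([True, False, False])
# __and__/__or__/__xor__ dispatch through _logical_method.
print(a & b)  # Index([True, False, False], dtype='bool')
print(a | b)  # Index([True, True, False], dtype='bool')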
|
65,483 |
pandas.core.base
|
_map_values
|
An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
convert : bool, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object. Note that the dtype is always
preserved for some extension array dtypes, such as Categorical.
Returns
-------
Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
|
@final
def _map_values(self, mapper, na_action=None, convert: bool = True):
"""
An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
convert : bool, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object. Note that the dtype is always
preserved for some extension array dtypes, such as Categorical.
Returns
-------
Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
arr = self._values
if isinstance(arr, ExtensionArray):
return arr.map(mapper, na_action=na_action)
return algorithms.map_array(arr, mapper, na_action=na_action, convert=convert)
|
(self, mapper, na_action=None, convert: bool = True)
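The public entry points are Index.map and Series.map; a short sketch of the mapper and na_action semantics described above:
import numpy as np
import pandas as pd
idx = pd.Index(["cat", "dog", np.nan])
# Function mapper; na_action="ignore" propagates NaN without calling the func.
print(idx.map(str.upper, na_action="ignore"))  # Index(['CAT', 'DOG', nan], dtype='object')
# Dict mapper: values without a correspondence become NaN.
print(idx.map({"cat": "feline"}))  # Index(['feline', nan, nan], dtype='object')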
|
65,484 |
pandas.core.indexes.category
|
_maybe_cast_indexer
| null |
def _maybe_cast_indexer(self, key) -> int:
# GH#41933: we have to do this instead of self._data._validate_scalar
# because this will correctly get partial-indexing on Interval categories
try:
return self._data._unbox_scalar(key)
except KeyError:
if is_valid_na_for_dtype(key, self.categories.dtype):
return -1
raise
|
(self, key) -> int
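A sketch of the NaN-to-code -1 mapping this enables (assumes pandas 2.x):
import numpy as np
import pandas as pd
ci = pd.CategoricalIndex(["a", np.nan, "b"])
# NaN keys unbox to code -1, which matches the missing slot in the codes.
print(ci.get_loc(np.nan))  # 1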
|
65,485 |
pandas.core.indexes.category
|
_maybe_cast_listlike_indexer
| null |
def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex:
if isinstance(values, CategoricalIndex):
values = values._data
if isinstance(values, Categorical):
# Indexing on codes is more efficient if categories are the same,
# so we can apply some optimizations based on the degree of
# dtype-matching.
cat = self._data._encode_with_my_categories(values)
codes = cat._codes
else:
codes = self.categories.get_indexer(values)
codes = codes.astype(self.codes.dtype, copy=False)
cat = self._data._from_backing_data(codes)
return type(self)._simple_new(cat)
|
(self, values) -> pandas.core.indexes.category.CategoricalIndex
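A hedged sketch of the effect: list-like keys are re-encoded into the index's own categories so lookups run on integer codes (assumes pandas 2.x):
import pandas as pd
ci = pd.CategoricalIndex(["a", "b", "a"])
# The key list is cast to a Categorical sharing ci's categories first.
print(ci.get_indexer_for(["a"]))  # [0 2]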
|
65,486 |
pandas.core.indexes.base
|
_maybe_cast_slice_bound
|
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
|
def _maybe_cast_slice_bound(self, label, side: str_t):
"""
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. datetimelike Indexes
if is_numeric_dtype(self.dtype):
return self._maybe_cast_indexer(label)
# reject them, if index does not contain label
if (is_float(label) or is_integer(label)) and label not in self:
self._raise_invalid_indexer("slice", label)
return label
|
(self, label, side: str)
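A sketch of both halves of the contract: datetime-like subclasses cast string bounds, while a plain Index rejects numeric bounds it does not contain:
import pandas as pd
s = pd.Series(range(4), index=pd.date_range("2024-01-01", periods=4))
# DatetimeIndex overrides _maybe_cast_slice_bound, so string labels are
# parsed into Timestamps before slicing.
print(s.loc["2024-01-02":"2024-01-03"])
# On a plain string index, a float slice bound is rejected:
# pd.Series([1], index=["a"]).loc[1.5:]  # TypeError: cannot do slice indexing ...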
|
65,487 |
pandas.core.indexes.base
|
_maybe_check_unique
|
Check that an Index has no duplicates.
This is typically only called via
`NDFrame.flags.allows_duplicate_labels.setter` when it's set to
True (duplicates aren't allowed).
Raises
------
DuplicateLabelError
When the index is not unique.
|
@final
def _maybe_check_unique(self) -> None:
"""
Check that an Index has no duplicates.
This is typically only called via
`NDFrame.flags.allows_duplicate_labels.setter` when it's set to
True (duplicates aren't allowed).
Raises
------
DuplicateLabelError
When the index is not unique.
"""
if not self.is_unique:
msg = """Index has duplicates."""
duplicates = self._format_duplicate_message()
msg += f"\n{duplicates}"
raise DuplicateLabelError(msg)
|
(self) -> NoneType
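A sketch of the public trigger mentioned above, the allows_duplicate_labels flag:
import pandas as pd
from pandas.errors import DuplicateLabelError
df = pd.DataFrame({"v": [1, 2]}, index=["a", "a"])
try:
    # Setting the flag runs _maybe_check_unique on each axis.
    df.flags.allows_duplicate_labels = False
except DuplicateLabelError as err:
    print(err)  # "Index has duplicates." plus a table of duplicate positions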
|
65,488 |
pandas.core.indexes.base
|
_maybe_disable_logical_methods
|
raise if this Index subclass does not support any or all.
|
@final
def _maybe_disable_logical_methods(self, opname: str_t) -> None:
"""
raise if this Index subclass does not support any or all.
"""
if (
isinstance(self, ABCMultiIndex)
# TODO(3.0): PeriodArray and DatetimeArray any/all will raise,
# so checking needs_i8_conversion will be unnecessary
or (needs_i8_conversion(self.dtype) and self.dtype.kind != "m")
):
# This call will raise
make_invalid_op(opname)(self)
|
(self, opname: str) -> NoneType
|
65,489 |
pandas.core.indexes.base
|
_maybe_disallow_fill
|
We only use pandas-style take when allow_fill is True _and_
fill_value is not None.
|
@final
def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool:
"""
We only use pandas-style take when allow_fill is True _and_
fill_value is not None.
"""
if allow_fill and fill_value is not None:
# only fill if we are passing a non-None fill_value
if self._can_hold_na:
if (indices < -1).any():
raise ValueError(
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
else:
cls_name = type(self).__name__
raise ValueError(
f"Unable to fill values because {cls_name} cannot contain NA"
)
else:
allow_fill = False
return allow_fill
|
(self, allow_fill: bool, fill_value, indices) -> bool
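A sketch of the rule via Index.take: -1 means "fill" only when a fill_value is supplied, and indices below -1 are rejected in that mode:
import numpy as np
import pandas as pd
idx = pd.Index([10.0, 20.0, 30.0])
# allow_fill=True plus a non-None fill_value: -1 inserts the fill value.
print(idx.take([0, -1], allow_fill=True, fill_value=np.nan))  # Index([10.0, nan], dtype='float64')
# idx.take([0, -2], allow_fill=True, fill_value=np.nan)  # ValueError
# Without a fill_value, -1 falls back to plain positional indexing.
print(idx.take([0, -1]))  # Index([10.0, 30.0], dtype='float64')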
|
65,490 |
pandas.core.indexes.base
|
_maybe_downcast_for_indexing
|
When dealing with an object-dtype Index and a non-object Index, see
if we can upcast the object-dtype one to improve performance.
|
@final
def _maybe_downcast_for_indexing(self, other: Index) -> tuple[Index, Index]:
"""
When dealing with an object-dtype Index and a non-object Index, see
if we can upcast the object-dtype one to improve performance.
"""
if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):
if (
self.tz is not None
and other.tz is not None
and not tz_compare(self.tz, other.tz)
):
# standardize on UTC
return self.tz_convert("UTC"), other.tz_convert("UTC")
elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex):
try:
return type(other)(self), other
except OutOfBoundsDatetime:
return self, other
elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex):
        # TODO: we don't have tests that get here
return type(other)(self), other
elif self.dtype.kind == "u" and other.dtype.kind == "i":
# GH#41873
if other.min() >= 0:
# lookup min as it may be cached
# TODO: may need itemsize check if we have non-64-bit Indexes
return self, other.astype(self.dtype)
elif self._is_multi and not other._is_multi:
try:
# "Type[Index]" has no attribute "from_tuples"
other = type(self).from_tuples(other) # type: ignore[attr-defined]
except (TypeError, ValueError):
# let's instead try with a straight Index
self = Index(self._values)
if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype):
        # Reverse op so we don't need to re-implement on the subclasses
other, self = other._maybe_downcast_for_indexing(self)
return self, other
|
(self, other: pandas.core.indexes.base.Index) -> tuple[pandas.core.indexes.base.Index, pandas.core.indexes.base.Index]
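A sketch of the timezone branch: mismatched zones are standardized to UTC so lookups compare instants rather than wall-clock labels (assumes pandas 2.x; US/Eastern is UTC-5 in January):
import pandas as pd
left = pd.date_range("2024-01-01", periods=2, tz="US/Eastern")
right = pd.date_range("2024-01-01 05:00", periods=2, tz="UTC")
# Same instants expressed in different zones still match.
print(left.get_indexer(right))  # [0 1]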
|
65,491 |
pandas.core.indexes.base
|
_maybe_preserve_names
| null |
def _maybe_preserve_names(self, target: Index, preserve_names: bool):
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy(deep=False)
target.name = self.name
return target
|
(self, target: pandas.core.indexes.base.Index, preserve_names: bool)
|
65,492 |
pandas.core.base
|
_memory_usage
|
Memory usage of the values.
Parameters
----------
deep : bool, default False
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption.
Returns
-------
bytes used
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx.memory_usage()
24
|
@final
def _memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of the values.
Parameters
----------
deep : bool, default False
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption.
Returns
-------
bytes used
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx.memory_usage()
24
"""
if hasattr(self.array, "memory_usage"):
return self.array.memory_usage( # pyright: ignore[reportGeneralTypeIssues]
deep=deep,
)
v = self.array.nbytes
if deep and is_object_dtype(self.dtype) and not PYPY:
values = cast(np.ndarray, self._values)
v += lib.memory_usage_of_objects(values)
return v
|
(self, deep: bool = False) -> int
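A short sketch of the deep flag on an object-dtype index (byte counts vary by build, so treat the numbers as indicative):
import pandas as pd
idx = pd.Index(["a", "bb", "ccc"])
# Shallow count: essentially the pointer array for object dtype (3 * 8 bytes).
print(idx.memory_usage())
# deep=True additionally interrogates each Python string object.
print(idx.memory_usage(deep=True))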
|
65,493 |
pandas.core.indexes.base
|
_mpl_repr
| null |
@final
def _mpl_repr(self) -> np.ndarray:
# how to represent ourselves to matplotlib
if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M":
return cast(np.ndarray, self.values)
return self.astype(object, copy=False)._values
|
(self) -> numpy.ndarray
|
65,494 |
pandas.core.indexes.base
|
_outer_indexer
| null |
@final
def _outer_indexer(
self, other: Self
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov)
joined = self._from_join_target(joined_ndarray)
return joined, lidx, ridx
|
(self, other: 'Self') -> 'tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]'
|
65,495 |
pandas.core.indexes.base
|
_raise_if_missing
|
Check that indexer can be used to return a result.
e.g. at least one element was found,
unless the list of keys was actually empty.
Parameters
----------
key : list-like
Targeted labels (only used to show correct error message).
indexer : array-like of ints
Indices corresponding to the key
(with -1 indicating not found).
axis_name : str
Raises
------
KeyError
If at least one key was requested but none was found.
|
def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None:
"""
Check that indexer can be used to return a result.
e.g. at least one element was found,
unless the list of keys was actually empty.
Parameters
----------
key : list-like
Targeted labels (only used to show correct error message).
    indexer : array-like of ints
        Indices corresponding to the key
        (with -1 indicating not found).
axis_name : str
Raises
------
KeyError
If at least one key was requested but none was found.
"""
if len(key) == 0:
return
# Count missing values
missing_mask = indexer < 0
nmissing = missing_mask.sum()
if nmissing:
if nmissing == len(indexer):
raise KeyError(f"None of [{key}] are in the [{axis_name}]")
not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())
raise KeyError(f"{not_found} not in index")
|
(self, key, indexer, axis_name: str) -> NoneType
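A sketch of the three cases the docstring describes, via .loc:
import pandas as pd
s = pd.Series([1, 2], index=["a", "b"])
# All labels missing -> KeyError: "None of [...] are in the [index]"
# s.loc[["x", "y"]]
# Some labels missing -> KeyError: "['x'] not in index"
# s.loc[["a", "x"]]
# An empty key list is allowed and returns an empty result.
print(s.loc[[]])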
|
65,496 |
pandas.core.indexes.base
|
_raise_invalid_indexer
|
Raise consistent invalid indexer message.
|
@final
def _raise_invalid_indexer(
self,
form: Literal["slice", "positional"],
key,
reraise: lib.NoDefault | None | Exception = lib.no_default,
) -> None:
"""
Raise consistent invalid indexer message.
"""
msg = (
f"cannot do {form} indexing on {type(self).__name__} with these "
f"indexers [{key}] of type {type(key).__name__}"
)
if reraise is not lib.no_default:
raise TypeError(msg) from reraise
raise TypeError(msg)
|
(self, form: Literal['slice', 'positional'], key, reraise: Union[Literal[<no_default>], NoneType, Exception] = <no_default>) -> NoneType
|
65,497 |
pandas.core.indexes.base
|
_reindex_non_unique
|
Create a new index with target's values (move/add/delete values as
necessary); for use with a non-unique Index and a possibly non-unique target.
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray[np.intp]
Indices of output values in original index.
new_indexer : np.ndarray[np.intp] or None
|
@final
def _reindex_non_unique(
self, target: Index
) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]:
"""
    Create a new index with target's values (move/add/delete values as
    necessary); for use with a non-unique Index and a possibly non-unique target.
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray[np.intp]
Indices of output values in original index.
new_indexer : np.ndarray[np.intp] or None
"""
target = ensure_index(target)
if len(target) == 0:
# GH#13691
return self[:0], np.array([], dtype=np.intp), None
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels: Index | np.ndarray = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer), dtype=np.intp)
missing = ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = length[~check]
cur_labels = self.take(indexer[check]).values
cur_indexer = length[check]
# Index constructor below will do inference
new_labels = np.empty((len(indexer),), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# GH#38906
if not len(self):
new_indexer = np.arange(0, dtype=np.intp)
# a unique indexer
elif target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer), dtype=np.intp)
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = -1
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp)
new_indexer[~check] = -1
if not isinstance(self, ABCMultiIndex):
new_index = Index(new_labels, name=self.name)
else:
new_index = type(self).from_tuples(new_labels, names=self.names)
return new_index, indexer, new_indexer
|
(self, target: 'Index') -> 'tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]'
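A hedged sketch of the behavior this supports: label selection on a non-unique index expands every match (assumes pandas 2.x):
import pandas as pd
s = pd.Series([1, 2, 3], index=["a", "a", "b"])
# Each requested label pulls in all of its matches.
print(s.loc[["a", "b"]])
# a    1
# a    2
# b    3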
|
65,498 |
pandas.core.indexes.base
|
_rename
|
fastpath for rename if new name is already validated.
|
@final
def _rename(self, name: Hashable) -> Self:
"""
fastpath for rename if new name is already validated.
"""
result = self._view()
result._name = name
return result
|
(self, name: 'Hashable') -> 'Self'
|
65,500 |
pandas.core.indexes.base
|
_reset_identity
|
Initializes or resets ``_id`` attribute with new object.
|
@final
def _reset_identity(self) -> None:
"""
Initializes or resets ``_id`` attribute with new object.
"""
self._id = object()
|
(self) -> NoneType
|
65,501 |
pandas.core.indexes.extension
|
_reverse_indexer
|
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
Dict[Hashable, np.ndarray[np.intp]]
dict of categories -> indexers
Examples
--------
>>> c = pd.Categorical(list('aabca'))
>>> c
['a', 'a', 'b', 'c', 'a']
Categories (3, object): ['a', 'b', 'c']
>>> c.categories
Index(['a', 'b', 'c'], dtype='object')
>>> c.codes
array([0, 0, 1, 2, 0], dtype=int8)
>>> c._reverse_indexer()
{'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
|
def _inherit_from_data(
name: str, delegate: type, cache: bool = False, wrap: bool = False
):
"""
Make an alias for a method of the underlying ExtensionArray.
Parameters
----------
name : str
Name of an attribute the class should inherit from its EA parent.
delegate : class
cache : bool, default False
Whether to convert wrapped properties into cache_readonly
wrap : bool, default False
Whether to wrap the inherited result in an Index.
Returns
-------
attribute, method, property, or cache_readonly
"""
attr = getattr(delegate, name)
if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor":
# getset_descriptor i.e. property defined in cython class
if cache:
def cached(self):
return getattr(self._data, name)
cached.__name__ = name
cached.__doc__ = attr.__doc__
method = cache_readonly(cached)
else:
def fget(self):
result = getattr(self._data, name)
if wrap:
if isinstance(result, type(self._data)):
return type(self)._simple_new(result, name=self.name)
elif isinstance(result, ABCDataFrame):
return result.set_index(self)
return Index(result, name=self.name)
return result
def fset(self, value) -> None:
setattr(self._data, name, value)
fget.__name__ = name
fget.__doc__ = attr.__doc__
method = property(fget, fset)
elif not callable(attr):
# just a normal attribute, no wrapping
method = attr
else:
# error: Incompatible redefinition (redefinition with type "Callable[[Any,
# VarArg(Any), KwArg(Any)], Any]", original type "property")
def method(self, *args, **kwargs): # type: ignore[misc]
if "inplace" in kwargs:
raise ValueError(f"cannot use inplace with {type(self).__name__}")
result = attr(self._data, *args, **kwargs)
if wrap:
if isinstance(result, type(self._data)):
return type(self)._simple_new(result, name=self.name)
elif isinstance(result, ABCDataFrame):
return result.set_index(self)
return Index(result, name=self.name)
return result
# error: "property" has no attribute "__name__"
method.__name__ = name # type: ignore[attr-defined]
method.__doc__ = attr.__doc__
return method
|
(self, *args, **kwargs)
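A self-contained toy version of the delegation pattern above (all names here are illustrative, not pandas API):
# Generate wrapper methods that forward to an underlying object on ._data.
def _delegate(name: str):
    def method(self, *args, **kwargs):
        return getattr(self._data, name)(*args, **kwargs)
    method.__name__ = name
    return method
class ToyIndex:
    def __init__(self, data: list) -> None:
        self._data = data
    count = _delegate("count")  # forwards to list.count
print(ToyIndex([1, 1, 2]).count(1))  # 2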
|
65,502 |
pandas.core.indexes.base
|
_searchsorted_monotonic
| null |
def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(
label, side="right" if side == "left" else "left"
)
return len(self) - pos
raise ValueError("index must be monotonic increasing or decreasing")
|
(self, label, side: Literal['left', 'right'] = 'left')
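A sketch of both monotonic directions; non-monotonic input raises:
import pandas as pd
inc = pd.Index([1, 3, 5])
dec = pd.Index([5, 3, 1])
print(inc._searchsorted_monotonic(3, side="left"))  # 1
# Decreasing: search the reversed values, then flip the position back.
print(dec._searchsorted_monotonic(3, side="left"))  # 1
# pd.Index([2, 1, 3])._searchsorted_monotonic(1)  # ValueError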
|
65,503 |
pandas.core.indexes.base
|
_set_names
|
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
|
def _set_names(self, values, *, level=None) -> None:
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError("Names must be a list-like")
if len(values) != 1:
raise ValueError(f"Length of new names must be 1, got {len(values)}")
# GH 20527
# All items in 'name' need to be hashable:
validate_all_hashable(*values, error_name=f"{type(self).__name__}.name")
self._name = values[0]
|
(self, values, *, level=None) -> NoneType
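Public entry points such as set_names and rename funnel here; a sketch of the validation:
import pandas as pd
idx = pd.Index([1, 2, 3])
print(idx.set_names("id").name)  # 'id'
# A flat (non-Multi) Index accepts exactly one name:
# idx.set_names(["a", "b"])  # ValueError: Length of new names must be 1, got 2
# Names must be hashable:
# idx.set_names([["a"]])     # TypeError: Index.name must be a hashable type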
|
65,504 |
pandas.core.indexes.base
|
_shallow_copy
|
Create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
name : Label, defaults to self.name
|
def _shallow_copy(self, values, name: Hashable = no_default) -> Self:
"""
Create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
name : Label, defaults to self.name
"""
name = self._name if name is no_default else name
return self._simple_new(values, name=name, refs=self._references)
|
(self, values, name: 'Hashable' = <no_default>) -> 'Self'
|
65,505 |
pandas.core.indexes.base
|
_should_compare
|
Check if `self == other` can ever have non-False entries.
|
@final
def _should_compare(self, other: Index) -> bool:
"""
Check if `self == other` can ever have non-False entries.
"""
# NB: we use inferred_type rather than is_bool_dtype to catch
# object_dtype_of_bool and categorical[object_dtype_of_bool] cases
if (
other.inferred_type == "boolean" and is_any_real_numeric_dtype(self.dtype)
) or (
self.inferred_type == "boolean" and is_any_real_numeric_dtype(other.dtype)
):
# GH#16877 Treat boolean labels passed to a numeric index as not
# found. Without this fix False and True would be treated as 0 and 1
# respectively.
return False
dtype = _unpack_nested_dtype(other)
return self._is_comparable_dtype(dtype) or is_object_dtype(dtype)
|
(self, other: pandas.core.indexes.base.Index) -> bool
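A sketch of the boolean-vs-numeric guard (GH#16877) noted in the comment above:
import pandas as pd
idx = pd.Index([0, 1, 2])
# Boolean labels against a numeric index are "not found", not 0/1.
print(idx.get_indexer([True, False]))  # [-1 -1]
print(idx.get_indexer([0, 1]))         # [0 1]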
|
65,506 |
pandas.core.indexes.base
|
_should_partial_index
|
Should we attempt partial-matching indexing?
|
@final
def _should_partial_index(self, target: Index) -> bool:
"""
Should we attempt partial-matching indexing?
"""
if isinstance(self.dtype, IntervalDtype):
if isinstance(target.dtype, IntervalDtype):
return False
# "Index" has no attribute "left"
return self.left._should_compare(target) # type: ignore[attr-defined]
return False
|
(self, target: pandas.core.indexes.base.Index) -> bool
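A sketch of partial indexing on an IntervalIndex: scalar targets match the containing interval, while interval targets need exact equality:
import pandas as pd
ii = pd.IntervalIndex.from_breaks([0, 1, 2])  # [(0, 1], (1, 2]]
print(ii.get_indexer([0.5, 1.5]))  # [0 1] -- partial match on containment
print(ii.get_indexer(pd.IntervalIndex.from_breaks([0, 2])))  # [-1]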
|
65,507 |
pandas.core.indexes.base
|
_sort_levels_monotonic
|
Compat with MultiIndex.
|
def _sort_levels_monotonic(self) -> Self:
"""
Compat with MultiIndex.
"""
return self
|
(self) -> 'Self'
|
65,508 |
pandas.core.indexes.base
|
_summary
|
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
|
def _summary(self, name=None) -> str_t:
"""
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if hasattr(head, "format") and not isinstance(head, str):
head = head.format()
elif needs_i8_conversion(self.dtype):
# e.g. Timedelta, display as values, not quoted
head = self._formatter_func(head).replace("'", "")
tail = self[-1]
if hasattr(tail, "format") and not isinstance(tail, str):
tail = tail.format()
elif needs_i8_conversion(self.dtype):
# e.g. Timedelta, display as values, not quoted
tail = self._formatter_func(tail).replace("'", "")
index_summary = f", {head} to {tail}"
else:
index_summary = ""
if name is None:
name = type(self).__name__
return f"{name}: {len(self)} entries{index_summary}"
|
(self, name=None) -> str
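A sketch of the summary string (timestamps render unquoted, per the i8-conversion branch above):
import pandas as pd
idx = pd.date_range("2024-01-01", periods=3)
print(idx._summary())
# DatetimeIndex: 3 entries, 2024-01-01 00:00:00 to 2024-01-03 00:00:00
print(pd.Index([])._summary(name="Empty"))  # Empty: 0 entries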
|
65,509 |
pandas.core.indexes.base
|
_transform_index
|
Apply function to all values found in index.
This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified.
|
@final
def _transform_index(self, func, *, level=None) -> Index:
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified.
"""
if isinstance(self, ABCMultiIndex):
values = [
self.get_level_values(i).map(func)
if i == level or level is None
else self.get_level_values(i)
for i in range(self.nlevels)
]
return type(self).from_arrays(values)
else:
items = [func(x) for x in self]
return Index(items, name=self.name, tupleize_cols=False)
|
(self, func, *, level=None) -> pandas.core.indexes.base.Index
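DataFrame.rename with a callable routes through this; a sketch including the level-limited MultiIndex case:
import pandas as pd
df = pd.DataFrame({"a": [1], "b": [2]})
print(df.rename(columns=str.upper).columns)  # Index(['A', 'B'], dtype='object')
# With level=, only that level of a MultiIndex is transformed.
mi = pd.MultiIndex.from_tuples([("x", 1)], names=["k", "n"])
print(mi._transform_index(str.upper, level=0))
# MultiIndex([('X', 1)], names=['k', 'n'])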
|
65,510 |
pandas.core.indexes.base
|
_unary_method
| null |
@final
def _unary_method(self, op):
result = op(self._values)
return Index(result, name=self.name)
|
(self, op)
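This backs the unary operators (__neg__, __pos__, __abs__, __invert__); a sketch:
import pandas as pd
idx = pd.Index([1, -2, 3])
print(-idx)      # Index([-1, 2, -3], dtype='int64')
print(abs(idx))  # Index([1, 2, 3], dtype='int64')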
|