Columns:
index: int64 (values 0 to 731k)
package: string (length 2 to 98)
name: string (length 1 to 76)
docstring: string (length 0 to 281k)
code: string (length 4 to 1.07M)
signature: string (length 2 to 42.8k)
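The rows below follow this six-column schema. As a quick way to pull a single row out of such a dump, a minimal sketch; the parquet storage format and the filename "functions.parquet" are assumptions, not part of the source:

import pandas as pd

# Minimal sketch, assuming the rows are stored as a parquet file named
# "functions.parquet" (hypothetical) with the columns listed above.
df = pd.read_parquet("functions.parquet")

# Fetch one of the rows shown below by package and function name.
row = df[(df["package"] == "pandas.core.strings.object_array")
         & (df["name"] == "_str_isnumeric")].iloc[0]
print(row["signature"])
print(row["code"])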
65,274
pandas.core.strings.object_array
_str_isnumeric
null
def _str_isnumeric(self): return self._str_map(str.isnumeric, dtype="bool")
(self)
65,275
pandas.core.strings.object_array
_str_isspace
null
def _str_isspace(self): return self._str_map(str.isspace, dtype="bool")
(self)
65,276
pandas.core.strings.object_array
_str_istitle
null
def _str_istitle(self): return self._str_map(str.istitle, dtype="bool")
(self)
65,277
pandas.core.strings.object_array
_str_isupper
null
def _str_isupper(self): return self._str_map(str.isupper, dtype="bool")
(self)
65,278
pandas.core.strings.object_array
_str_join
null
def _str_join(self, sep: str): return self._str_map(sep.join)
(self, sep: str)
65,279
pandas.core.strings.object_array
_str_len
null
def _str_len(self): return self._str_map(len, dtype="int64")
(self)
65,280
pandas.core.strings.object_array
_str_lower
null
def _str_lower(self): return self._str_map(str.lower)
(self)
65,281
pandas.core.strings.object_array
_str_lstrip
null
def _str_lstrip(self, to_strip=None): return self._str_map(lambda x: x.lstrip(to_strip))
(self, to_strip=None)
65,282
pandas.core.arrays.categorical
_str_map
null
def _str_map(
    self, f, na_value=np.nan, dtype=np.dtype("object"), convert: bool = True
):
    # Optimization to apply the callable `f` to the categories once
    # and rebuild the result by `take`ing from the result with the codes.
    # Returns the same type as the object-dtype implementation though.
    from pandas.core.arrays import NumpyExtensionArray

    categories = self.categories
    codes = self.codes
    result = NumpyExtensionArray(categories.to_numpy())._str_map(f, na_value, dtype)
    return take_nd(result, codes, fill_value=na_value)
(self, f, na_value=nan, dtype=dtype('O'), convert: bool = True)
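A minimal sketch of the categories-then-take idea used by the categorical _str_map row above, written against public pandas/NumPy API only; missing values (code -1), which the real method handles via fill_value, are ignored here:

import numpy as np
import pandas as pd

cat = pd.Categorical(["low", "high", "high", "low", "high"])
# Apply the callable once per category, not once per element ...
per_category = np.array([str.upper(c) for c in cat.categories])
# ... then rebuild the full-length result by taking with the codes.
rebuilt = per_category.take(cat.codes)
print(rebuilt)  # ['LOW' 'HIGH' 'HIGH' 'LOW' 'HIGH']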
65,283
pandas.core.strings.object_array
_str_match
null
def _str_match(
    self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None
):
    if not case:
        flags |= re.IGNORECASE

    regex = re.compile(pat, flags=flags)

    f = lambda x: regex.match(x) is not None

    return self._str_map(f, na_value=na, dtype=np.dtype(bool))
(self, pat: 'str', case: 'bool' = True, flags: 'int' = 0, na: 'Scalar | None' = None)
65,284
pandas.core.strings.object_array
_str_normalize
null
def _str_normalize(self, form):
    f = lambda x: unicodedata.normalize(form, x)
    return self._str_map(f)
(self, form)
65,285
pandas.core.strings.object_array
_str_pad
null
def _str_pad(
    self,
    width: int,
    side: Literal["left", "right", "both"] = "left",
    fillchar: str = " ",
):
    if side == "left":
        f = lambda x: x.rjust(width, fillchar)
    elif side == "right":
        f = lambda x: x.ljust(width, fillchar)
    elif side == "both":
        f = lambda x: x.center(width, fillchar)
    else:  # pragma: no cover
        raise ValueError("Invalid side")
    return self._str_map(f)
(self, width: int, side: Literal['left', 'right', 'both'] = 'left', fillchar: str = ' ')
65,286
pandas.core.strings.object_array
_str_partition
null
def _str_partition(self, sep: str, expand):
    result = self._str_map(lambda x: x.partition(sep), dtype="object")
    return result
(self, sep: str, expand)
65,287
pandas.core.strings.object_array
_str_removeprefix
null
def _str_removeprefix(self, prefix: str) -> Series:
    # outstanding question on whether to use native methods for users on Python 3.9+
    # https://github.com/pandas-dev/pandas/pull/39226#issuecomment-836719770,
    # in which case we could do return self._str_map(str.removeprefix)

    def removeprefix(text: str) -> str:
        if text.startswith(prefix):
            return text[len(prefix) :]
        return text

    return self._str_map(removeprefix)
(self, prefix: 'str') -> 'Series'
65,288
pandas.core.strings.object_array
_str_removesuffix
null
def _str_removesuffix(self, suffix: str) -> Series: return self._str_map(lambda x: x.removesuffix(suffix))
(self, suffix: 'str') -> 'Series'
65,289
pandas.core.strings.object_array
_str_repeat
null
def _str_repeat(self, repeats: int | Sequence[int]):
    if lib.is_integer(repeats):
        rint = cast(int, repeats)

        def scalar_rep(x):
            try:
                return bytes.__mul__(x, rint)
            except TypeError:
                return str.__mul__(x, rint)

        return self._str_map(scalar_rep, dtype=str)
    else:
        from pandas.core.arrays.string_ import BaseStringArray

        def rep(x, r):
            if x is libmissing.NA:
                return x
            try:
                return bytes.__mul__(x, r)
            except TypeError:
                return str.__mul__(x, r)

        result = libops.vec_binop(
            np.asarray(self),
            np.asarray(repeats, dtype=object),
            rep,
        )
        if isinstance(self, BaseStringArray):
            # Not going through map, so we have to do this here.
            result = type(self)._from_sequence(result, dtype=self.dtype)
        return result
(self, repeats: 'int | Sequence[int]')
65,290
pandas.core.strings.object_array
_str_replace
null
def _str_replace(
    self,
    pat: str | re.Pattern,
    repl: str | Callable,
    n: int = -1,
    case: bool = True,
    flags: int = 0,
    regex: bool = True,
):
    if case is False:
        # add case flag, if provided
        flags |= re.IGNORECASE

    if regex or flags or callable(repl):
        if not isinstance(pat, re.Pattern):
            if regex is False:
                pat = re.escape(pat)
            pat = re.compile(pat, flags=flags)

        n = n if n >= 0 else 0
        f = lambda x: pat.sub(repl=repl, string=x, count=n)
    else:
        f = lambda x: x.replace(pat, repl, n)

    return self._str_map(f, dtype=str)
(self, pat: str | re.Pattern, repl: Union[str, Callable], n: int = -1, case: bool = True, flags: int = 0, regex: bool = True)
65,291
pandas.core.strings.object_array
_str_rfind
null
def _str_rfind(self, sub, start: int = 0, end=None): return self._str_find_(sub, start, end, side="right")
(self, sub, start: int = 0, end=None)
65,292
pandas.core.strings.object_array
_str_rindex
null
def _str_rindex(self, sub, start: int = 0, end=None):
    if end:
        f = lambda x: x.rindex(sub, start, end)
    else:
        f = lambda x: x.rindex(sub, start)
    return self._str_map(f, dtype="int64")
(self, sub, start: int = 0, end=None)
65,293
pandas.core.strings.object_array
_str_rpartition
null
def _str_rpartition(self, sep: str, expand): return self._str_map(lambda x: x.rpartition(sep), dtype="object")
(self, sep: str, expand)
65,294
pandas.core.strings.object_array
_str_rsplit
null
def _str_rsplit(self, pat=None, n=-1):
    if n is None or n == 0:
        n = -1
    f = lambda x: x.rsplit(pat, n)
    return self._str_map(f, dtype="object")
(self, pat=None, n=-1)
65,295
pandas.core.strings.object_array
_str_rstrip
null
def _str_rstrip(self, to_strip=None): return self._str_map(lambda x: x.rstrip(to_strip))
(self, to_strip=None)
65,296
pandas.core.strings.object_array
_str_slice
null
def _str_slice(self, start=None, stop=None, step=None):
    obj = slice(start, stop, step)
    return self._str_map(lambda x: x[obj])
(self, start=None, stop=None, step=None)
65,297
pandas.core.strings.object_array
_str_slice_replace
null
def _str_slice_replace(self, start=None, stop=None, repl=None):
    if repl is None:
        repl = ""

    def f(x):
        if x[start:stop] == "":
            local_stop = start
        else:
            local_stop = stop
        y = ""
        if start is not None:
            y += x[:start]
        y += repl
        if stop is not None:
            y += x[local_stop:]
        return y

    return self._str_map(f)
(self, start=None, stop=None, repl=None)
65,298
pandas.core.strings.object_array
_str_split
null
def _str_split(
    self,
    pat: str | re.Pattern | None = None,
    n=-1,
    expand: bool = False,
    regex: bool | None = None,
):
    if pat is None:
        if n is None or n == 0:
            n = -1
        f = lambda x: x.split(pat, n)
    else:
        new_pat: str | re.Pattern
        if regex is True or isinstance(pat, re.Pattern):
            new_pat = re.compile(pat)
        elif regex is False:
            new_pat = pat
        # regex is None so link to old behavior #43563
        else:
            if len(pat) == 1:
                new_pat = pat
            else:
                new_pat = re.compile(pat)

        if isinstance(new_pat, re.Pattern):
            if n is None or n == -1:
                n = 0
            f = lambda x: new_pat.split(x, maxsplit=n)
        else:
            if n is None or n == 0:
                n = -1
            f = lambda x: x.split(pat, n)
    return self._str_map(f, dtype=object)
(self, pat: Union[str, re.Pattern, NoneType] = None, n=-1, expand: bool = False, regex: Optional[bool] = None)
65,299
pandas.core.strings.object_array
_str_startswith
null
def _str_startswith(self, pat, na=None):
    f = lambda x: x.startswith(pat)
    return self._str_map(f, na_value=na, dtype=np.dtype(bool))
(self, pat, na=None)
65,300
pandas.core.strings.object_array
_str_strip
null
def _str_strip(self, to_strip=None): return self._str_map(lambda x: x.strip(to_strip))
(self, to_strip=None)
65,301
pandas.core.strings.object_array
_str_swapcase
null
def _str_swapcase(self): return self._str_map(str.swapcase)
(self)
65,302
pandas.core.strings.object_array
_str_title
null
def _str_title(self): return self._str_map(str.title)
(self)
65,303
pandas.core.strings.object_array
_str_translate
null
def _str_translate(self, table): return self._str_map(lambda x: x.translate(table))
(self, table)
65,304
pandas.core.strings.object_array
_str_upper
null
def _str_upper(self): return self._str_map(lambda x: x.upper())
(self)
65,305
pandas.core.strings.object_array
_str_wrap
null
def _str_wrap(self, width: int, **kwargs):
    kwargs["width"] = width
    tw = textwrap.TextWrapper(**kwargs)
    return self._str_map(lambda s: "\n".join(tw.wrap(s)))
(self, width: int, **kwargs)
65,306
pandas.core.arrays.categorical
_unbox_scalar
null
def _unbox_scalar(self, key) -> int:
    # searchsorted is very performance sensitive. By converting codes
    # to same dtype as self.codes, we get much faster performance.
    code = self.categories.get_loc(key)
    code = self._ndarray.dtype.type(code)
    return code
(self, key) -> int
65,307
pandas.core.arrays.categorical
_validate_listlike
null
def _validate_listlike(self, value):
    # NB: here we assume scalar-like tuples have already been excluded
    value = extract_array(value, extract_numpy=True)

    # require identical categories set
    if isinstance(value, Categorical):
        if self.dtype != value.dtype:
            raise TypeError(
                "Cannot set a Categorical with another, "
                "without identical categories"
            )
        # dtype equality implies categories_match_up_to_permutation
        value = self._encode_with_my_categories(value)
        return value._codes

    from pandas import Index

    # tupleize_cols=False for e.g. test_fillna_iterable_category GH#41914
    to_add = Index._with_infer(value, tupleize_cols=False).difference(
        self.categories
    )

    # no assignments of values not in categories, but it's always ok to set
    # something to np.nan
    if len(to_add) and not isna(to_add).all():
        raise TypeError(
            "Cannot setitem on a Categorical with a new "
            "category, set the categories first"
        )

    codes = self.categories.get_indexer(value)
    return codes.astype(self._ndarray.dtype, copy=False)
(self, value)
65,308
pandas.core.arrays.categorical
_validate_scalar
Convert a user-facing fill_value to a representation to use with our underlying ndarray, raising TypeError if this is not possible. Parameters ---------- fill_value : object Returns ------- fill_value : int Raises ------ TypeError
def _validate_scalar(self, fill_value):
    """
    Convert a user-facing fill_value to a representation to use with our
    underlying ndarray, raising TypeError if this is not possible.

    Parameters
    ----------
    fill_value : object

    Returns
    -------
    fill_value : int

    Raises
    ------
    TypeError
    """
    if is_valid_na_for_dtype(fill_value, self.categories.dtype):
        fill_value = -1
    elif fill_value in self.categories:
        fill_value = self._unbox_scalar(fill_value)
    else:
        raise TypeError(
            "Cannot setitem on a Categorical with a new "
            f"category ({fill_value}), set the categories first"
        ) from None
    return fill_value
(self, fill_value)
65,309
pandas.core.arrays.categorical
_validate_setitem_value
null
def _validate_setitem_value(self, value):
    if not is_hashable(value):
        # wrap scalars and hashable-listlikes in list
        return self._validate_listlike(value)
    else:
        return self._validate_scalar(value)
(self, value)
65,310
pandas.core.arrays._mixins
_values_for_argsort
null
def _values_for_argsort(self) -> np.ndarray: return self._ndarray
(self) -> numpy.ndarray
65,311
pandas.core.arrays._mixins
_values_for_factorize
null
def _values_for_factorize(self): return self._ndarray, self._internal_fill_value
(self)
65,312
pandas.core.arrays.base
_values_for_json
Specify how to render our entries in to_json. Notes ----- The dtype on the returned ndarray is not restricted, but for non-native types that are not specifically handled in objToJSON.c, to_json is liable to raise. In these cases, it may be safer to return an ndarray of strings.
def _values_for_json(self) -> np.ndarray:
    """
    Specify how to render our entries in to_json.

    Notes
    -----
    The dtype on the returned ndarray is not restricted, but for non-native
    types that are not specifically handled in objToJSON.c, to_json is
    liable to raise. In these cases, it may be safer to return an ndarray
    of strings.
    """
    return np.asarray(self)
(self) -> numpy.ndarray
65,313
pandas.core.arrays.categorical
_values_for_rank
For correctly ranking ordered categorical data. See GH#15420 Ordered categorical data should be ranked on the basis of codes with -1 translated to NaN. Returns ------- numpy.array
def _values_for_rank(self) -> np.ndarray:
    """
    For correctly ranking ordered categorical data. See GH#15420

    Ordered categorical data should be ranked on the basis of
    codes with -1 translated to NaN.

    Returns
    -------
    numpy.array
    """
    from pandas import Series

    if self.ordered:
        values = self.codes
        mask = values == -1
        if mask.any():
            values = values.astype("float64")
            values[mask] = np.nan
    elif is_any_real_numeric_dtype(self.categories.dtype):
        values = np.array(self)
    else:
        # reorder the categories (so rank can use the float codes)
        # instead of passing an object array to rank
        values = np.array(
            self.rename_categories(
                Series(self.categories, copy=False).rank().values
            )
        )
    return values
(self) -> numpy.ndarray
65,314
pandas.core.arrays._mixins
_where
Analogue to np.where(mask, self, value) Parameters ---------- mask : np.ndarray[bool] value : scalar or listlike Raises ------ TypeError If value cannot be cast to self.dtype.
def _where(self: Self, mask: npt.NDArray[np.bool_], value) -> Self:
    """
    Analogue to np.where(mask, self, value)

    Parameters
    ----------
    mask : np.ndarray[bool]
    value : scalar or listlike

    Raises
    ------
    TypeError
        If value cannot be cast to self.dtype.
    """
    value = self._validate_setitem_value(value)

    res_values = np.where(mask, self._ndarray, value)
    if res_values.dtype != self._ndarray.dtype:
        raise AssertionError(
            # GH#56410
            "Something has gone wrong, please report a bug at "
            "github.com/pandas-dev/pandas/"
        )
    return self._from_backing_data(res_values)
(self: 'Self', mask: 'npt.NDArray[np.bool_]', value) -> 'Self'
65,315
pandas.core.arrays._mixins
_wrap_reduction_result
null
def _wrap_reduction_result(self, axis: AxisInt | None, result):
    if axis is None or self.ndim == 1:
        return self._box_func(result)
    return self._from_backing_data(result)
(self, axis: int | None, result)
65,316
pandas.core.arrays.categorical
add_categories
Add new categories. `new_categories` will be included at the last/highest place in the categories and will be unused directly after this call. Parameters ---------- new_categories : category or list-like of category The new categories to be included. Returns ------- Categorical Categorical with new categories added. Raises ------ ValueError If the new categories include old categories or do not validate as categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical(['c', 'b', 'c']) >>> c ['c', 'b', 'c'] Categories (2, object): ['b', 'c'] >>> c.add_categories(['d', 'a']) ['c', 'b', 'c'] Categories (4, object): ['b', 'c', 'd', 'a']
def add_categories(self, new_categories) -> Self: """ Add new categories. `new_categories` will be included at the last/highest place in the categories and will be unused directly after this call. Parameters ---------- new_categories : category or list-like of category The new categories to be included. Returns ------- Categorical Categorical with new categories added. Raises ------ ValueError If the new categories include old categories or do not validate as categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical(['c', 'b', 'c']) >>> c ['c', 'b', 'c'] Categories (2, object): ['b', 'c'] >>> c.add_categories(['d', 'a']) ['c', 'b', 'c'] Categories (4, object): ['b', 'c', 'd', 'a'] """ if not is_list_like(new_categories): new_categories = [new_categories] already_included = set(new_categories) & set(self.dtype.categories) if len(already_included) != 0: raise ValueError( f"new categories must not include old categories: {already_included}" ) if hasattr(new_categories, "dtype"): from pandas import Series dtype = find_common_type( [self.dtype.categories.dtype, new_categories.dtype] ) new_categories = Series( list(self.dtype.categories) + list(new_categories), dtype=dtype ) else: new_categories = list(self.dtype.categories) + list(new_categories) new_dtype = CategoricalDtype(new_categories, self.ordered) cat = self.copy() codes = coerce_indexer_dtype(cat._ndarray, new_dtype.categories) NDArrayBacked.__init__(cat, codes, new_dtype) return cat
(self, new_categories) -> 'Self'
65,317
pandas.core.arrays._mixins
argmax
null
def argmax(self, axis: AxisInt = 0, skipna: bool = True):  # type: ignore[override]
    # override base class by adding axis keyword
    validate_bool_kwarg(skipna, "skipna")
    if not skipna and self._hasna:
        raise NotImplementedError
    return nargminmax(self, "argmax", axis=axis)
(self, axis: int = 0, skipna: bool = True)
65,318
pandas.core.arrays._mixins
argmin
null
def argmin(self, axis: AxisInt = 0, skipna: bool = True):  # type: ignore[override]
    # override base class by adding axis keyword
    validate_bool_kwarg(skipna, "skipna")
    if not skipna and self._hasna:
        raise NotImplementedError
    return nargminmax(self, "argmin", axis=axis)
(self, axis: int = 0, skipna: bool = True)
65,319
pandas.core.arrays.categorical
argsort
Return the indices that would sort the Categorical. Missing values are sorted at the end. Parameters ---------- ascending : bool, default True Whether the indices should result in an ascending or descending sort. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm. **kwargs: passed through to :func:`numpy.argsort`. Returns ------- np.ndarray[np.intp] See Also -------- numpy.ndarray.argsort Notes ----- While an ordering is applied to the category values, arg-sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Examples -------- >>> pd.Categorical(['b', 'b', 'a', 'c']).argsort() array([2, 0, 1, 3]) >>> cat = pd.Categorical(['b', 'b', 'a', 'c'], ... categories=['c', 'b', 'a'], ... ordered=True) >>> cat.argsort() array([3, 0, 1, 2]) Missing values are placed at the end >>> cat = pd.Categorical([2, None, 1]) >>> cat.argsort() array([2, 0, 1])
def argsort( self, *, ascending: bool = True, kind: SortKind = "quicksort", **kwargs ): """ Return the indices that would sort the Categorical. Missing values are sorted at the end. Parameters ---------- ascending : bool, default True Whether the indices should result in an ascending or descending sort. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm. **kwargs: passed through to :func:`numpy.argsort`. Returns ------- np.ndarray[np.intp] See Also -------- numpy.ndarray.argsort Notes ----- While an ordering is applied to the category values, arg-sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Examples -------- >>> pd.Categorical(['b', 'b', 'a', 'c']).argsort() array([2, 0, 1, 3]) >>> cat = pd.Categorical(['b', 'b', 'a', 'c'], ... categories=['c', 'b', 'a'], ... ordered=True) >>> cat.argsort() array([3, 0, 1, 2]) Missing values are placed at the end >>> cat = pd.Categorical([2, None, 1]) >>> cat.argsort() array([2, 0, 1]) """ return super().argsort(ascending=ascending, kind=kind, **kwargs)
(self, *, ascending: 'bool' = True, kind: 'SortKind' = 'quicksort', **kwargs)
65,320
pandas.core.arrays.categorical
as_ordered
Set the Categorical to be ordered. Returns ------- Categorical Ordered Categorical. Examples -------- For :class:`pandas.Series`: >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category') >>> ser.cat.ordered False >>> ser = ser.cat.as_ordered() >>> ser.cat.ordered True For :class:`pandas.CategoricalIndex`: >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a']) >>> ci.ordered False >>> ci = ci.as_ordered() >>> ci.ordered True
def as_ordered(self) -> Self: """ Set the Categorical to be ordered. Returns ------- Categorical Ordered Categorical. Examples -------- For :class:`pandas.Series`: >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category') >>> ser.cat.ordered False >>> ser = ser.cat.as_ordered() >>> ser.cat.ordered True For :class:`pandas.CategoricalIndex`: >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a']) >>> ci.ordered False >>> ci = ci.as_ordered() >>> ci.ordered True """ return self.set_ordered(True)
(self) -> 'Self'
65,321
pandas.core.arrays.categorical
as_unordered
Set the Categorical to be unordered. Returns ------- Categorical Unordered Categorical. Examples -------- For :class:`pandas.Series`: >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'a'], ordered=True) >>> ser = pd.Series(raw_cat) >>> ser.cat.ordered True >>> ser = ser.cat.as_unordered() >>> ser.cat.ordered False For :class:`pandas.CategoricalIndex`: >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a'], ordered=True) >>> ci.ordered True >>> ci = ci.as_unordered() >>> ci.ordered False
def as_unordered(self) -> Self: """ Set the Categorical to be unordered. Returns ------- Categorical Unordered Categorical. Examples -------- For :class:`pandas.Series`: >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'a'], ordered=True) >>> ser = pd.Series(raw_cat) >>> ser.cat.ordered True >>> ser = ser.cat.as_unordered() >>> ser.cat.ordered False For :class:`pandas.CategoricalIndex`: >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a'], ordered=True) >>> ci.ordered True >>> ci = ci.as_unordered() >>> ci.ordered False """ return self.set_ordered(False)
(self) -> 'Self'
65,322
pandas.core.arrays.categorical
astype
Coerce this type to another dtype Parameters ---------- dtype : numpy dtype or pandas type copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and dtype is categorical, the original object is returned.
def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: """ Coerce this type to another dtype Parameters ---------- dtype : numpy dtype or pandas type copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and dtype is categorical, the original object is returned. """ dtype = pandas_dtype(dtype) if self.dtype is dtype: result = self.copy() if copy else self elif isinstance(dtype, CategoricalDtype): # GH 10696/18593/18630 dtype = self.dtype.update_dtype(dtype) self = self.copy() if copy else self result = self._set_dtype(dtype) elif isinstance(dtype, ExtensionDtype): return super().astype(dtype, copy=copy) elif dtype.kind in "iu" and self.isna().any(): raise ValueError("Cannot convert float NaN to integer") elif len(self.codes) == 0 or len(self.categories) == 0: result = np.array( self, dtype=dtype, copy=copy, ) else: # GH8628 (PERF): astype category codes instead of astyping array new_cats = self.categories._values try: new_cats = new_cats.astype(dtype=dtype, copy=copy) fill_value = self.categories._na_value if not is_valid_na_for_dtype(fill_value, dtype): fill_value = lib.item_from_zerodim( np.array(self.categories._na_value).astype(dtype) ) except ( TypeError, # downstream error msg for CategoricalIndex is misleading ValueError, ): msg = f"Cannot cast {self.categories.dtype} dtype to {dtype}" raise ValueError(msg) result = take_nd( new_cats, ensure_platform_int(self._codes), fill_value=fill_value ) return result
(self, dtype: 'AstypeArg', copy: 'bool' = True) -> 'ArrayLike'
65,323
pandas.core.arrays.categorical
check_for_ordered
assert that we are ordered
def check_for_ordered(self, op) -> None:
    """assert that we are ordered"""
    if not self.ordered:
        raise TypeError(
            f"Categorical is not ordered for operation {op}\n"
            "you can use .as_ordered() to change the "
            "Categorical to an ordered one\n"
        )
(self, op) -> NoneType
65,324
pandas.core.arrays.categorical
describe
Describes this Categorical Returns ------- description: `DataFrame` A dataframe with frequency and counts by category.
def describe(self) -> DataFrame:
    """
    Describes this Categorical

    Returns
    -------
    description: `DataFrame`
        A dataframe with frequency and counts by category.
    """
    counts = self.value_counts(dropna=False)
    freqs = counts / counts.sum()

    from pandas import Index
    from pandas.core.reshape.concat import concat

    result = concat([counts, freqs], axis=1)
    result.columns = Index(["counts", "freqs"])
    result.index.name = "categories"

    return result
(self) -> 'DataFrame'
65,325
pandas.core.arrays.base
dropna
Return ExtensionArray without NA values. Returns ------- Examples -------- >>> pd.array([1, 2, np.nan]).dropna() <IntegerArray> [1, 2] Length: 2, dtype: Int64
def dropna(self) -> Self:
    """
    Return ExtensionArray without NA values.

    Returns
    -------

    Examples
    --------
    >>> pd.array([1, 2, np.nan]).dropna()
    <IntegerArray>
    [1, 2]
    Length: 2, dtype: Int64
    """
    # error: Unsupported operand type for ~ ("ExtensionArray")
    return self[~self.isna()]  # type: ignore[operator]
(self) -> 'Self'
65,326
pandas.core.arrays.base
duplicated
Return boolean ndarray denoting duplicate values. Parameters ---------- keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- ndarray[bool] Examples -------- >>> pd.array([1, 1, 2, 3, 3], dtype="Int64").duplicated() array([False, True, False, False, True])
def duplicated( self, keep: Literal["first", "last", False] = "first" ) -> npt.NDArray[np.bool_]: """ Return boolean ndarray denoting duplicate values. Parameters ---------- keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- ndarray[bool] Examples -------- >>> pd.array([1, 1, 2, 3, 3], dtype="Int64").duplicated() array([False, True, False, False, True]) """ mask = self.isna().astype(np.bool_, copy=False) return duplicated(values=self, keep=keep, mask=mask)
(self, keep: "Literal['first', 'last', False]" = 'first') -> 'npt.NDArray[np.bool_]'
65,327
pandas.core.arrays.categorical
equals
Returns True if categorical arrays are equal. Parameters ---------- other : `Categorical` Returns ------- bool
def equals(self, other: object) -> bool:
    """
    Returns True if categorical arrays are equal.

    Parameters
    ----------
    other : `Categorical`

    Returns
    -------
    bool
    """
    if not isinstance(other, Categorical):
        return False
    elif self._categories_match_up_to_permutation(other):
        other = self._encode_with_my_categories(other)
        return np.array_equal(self._codes, other._codes)
    return False
(self, other: object) -> bool
65,328
pandas.core.arrays.base
factorize
Encode the extension array as an enumerated type. Parameters ---------- use_na_sentinel : bool, default True If True, the sentinel -1 will be used for NaN values. If False, NaN values will be encoded as non-negative integers and will not drop the NaN from the uniques of the values. .. versionadded:: 1.5.0 Returns ------- codes : ndarray An integer NumPy array that's an indexer into the original ExtensionArray. uniques : ExtensionArray An ExtensionArray containing the unique values of `self`. .. note:: uniques will *not* contain an entry for the NA value of the ExtensionArray if there are any missing values present in `self`. See Also -------- factorize : Top-level factorize method that dispatches here. Notes ----- :meth:`pandas.factorize` offers a `sort` keyword as well. Examples -------- >>> idx1 = pd.PeriodIndex(["2014-01", "2014-01", "2014-02", "2014-02", ... "2014-03", "2014-03"], freq="M") >>> arr, idx = idx1.factorize() >>> arr array([0, 0, 1, 1, 2, 2]) >>> idx PeriodIndex(['2014-01', '2014-02', '2014-03'], dtype='period[M]')
def factorize( self, use_na_sentinel: bool = True, ) -> tuple[np.ndarray, ExtensionArray]: """ Encode the extension array as an enumerated type. Parameters ---------- use_na_sentinel : bool, default True If True, the sentinel -1 will be used for NaN values. If False, NaN values will be encoded as non-negative integers and will not drop the NaN from the uniques of the values. .. versionadded:: 1.5.0 Returns ------- codes : ndarray An integer NumPy array that's an indexer into the original ExtensionArray. uniques : ExtensionArray An ExtensionArray containing the unique values of `self`. .. note:: uniques will *not* contain an entry for the NA value of the ExtensionArray if there are any missing values present in `self`. See Also -------- factorize : Top-level factorize method that dispatches here. Notes ----- :meth:`pandas.factorize` offers a `sort` keyword as well. Examples -------- >>> idx1 = pd.PeriodIndex(["2014-01", "2014-01", "2014-02", "2014-02", ... "2014-03", "2014-03"], freq="M") >>> arr, idx = idx1.factorize() >>> arr array([0, 0, 1, 1, 2, 2]) >>> idx PeriodIndex(['2014-01', '2014-02', '2014-03'], dtype='period[M]') """ # Implementer note: There are two ways to override the behavior of # pandas.factorize # 1. _values_for_factorize and _from_factorize. # Specify the values passed to pandas' internal factorization # routines, and how to convert from those values back to the # original ExtensionArray. # 2. ExtensionArray.factorize. # Complete control over factorization. arr, na_value = self._values_for_factorize() codes, uniques = factorize_array( arr, use_na_sentinel=use_na_sentinel, na_value=na_value ) uniques_ea = self._from_factorized(uniques, self) return codes, uniques_ea
(self, use_na_sentinel: bool = True) -> tuple[numpy.ndarray, pandas.core.arrays.base.ExtensionArray]
65,329
pandas.core.arrays._mixins
fillna
Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, array-like If a scalar value is passed it is used to fill all missing values. Alternatively, an array-like "value" can be given. It's expected that the array-like have the same length as 'self'. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series: * pad / ffill: propagate last valid observation forward to next valid. * backfill / bfill: use NEXT valid observation to fill gap. .. deprecated:: 2.1.0 limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. .. deprecated:: 2.1.0 copy : bool, default True Whether to make a copy of the data before filling. If False, then the original should be modified and no new memory should be allocated. For ExtensionArray subclasses that cannot do this, it is at the author's discretion whether to ignore "copy=False" or to raise. The base class implementation ignores the keyword in pad/backfill cases. Returns ------- ExtensionArray With NA/NaN filled. Examples -------- >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan]) >>> arr.fillna(0) <IntegerArray> [0, 0, 2, 3, 0, 0] Length: 6, dtype: Int64
@doc(ExtensionArray.fillna)
def fillna(
    self, value=None, method=None, limit: int | None = None, copy: bool = True
) -> Self:
    value, method = validate_fillna_kwargs(
        value, method, validate_scalar_dict_value=False
    )

    mask = self.isna()
    # error: Argument 2 to "check_value_size" has incompatible type
    # "ExtensionArray"; expected "ndarray"
    value = missing.check_value_size(
        value, mask, len(self)  # type: ignore[arg-type]
    )

    if mask.any():
        if method is not None:
            # (for now) when self.ndim == 2, we assume axis=0
            func = missing.get_fill_func(method, ndim=self.ndim)
            npvalues = self._ndarray.T
            if copy:
                npvalues = npvalues.copy()
            func(npvalues, limit=limit, mask=mask.T)
            npvalues = npvalues.T

            # TODO: NumpyExtensionArray didn't used to copy, need tests
            #  for this
            new_values = self._from_backing_data(npvalues)
        else:
            # fill with value
            if copy:
                new_values = self.copy()
            else:
                new_values = self[:]
            new_values[mask] = value
    else:
        # We validate the fill_value even if there is nothing to fill
        if value is not None:
            self._validate_setitem_value(value)

        if not copy:
            new_values = self[:]
        else:
            new_values = self.copy()
    return new_values
(self, value=None, method=None, limit: Optional[int] = None, copy: bool = True) -> 'Self'
65,330
pandas.core.arrays._mixins
insert
Make new ExtensionArray inserting new item at location. Follows Python list.append semantics for negative values. Parameters ---------- loc : int item : object Returns ------- type(self)
def insert(self, loc: int, item) -> Self:
    """
    Make new ExtensionArray inserting new item at location. Follows
    Python list.append semantics for negative values.

    Parameters
    ----------
    loc : int
    item : object

    Returns
    -------
    type(self)
    """
    loc = validate_insert_loc(loc, len(self))

    code = self._validate_scalar(item)

    new_vals = np.concatenate(
        (
            self._ndarray[:loc],
            np.asarray([code], dtype=self._ndarray.dtype),
            self._ndarray[loc:],
        )
    )
    return self._from_backing_data(new_vals)
(self, loc: int, item) -> 'Self'
65,331
pandas.core.arrays.base
interpolate
See DataFrame.interpolate.__doc__. Examples -------- >>> arr = pd.arrays.NumpyExtensionArray(np.array([0, 1, np.nan, 3])) >>> arr.interpolate(method="linear", ... limit=3, ... limit_direction="forward", ... index=pd.Index([1, 2, 3, 4]), ... fill_value=1, ... copy=False, ... axis=0, ... limit_area="inside" ... ) <NumpyExtensionArray> [0.0, 1.0, 2.0, 3.0] Length: 4, dtype: float64
def interpolate( self, *, method: InterpolateOptions, axis: int, index: Index, limit, limit_direction, limit_area, copy: bool, **kwargs, ) -> Self: """ See DataFrame.interpolate.__doc__. Examples -------- >>> arr = pd.arrays.NumpyExtensionArray(np.array([0, 1, np.nan, 3])) >>> arr.interpolate(method="linear", ... limit=3, ... limit_direction="forward", ... index=pd.Index([1, 2, 3, 4]), ... fill_value=1, ... copy=False, ... axis=0, ... limit_area="inside" ... ) <NumpyExtensionArray> [0.0, 1.0, 2.0, 3.0] Length: 4, dtype: float64 """ # NB: we return type(self) even if copy=False raise NotImplementedError( f"{type(self).__name__} does not implement interpolate" )
(self, *, method: 'InterpolateOptions', axis: 'int', index: 'Index', limit, limit_direction, limit_area, copy: 'bool', **kwargs) -> 'Self'
65,332
pandas.core.arrays.categorical
isin
Check whether `values` are contained in Categorical. Return a boolean NumPy Array showing whether each element in the Categorical matches an element in the passed sequence of `values` exactly. Parameters ---------- values : np.ndarray or ExtensionArray The sequence of values to test. Passing in a single string will raise a ``TypeError``. Instead, turn a single string into a list of one element. Returns ------- np.ndarray[bool] Raises ------ TypeError * If `values` is not a set or list-like See Also -------- pandas.Series.isin : Equivalent method on Series. Examples -------- >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama', ... 'hippo']) >>> s.isin(['cow', 'lama']) array([ True, True, True, False, True, False]) Passing a single string as ``s.isin('lama')`` will raise an error. Use a list of one element instead: >>> s.isin(['lama']) array([ True, False, True, False, True, False])
def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: """ Check whether `values` are contained in Categorical. Return a boolean NumPy Array showing whether each element in the Categorical matches an element in the passed sequence of `values` exactly. Parameters ---------- values : np.ndarray or ExtensionArray The sequence of values to test. Passing in a single string will raise a ``TypeError``. Instead, turn a single string into a list of one element. Returns ------- np.ndarray[bool] Raises ------ TypeError * If `values` is not a set or list-like See Also -------- pandas.Series.isin : Equivalent method on Series. Examples -------- >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama', ... 'hippo']) >>> s.isin(['cow', 'lama']) array([ True, True, True, False, True, False]) Passing a single string as ``s.isin('lama')`` will raise an error. Use a list of one element instead: >>> s.isin(['lama']) array([ True, False, True, False, True, False]) """ null_mask = np.asarray(isna(values)) code_values = self.categories.get_indexer_for(values) code_values = code_values[null_mask | (code_values >= 0)] return algorithms.isin(self.codes, code_values)
(self, values: 'ArrayLike') -> 'npt.NDArray[np.bool_]'
65,333
pandas.core.arrays.categorical
isna
Detect missing values Missing values (-1 in .codes) are detected. Returns ------- np.ndarray[bool] of whether my values are null See Also -------- isna : Top-level isna. isnull : Alias of isna. Categorical.notna : Boolean inverse of Categorical.isna.
def isna(self) -> npt.NDArray[np.bool_]:
    """
    Detect missing values

    Missing values (-1 in .codes) are detected.

    Returns
    -------
    np.ndarray[bool] of whether my values are null

    See Also
    --------
    isna : Top-level isna.
    isnull : Alias of isna.
    Categorical.notna : Boolean inverse of Categorical.isna.
    """
    return self._codes == -1
(self) -> 'npt.NDArray[np.bool_]'
65,335
pandas.core.arrays.categorical
map
Map categories using an input mapping or function. Maps the categories to new categories. If the mapping correspondence is one-to-one the result is a :class:`~pandas.Categorical` which has the same order property as the original, otherwise a :class:`~pandas.Index` is returned. NaN values are unaffected. If a `dict` or :class:`~pandas.Series` is used any unmapped category is mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` will be returned. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'}, default 'ignore' If 'ignore', propagate NaN values, without passing them to the mapping correspondence. .. deprecated:: 2.1.0 The default value of 'ignore' has been deprecated and will be changed to None in the future. Returns ------- pandas.Categorical or pandas.Index Mapped categorical. See Also -------- CategoricalIndex.map : Apply a mapping correspondence on a :class:`~pandas.CategoricalIndex`. Index.map : Apply a mapping correspondence on an :class:`~pandas.Index`. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Series.apply : Apply more complex functions on a :class:`~pandas.Series`. Examples -------- >>> cat = pd.Categorical(['a', 'b', 'c']) >>> cat ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> cat.map(lambda x: x.upper(), na_action=None) ['A', 'B', 'C'] Categories (3, object): ['A', 'B', 'C'] >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'}, na_action=None) ['first', 'second', 'third'] Categories (3, object): ['first', 'second', 'third'] If the mapping is one-to-one the ordering of the categories is preserved: >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True) >>> cat ['a', 'b', 'c'] Categories (3, object): ['a' < 'b' < 'c'] >>> cat.map({'a': 3, 'b': 2, 'c': 1}, na_action=None) [3, 2, 1] Categories (3, int64): [3 < 2 < 1] If the mapping is not one-to-one an :class:`~pandas.Index` is returned: >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'}, na_action=None) Index(['first', 'second', 'first'], dtype='object') If a `dict` is used, all unmapped categories are mapped to `NaN` and the result is an :class:`~pandas.Index`: >>> cat.map({'a': 'first', 'b': 'second'}, na_action=None) Index(['first', 'second', nan], dtype='object')
def map( self, mapper, na_action: Literal["ignore"] | None | lib.NoDefault = lib.no_default, ): """ Map categories using an input mapping or function. Maps the categories to new categories. If the mapping correspondence is one-to-one the result is a :class:`~pandas.Categorical` which has the same order property as the original, otherwise a :class:`~pandas.Index` is returned. NaN values are unaffected. If a `dict` or :class:`~pandas.Series` is used any unmapped category is mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` will be returned. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'}, default 'ignore' If 'ignore', propagate NaN values, without passing them to the mapping correspondence. .. deprecated:: 2.1.0 The default value of 'ignore' has been deprecated and will be changed to None in the future. Returns ------- pandas.Categorical or pandas.Index Mapped categorical. See Also -------- CategoricalIndex.map : Apply a mapping correspondence on a :class:`~pandas.CategoricalIndex`. Index.map : Apply a mapping correspondence on an :class:`~pandas.Index`. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Series.apply : Apply more complex functions on a :class:`~pandas.Series`. Examples -------- >>> cat = pd.Categorical(['a', 'b', 'c']) >>> cat ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> cat.map(lambda x: x.upper(), na_action=None) ['A', 'B', 'C'] Categories (3, object): ['A', 'B', 'C'] >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'}, na_action=None) ['first', 'second', 'third'] Categories (3, object): ['first', 'second', 'third'] If the mapping is one-to-one the ordering of the categories is preserved: >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True) >>> cat ['a', 'b', 'c'] Categories (3, object): ['a' < 'b' < 'c'] >>> cat.map({'a': 3, 'b': 2, 'c': 1}, na_action=None) [3, 2, 1] Categories (3, int64): [3 < 2 < 1] If the mapping is not one-to-one an :class:`~pandas.Index` is returned: >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'}, na_action=None) Index(['first', 'second', 'first'], dtype='object') If a `dict` is used, all unmapped categories are mapped to `NaN` and the result is an :class:`~pandas.Index`: >>> cat.map({'a': 'first', 'b': 'second'}, na_action=None) Index(['first', 'second', nan], dtype='object') """ if na_action is lib.no_default: warnings.warn( "The default value of 'ignore' for the `na_action` parameter in " "pandas.Categorical.map is deprecated and will be " "changed to 'None' in a future version. Please set na_action to the " "desired value to avoid seeing this warning", FutureWarning, stacklevel=find_stack_level(), ) na_action = "ignore" assert callable(mapper) or is_dict_like(mapper) new_categories = self.categories.map(mapper) has_nans = np.any(self._codes == -1) na_val = np.nan if na_action is None and has_nans: na_val = mapper(np.nan) if callable(mapper) else mapper.get(np.nan, np.nan) if new_categories.is_unique and not new_categories.hasnans and na_val is np.nan: new_dtype = CategoricalDtype(new_categories, ordered=self.ordered) return self.from_codes(self._codes.copy(), dtype=new_dtype, validate=False) if has_nans: new_categories = new_categories.insert(len(new_categories), na_val) return np.take(new_categories, self._codes)
(self, mapper, na_action: Union[Literal['ignore'], NoneType, Literal[<no_default>]] = <no_default>)
65,336
pandas.core.arrays.categorical
max
The maximum value of the object. Only ordered `Categoricals` have a maximum! Raises ------ TypeError If the `Categorical` is not `ordered`. Returns ------- max : the maximum of this `Categorical`, NA if array is empty
def max(self, *, skipna: bool = True, **kwargs):
    """
    The maximum value of the object.

    Only ordered `Categoricals` have a maximum!

    Raises
    ------
    TypeError
        If the `Categorical` is not `ordered`.

    Returns
    -------
    max : the maximum of this `Categorical`, NA if array is empty
    """
    nv.validate_minmax_axis(kwargs.get("axis", 0))
    nv.validate_max((), kwargs)
    self.check_for_ordered("max")

    if not len(self._codes):
        return self.dtype.na_value

    good = self._codes != -1
    if not good.all():
        if skipna and good.any():
            pointer = self._codes[good].max()
        else:
            return np.nan
    else:
        pointer = self._codes.max()
    return self._wrap_reduction_result(None, pointer)
(self, *, skipna: bool = True, **kwargs)
65,337
pandas.core.arrays.categorical
memory_usage
Memory usage of my values Parameters ---------- deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption Returns ------- bytes used Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes
def memory_usage(self, deep: bool = False) -> int:
    """
    Memory usage of my values

    Parameters
    ----------
    deep : bool
        Introspect the data deeply, interrogate
        `object` dtypes for system-level memory consumption

    Returns
    -------
    bytes used

    Notes
    -----
    Memory usage does not include memory consumed by elements that
    are not components of the array if deep=False

    See Also
    --------
    numpy.ndarray.nbytes
    """
    return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
(self, deep: bool = False) -> int
65,338
pandas.core.arrays.categorical
min
The minimum value of the object. Only ordered `Categoricals` have a minimum! Raises ------ TypeError If the `Categorical` is not `ordered`. Returns ------- min : the minimum of this `Categorical`, NA value if empty
def min(self, *, skipna: bool = True, **kwargs):
    """
    The minimum value of the object.

    Only ordered `Categoricals` have a minimum!

    Raises
    ------
    TypeError
        If the `Categorical` is not `ordered`.

    Returns
    -------
    min : the minimum of this `Categorical`, NA value if empty
    """
    nv.validate_minmax_axis(kwargs.get("axis", 0))
    nv.validate_min((), kwargs)
    self.check_for_ordered("min")

    if not len(self._codes):
        return self.dtype.na_value

    good = self._codes != -1
    if not good.all():
        if skipna and good.any():
            pointer = self._codes[good].min()
        else:
            return np.nan
    else:
        pointer = self._codes.min()
    return self._wrap_reduction_result(None, pointer)
(self, *, skipna: bool = True, **kwargs)
65,339
pandas.core.arrays.categorical
notna
Inverse of isna Both missing values (-1 in .codes) and NA as a category are detected as null. Returns ------- np.ndarray[bool] of whether my values are not null See Also -------- notna : Top-level notna. notnull : Alias of notna. Categorical.isna : Boolean inverse of Categorical.notna.
def notna(self) -> npt.NDArray[np.bool_]:
    """
    Inverse of isna

    Both missing values (-1 in .codes) and NA as a category are detected
    as null.

    Returns
    -------
    np.ndarray[bool] of whether my values are not null

    See Also
    --------
    notna : Top-level notna.
    notnull : Alias of notna.
    Categorical.isna : Boolean inverse of Categorical.notna.
    """
    return ~self.isna()
(self) -> 'npt.NDArray[np.bool_]'
65,341
pandas.core.arrays.categorical
remove_categories
Remove the specified categories. `removals` must be included in the old categories. Values which were in the removed categories will be set to NaN Parameters ---------- removals : category or list of categories The categories which should be removed. Returns ------- Categorical Categorical with removed categories. Raises ------ ValueError If the removals are not contained in the categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd']) >>> c ['a', 'c', 'b', 'c', 'd'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c.remove_categories(['d', 'a']) [NaN, 'c', 'b', 'c', NaN] Categories (2, object): ['b', 'c']
def remove_categories(self, removals) -> Self: """ Remove the specified categories. `removals` must be included in the old categories. Values which were in the removed categories will be set to NaN Parameters ---------- removals : category or list of categories The categories which should be removed. Returns ------- Categorical Categorical with removed categories. Raises ------ ValueError If the removals are not contained in the categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd']) >>> c ['a', 'c', 'b', 'c', 'd'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c.remove_categories(['d', 'a']) [NaN, 'c', 'b', 'c', NaN] Categories (2, object): ['b', 'c'] """ from pandas import Index if not is_list_like(removals): removals = [removals] removals = Index(removals).unique().dropna() new_categories = ( self.dtype.categories.difference(removals, sort=False) if self.dtype.ordered is True else self.dtype.categories.difference(removals) ) not_included = removals.difference(self.dtype.categories) if len(not_included) != 0: not_included = set(not_included) raise ValueError(f"removals must all be in old categories: {not_included}") return self.set_categories(new_categories, ordered=self.ordered, rename=False)
(self, removals) -> 'Self'
65,342
pandas.core.arrays.categorical
remove_unused_categories
Remove categories which are not used. Returns ------- Categorical Categorical with unused categories dropped. See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd']) >>> c ['a', 'c', 'b', 'c', 'd'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c[2] = 'a' >>> c[4] = 'c' >>> c ['a', 'c', 'a', 'c', 'c'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c.remove_unused_categories() ['a', 'c', 'a', 'c', 'c'] Categories (2, object): ['a', 'c']
def remove_unused_categories(self) -> Self: """ Remove categories which are not used. Returns ------- Categorical Categorical with unused categories dropped. See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd']) >>> c ['a', 'c', 'b', 'c', 'd'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c[2] = 'a' >>> c[4] = 'c' >>> c ['a', 'c', 'a', 'c', 'c'] Categories (4, object): ['a', 'b', 'c', 'd'] >>> c.remove_unused_categories() ['a', 'c', 'a', 'c', 'c'] Categories (2, object): ['a', 'c'] """ idx, inv = np.unique(self._codes, return_inverse=True) if idx.size != 0 and idx[0] == -1: # na sentinel idx, inv = idx[1:], inv - 1 new_categories = self.dtype.categories.take(idx) new_dtype = CategoricalDtype._from_fastpath( new_categories, ordered=self.ordered ) new_codes = coerce_indexer_dtype(inv, new_dtype.categories) cat = self.copy() NDArrayBacked.__init__(cat, new_codes, new_dtype) return cat
(self) -> 'Self'
65,343
pandas.core.arrays.categorical
rename_categories
Rename categories. Parameters ---------- new_categories : list-like, dict-like or callable New categories which will replace old categories. * list-like: all items must be unique and the number of items in the new categories must match the existing number of categories. * dict-like: specifies a mapping from old categories to new. Categories not contained in the mapping are passed through and extra categories in the mapping are ignored. * callable : a callable that is called on all items in the old categories and whose return values comprise the new categories. Returns ------- Categorical Categorical with renamed categories. Raises ------ ValueError If new categories are list-like and do not have the same number of items than the current categories or do not validate as categories See Also -------- reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical(['a', 'a', 'b']) >>> c.rename_categories([0, 1]) [0, 0, 1] Categories (2, int64): [0, 1] For dict-like ``new_categories``, extra keys are ignored and categories not in the dictionary are passed through >>> c.rename_categories({'a': 'A', 'c': 'C'}) ['A', 'A', 'b'] Categories (2, object): ['A', 'b'] You may also provide a callable to create the new categories >>> c.rename_categories(lambda x: x.upper()) ['A', 'A', 'B'] Categories (2, object): ['A', 'B']
def rename_categories(self, new_categories) -> Self: """ Rename categories. Parameters ---------- new_categories : list-like, dict-like or callable New categories which will replace old categories. * list-like: all items must be unique and the number of items in the new categories must match the existing number of categories. * dict-like: specifies a mapping from old categories to new. Categories not contained in the mapping are passed through and extra categories in the mapping are ignored. * callable : a callable that is called on all items in the old categories and whose return values comprise the new categories. Returns ------- Categorical Categorical with renamed categories. Raises ------ ValueError If new categories are list-like and do not have the same number of items than the current categories or do not validate as categories See Also -------- reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical(['a', 'a', 'b']) >>> c.rename_categories([0, 1]) [0, 0, 1] Categories (2, int64): [0, 1] For dict-like ``new_categories``, extra keys are ignored and categories not in the dictionary are passed through >>> c.rename_categories({'a': 'A', 'c': 'C'}) ['A', 'A', 'b'] Categories (2, object): ['A', 'b'] You may also provide a callable to create the new categories >>> c.rename_categories(lambda x: x.upper()) ['A', 'A', 'B'] Categories (2, object): ['A', 'B'] """ if is_dict_like(new_categories): new_categories = [ new_categories.get(item, item) for item in self.categories ] elif callable(new_categories): new_categories = [new_categories(item) for item in self.categories] cat = self.copy() cat._set_categories(new_categories) return cat
(self, new_categories) -> 'Self'
65,344
pandas.core.arrays.categorical
reorder_categories
Reorder categories as specified in new_categories. ``new_categories`` need to include all old categories and no new category items. Parameters ---------- new_categories : Index-like The categories in new order. ordered : bool, optional Whether or not the categorical is treated as a ordered categorical. If not given, do not change the ordered information. Returns ------- Categorical Categorical with reordered categories. Raises ------ ValueError If the new categories do not contain all old category items or any new ones See Also -------- rename_categories : Rename categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- For :class:`pandas.Series`: >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category') >>> ser = ser.cat.reorder_categories(['c', 'b', 'a'], ordered=True) >>> ser 0 a 1 b 2 c 3 a dtype: category Categories (3, object): ['c' < 'b' < 'a'] >>> ser.sort_values() 2 c 1 b 0 a 3 a dtype: category Categories (3, object): ['c' < 'b' < 'a'] For :class:`pandas.CategoricalIndex`: >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a']) >>> ci CategoricalIndex(['a', 'b', 'c', 'a'], categories=['a', 'b', 'c'], ordered=False, dtype='category') >>> ci.reorder_categories(['c', 'b', 'a'], ordered=True) CategoricalIndex(['a', 'b', 'c', 'a'], categories=['c', 'b', 'a'], ordered=True, dtype='category')
def reorder_categories(self, new_categories, ordered=None) -> Self: """ Reorder categories as specified in new_categories. ``new_categories`` need to include all old categories and no new category items. Parameters ---------- new_categories : Index-like The categories in new order. ordered : bool, optional Whether or not the categorical is treated as a ordered categorical. If not given, do not change the ordered information. Returns ------- Categorical Categorical with reordered categories. Raises ------ ValueError If the new categories do not contain all old category items or any new ones See Also -------- rename_categories : Rename categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- For :class:`pandas.Series`: >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category') >>> ser = ser.cat.reorder_categories(['c', 'b', 'a'], ordered=True) >>> ser 0 a 1 b 2 c 3 a dtype: category Categories (3, object): ['c' < 'b' < 'a'] >>> ser.sort_values() 2 c 1 b 0 a 3 a dtype: category Categories (3, object): ['c' < 'b' < 'a'] For :class:`pandas.CategoricalIndex`: >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a']) >>> ci CategoricalIndex(['a', 'b', 'c', 'a'], categories=['a', 'b', 'c'], ordered=False, dtype='category') >>> ci.reorder_categories(['c', 'b', 'a'], ordered=True) CategoricalIndex(['a', 'b', 'c', 'a'], categories=['c', 'b', 'a'], ordered=True, dtype='category') """ if ( len(self.categories) != len(new_categories) or not self.categories.difference(new_categories).empty ): raise ValueError( "items in new_categories are not the same as in old categories" ) return self.set_categories(new_categories, ordered=ordered)
(self, new_categories, ordered=None) -> 'Self'
65,345
pandas.core.arrays._mixins
searchsorted
Find indices where elements should be inserted to maintain order. Find the indices into a sorted array `self` (a) such that, if the corresponding elements in `value` were inserted before the indices, the order of `self` would be preserved. Assuming that `self` is sorted: ====== ================================ `side` returned index `i` satisfies ====== ================================ left ``self[i-1] < value <= self[i]`` right ``self[i-1] <= value < self[i]`` ====== ================================ Parameters ---------- value : array-like, list or scalar Value(s) to insert into `self`. side : {'left', 'right'}, optional If 'left', the index of the first suitable location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `self`). sorter : 1-D array-like, optional Optional array of integer indices that sort array a into ascending order. They are typically the result of argsort. Returns ------- array of ints or int If value is array-like, array of insertion points. If value is scalar, a single integer. See Also -------- numpy.searchsorted : Similar method from NumPy. Examples -------- >>> arr = pd.array([1, 2, 3, 5]) >>> arr.searchsorted([4]) array([3])
@doc(ExtensionArray.searchsorted) def searchsorted( self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter | None = None, ) -> npt.NDArray[np.intp] | np.intp: npvalue = self._validate_setitem_value(value) return self._ndarray.searchsorted(npvalue, side=side, sorter=sorter)
(self, value: 'NumpyValueArrayLike | ExtensionArray', side: "Literal['left', 'right']" = 'left', sorter: 'NumpySorter | None' = None) -> 'npt.NDArray[np.intp] | np.intp'
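A usage sketch for this row (my addition, assuming pandas >= 2.0 semantics): on a Categorical the probe is validated against the categories first and the search then runs on the integer codes, so the probe must be an existing category and the array is assumed to be sorted. Expected results are noted in the comments.

import pandas as pd

# Ordered Categorical whose codes [0, 1, 1, 3] are already sorted.
cat = pd.Categorical(["a", "b", "b", "d"], categories=list("abcd"), ordered=True)
print(cat.searchsorted("c"))                       # expected 3: 'c' would slot in after both 'b's
print(cat.searchsorted(["a", "d"], side="right"))  # expected [1 4]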
65,346
pandas.core.arrays.categorical
set_categories
Set the categories to the specified new categories. ``new_categories`` can include new categories (which will result in unused categories) or remove old categories (which results in values set to ``NaN``). If ``rename=True``, the categories will simply be renamed (less or more items than in old categories will result in values set to ``NaN`` or in unused categories respectively). This method can be used to perform more than one action of adding, removing, and reordering simultaneously and is therefore faster than performing the individual steps via the more specialised methods. On the other hand, this method does not do checks (e.g., whether the old categories are included in the new categories on a reorder), which can result in surprising changes, for example when using special string dtypes, which do not consider an S1 string equal to a single-char Python string. Parameters ---------- new_categories : Index-like The categories in new order. ordered : bool, default False Whether or not the categorical is treated as an ordered categorical. If not given, do not change the ordered information. rename : bool, default False Whether or not the new_categories should be considered as a rename of the old categories or as reordered categories. Returns ------- Categorical with reordered categories. Raises ------ ValueError If new_categories does not validate as categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. Examples -------- For :class:`pandas.Series`: >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'A'], ... categories=['a', 'b', 'c'], ordered=True) >>> ser = pd.Series(raw_cat) >>> ser 0 a 1 b 2 c 3 NaN dtype: category Categories (3, object): ['a' < 'b' < 'c'] >>> ser.cat.set_categories(['A', 'B', 'C'], rename=True) 0 A 1 B 2 C 3 NaN dtype: category Categories (3, object): ['A' < 'B' < 'C'] For :class:`pandas.CategoricalIndex`: >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'A'], ... categories=['a', 'b', 'c'], ordered=True) >>> ci CategoricalIndex(['a', 'b', 'c', nan], categories=['a', 'b', 'c'], ordered=True, dtype='category') >>> ci.set_categories(['A', 'b', 'c']) CategoricalIndex([nan, 'b', 'c', nan], categories=['A', 'b', 'c'], ordered=True, dtype='category') >>> ci.set_categories(['A', 'b', 'c'], rename=True) CategoricalIndex(['A', 'b', 'c', nan], categories=['A', 'b', 'c'], ordered=True, dtype='category')
def set_categories(self, new_categories, ordered=None, rename: bool = False): """ Set the categories to the specified new categories. ``new_categories`` can include new categories (which will result in unused categories) or remove old categories (which results in values set to ``NaN``). If ``rename=True``, the categories will simply be renamed (less or more items than in old categories will result in values set to ``NaN`` or in unused categories respectively). This method can be used to perform more than one action of adding, removing, and reordering simultaneously and is therefore faster than performing the individual steps via the more specialised methods. On the other hand this methods does not do checks (e.g., whether the old categories are included in the new categories on a reorder), which can result in surprising changes, for example when using special string dtypes, which does not considers a S1 string equal to a single char python string. Parameters ---------- new_categories : Index-like The categories in new order. ordered : bool, default False Whether or not the categorical is treated as a ordered categorical. If not given, do not change the ordered information. rename : bool, default False Whether or not the new_categories should be considered as a rename of the old categories or as reordered categories. Returns ------- Categorical with reordered categories. Raises ------ ValueError If new_categories does not validate as categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. Examples -------- For :class:`pandas.Series`: >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'A'], ... categories=['a', 'b', 'c'], ordered=True) >>> ser = pd.Series(raw_cat) >>> ser 0 a 1 b 2 c 3 NaN dtype: category Categories (3, object): ['a' < 'b' < 'c'] >>> ser.cat.set_categories(['A', 'B', 'C'], rename=True) 0 A 1 B 2 C 3 NaN dtype: category Categories (3, object): ['A' < 'B' < 'C'] For :class:`pandas.CategoricalIndex`: >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'A'], ... categories=['a', 'b', 'c'], ordered=True) >>> ci CategoricalIndex(['a', 'b', 'c', nan], categories=['a', 'b', 'c'], ordered=True, dtype='category') >>> ci.set_categories(['A', 'b', 'c']) CategoricalIndex([nan, 'b', 'c', nan], categories=['A', 'b', 'c'], ordered=True, dtype='category') >>> ci.set_categories(['A', 'b', 'c'], rename=True) CategoricalIndex(['A', 'b', 'c', nan], categories=['A', 'b', 'c'], ordered=True, dtype='category') """ if ordered is None: ordered = self.dtype.ordered new_dtype = CategoricalDtype(new_categories, ordered=ordered) cat = self.copy() if rename: if cat.dtype.categories is not None and len(new_dtype.categories) < len( cat.dtype.categories ): # remove all _codes which are larger and set to -1/NaN cat._codes[cat._codes >= len(new_dtype.categories)] = -1 codes = cat._codes else: codes = recode_for_categories( cat.codes, cat.categories, new_dtype.categories ) NDArrayBacked.__init__(cat, codes, new_dtype) return cat
(self, new_categories, ordered=None, rename: bool = False)
65,347
pandas.core.arrays.categorical
set_ordered
Set the ordered attribute to the boolean value. Parameters ---------- value : bool Set whether this categorical is ordered (True) or not (False).
def set_ordered(self, value: bool) -> Self: """ Set the ordered attribute to the boolean value. Parameters ---------- value : bool Set whether this categorical is ordered (True) or not (False). """ new_dtype = CategoricalDtype(self.categories, ordered=value) cat = self.copy() NDArrayBacked.__init__(cat, cat._ndarray, new_dtype) return cat
(self, value: 'bool') -> 'Self'
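The docstring above has no example, so here is a small sketch of the behaviour I would expect: the result is a copy with a new ordered dtype, and the original is left untouched.

import pandas as pd

cat = pd.Categorical(["a", "b", "a"], categories=["a", "b"])
ordered_cat = cat.set_ordered(True)
print(ordered_cat.ordered)                   # expected True
print(ordered_cat.min(), ordered_cat.max())  # expected a b; min/max need an ordered Categorical
print(cat.ordered)                           # expected False: the original dtype is unchanged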
65,348
pandas.core.arrays._mixins
shift
Shift values by desired number. Newly introduced missing values are filled with ``self.dtype.na_value``. Parameters ---------- periods : int, default 1 The number of periods to shift. Negative values are allowed for shifting backwards. fill_value : object, optional The scalar value to use for newly introduced missing values. The default is ``self.dtype.na_value``. Returns ------- ExtensionArray Shifted. Notes ----- If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is returned. If ``periods > len(self)``, then an array of size len(self) is returned, with all values filled with ``self.dtype.na_value``. For 2-dimensional ExtensionArrays, we are always shifting along axis=0. Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr.shift(2) <IntegerArray> [<NA>, <NA>, 1] Length: 3, dtype: Int64
@doc(ExtensionArray.shift) def shift(self, periods: int = 1, fill_value=None): # NB: shift is always along axis=0 axis = 0 fill_value = self._validate_scalar(fill_value) new_values = shift(self._ndarray, periods, axis, fill_value) return self._from_backing_data(new_values)
(self, periods: int = 1, fill_value=None)
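A short sketch of how this reads for a Categorical (my addition): newly introduced slots get the NA value, and because fill_value goes through scalar validation it has to be an existing category.

import pandas as pd

cat = pd.Categorical(["a", "b", "c"])
print(cat.shift(1))                   # expected [NaN, 'a', 'b']
print(cat.shift(-1, fill_value="c"))  # expected ['b', 'c', 'c']; an unknown fill_value would raise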
65,349
pandas.core.arrays.categorical
sort_values
Sort the Categorical by category value returning a new Categorical by default. While an ordering is applied to the category values, sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Parameters ---------- inplace : bool, default False Do operation in place. ascending : bool, default True Order ascending. Passing False orders descending. The ordering parameter provides the method by which the category values are organized. na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end Returns ------- Categorical or None See Also -------- Categorical.sort Series.sort_values Examples -------- >>> c = pd.Categorical([1, 2, 2, 1, 5]) >>> c [1, 2, 2, 1, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values() [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, 1, 1] Categories (3, int64): [1, 2, 5] >>> c = pd.Categorical([1, 2, 2, 1, 5]) 'sort_values' behaviour with NaNs. Note that 'na_position' is independent of the 'ascending' parameter: >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) >>> c [NaN, 2, 2, NaN, 5] Categories (2, int64): [2, 5] >>> c.sort_values() [2, 2, 5, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(na_position='first') [NaN, NaN, 2, 2, 5] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False, na_position='first') [NaN, NaN, 5, 2, 2] Categories (2, int64): [2, 5]
def sort_values( self, *, inplace: bool = False, ascending: bool = True, na_position: str = "last", ) -> Self | None: """ Sort the Categorical by category value returning a new Categorical by default. While an ordering is applied to the category values, sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Parameters ---------- inplace : bool, default False Do operation in place. ascending : bool, default True Order ascending. Passing False orders descending. The ordering parameter provides the method by which the category values are organized. na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end Returns ------- Categorical or None See Also -------- Categorical.sort Series.sort_values Examples -------- >>> c = pd.Categorical([1, 2, 2, 1, 5]) >>> c [1, 2, 2, 1, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values() [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, 1, 1] Categories (3, int64): [1, 2, 5] >>> c = pd.Categorical([1, 2, 2, 1, 5]) 'sort_values' behaviour with NaNs. Note that 'na_position' is independent of the 'ascending' parameter: >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) >>> c [NaN, 2, 2, NaN, 5] Categories (2, int64): [2, 5] >>> c.sort_values() [2, 2, 5, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(na_position='first') [NaN, NaN, 2, 2, 5] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False, na_position='first') [NaN, NaN, 5, 2, 2] Categories (2, int64): [2, 5] """ inplace = validate_bool_kwarg(inplace, "inplace") if na_position not in ["last", "first"]: raise ValueError(f"invalid na_position: {repr(na_position)}") sorted_idx = nargsort(self, ascending=ascending, na_position=na_position) if not inplace: codes = self._codes[sorted_idx] return self._from_backing_data(codes) self._codes[:] = self._codes[sorted_idx] return None
(self, *, inplace: 'bool' = False, ascending: 'bool' = True, na_position: 'str' = 'last') -> 'Self | None'
65,350
pandas.core.arrays._mixins
take
null
def take( self, indices: TakeIndexer, *, allow_fill: bool = False, fill_value: Any = None, axis: AxisInt = 0, ) -> Self: if allow_fill: fill_value = self._validate_scalar(fill_value) new_data = take( self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value, axis=axis, ) return self._from_backing_data(new_data)
(self, indices: Any, *, allow_fill: bool = False, fill_value: Optional[Any] = None, axis: int = 0) -> Self
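The docstring field is empty for this row, so a brief sketch of the documented ExtensionArray.take contract as it applies here (expected outputs in comments):

import pandas as pd

cat = pd.Categorical(["a", "b", "c"])
print(cat.take([2, 0, 1]))                  # expected ['c', 'a', 'b'] with the categories unchanged
print(cat.take([0, -1], allow_fill=True))   # expected ['a', NaN]: -1 marks a missing slot
print(cat.take([0, -1], allow_fill=False))  # expected ['a', 'c']: -1 is ordinary end-relative indexing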
65,351
pandas.core.arrays.categorical
to_list
Alias for tolist.
def to_list(self): """ Alias for tolist. """ # GH#51254 warnings.warn( "Categorical.to_list is deprecated and will be removed in a future " "version. Use obj.tolist() instead", FutureWarning, stacklevel=find_stack_level(), ) return self.tolist()
(self)
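Since this alias is deprecated, a minimal sketch of the suggested replacement:

import pandas as pd

cat = pd.Categorical(["a", "b", "a"])
print(cat.tolist())   # expected ['a', 'b', 'a']; calling cat.to_list() still works but emits a FutureWarning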
65,352
pandas.core.arrays.base
to_numpy
Convert to a NumPy ndarray. This is similar to :meth:`numpy.asarray`, but may provide additional control over how the conversion is done. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the type of the array. Returns ------- numpy.ndarray
def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert to a NumPy ndarray. This is similar to :meth:`numpy.asarray`, but may provide additional control over how the conversion is done. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the type of the array. Returns ------- numpy.ndarray """ result = np.asarray(self, dtype=dtype) if copy or na_value is not lib.no_default: result = result.copy() if na_value is not lib.no_default: result[self.isna()] = na_value return result
(self, dtype: 'npt.DTypeLike | None' = None, copy: 'bool' = False, na_value: 'object' = <no_default>) -> 'np.ndarray'
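The docstring has no example; a sketch of the dtype/copy/na_value knobs on a Categorical, which uses this base implementation according to this listing (expected outputs in comments):

import pandas as pd

cat = pd.Categorical(["a", "b", None, "a"])
print(cat.to_numpy())                         # expected object ndarray ['a' 'b' nan 'a']
print(cat.to_numpy(na_value="missing"))       # expected ['a' 'b' 'missing' 'a']
print(cat.to_numpy(dtype=object, copy=True))  # copy=True guarantees a fresh ndarray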
65,353
pandas.core.arrays.base
tolist
Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ------- list Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr.tolist() [1, 2, 3]
def tolist(self) -> list: """ Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ------- list Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr.tolist() [1, 2, 3] """ if self.ndim > 1: return [x.tolist() for x in self] return list(self)
(self) -> list
65,354
pandas.core.arrays.categorical
unique
Return the ``Categorical`` which ``categories`` and ``codes`` are unique. .. versionchanged:: 1.3.0 Previously, unused categories were dropped from the new categories. Returns ------- Categorical See Also -------- pandas.unique CategoricalIndex.unique Series.unique : Return unique values of Series object. Examples -------- >>> pd.Categorical(list("baabc")).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Categorical(list("baab"), categories=list("abc"), ordered=True).unique() ['b', 'a'] Categories (3, object): ['a' < 'b' < 'c']
def unique(self) -> Self: """ Return the ``Categorical`` which ``categories`` and ``codes`` are unique. .. versionchanged:: 1.3.0 Previously, unused categories were dropped from the new categories. Returns ------- Categorical See Also -------- pandas.unique CategoricalIndex.unique Series.unique : Return unique values of Series object. Examples -------- >>> pd.Categorical(list("baabc")).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Categorical(list("baab"), categories=list("abc"), ordered=True).unique() ['b', 'a'] Categories (3, object): ['a' < 'b' < 'c'] """ # pylint: disable=useless-parent-delegation return super().unique()
(self) -> 'Self'
65,355
pandas.core.arrays.categorical
value_counts
Return a Series containing counts of each category. Every category will have an entry, even those with a count of 0. Parameters ---------- dropna : bool, default True Don't include counts of NaN. Returns ------- counts : Series See Also -------- Series.value_counts
def value_counts(self, dropna: bool = True) -> Series: """ Return a Series containing counts of each category. Every category will have an entry, even those with a count of 0. Parameters ---------- dropna : bool, default True Don't include counts of NaN. Returns ------- counts : Series See Also -------- Series.value_counts """ from pandas import ( CategoricalIndex, Series, ) code, cat = self._codes, self.categories ncat, mask = (len(cat), code >= 0) ix, clean = np.arange(ncat), mask.all() if dropna or clean: obs = code if clean else code[mask] count = np.bincount(obs, minlength=ncat or 0) else: count = np.bincount(np.where(mask, code, ncat)) ix = np.append(ix, -1) ix = coerce_indexer_dtype(ix, self.dtype.categories) ix = self._from_backing_data(ix) return Series( count, index=CategoricalIndex(ix), dtype="int64", name="count", copy=False )
(self, dropna: 'bool' = True) -> 'Series'
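A small sketch (my addition) showing the two points the docstring makes, namely that every category gets a row and that dropna controls the NaN entry:

import numpy as np
import pandas as pd

cat = pd.Categorical(["a", "b", "a", np.nan], categories=["a", "b", "c"])
print(cat.value_counts())              # expected a=2, b=1, c=0; the NaN is dropped by default
print(cat.value_counts(dropna=False))  # expected an extra NaN row with count 1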
65,356
pandas.core.arrays._mixins
view
null
def view(self, dtype: Dtype | None = None) -> ArrayLike: # We handle datetime64, datetime64tz, timedelta64, and period # dtypes here. Everything else we pass through to the underlying # ndarray. if dtype is None or dtype is self.dtype: return self._from_backing_data(self._ndarray) if isinstance(dtype, type): # we sometimes pass non-dtype objects, e.g np.ndarray; # pass those through to the underlying ndarray return self._ndarray.view(dtype) dtype = pandas_dtype(dtype) arr = self._ndarray if isinstance(dtype, PeriodDtype): cls = dtype.construct_array_type() return cls(arr.view("i8"), dtype=dtype) elif isinstance(dtype, DatetimeTZDtype): dt_cls = dtype.construct_array_type() dt64_values = arr.view(f"M8[{dtype.unit}]") return dt_cls._simple_new(dt64_values, dtype=dtype) elif lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype): from pandas.core.arrays import DatetimeArray dt64_values = arr.view(dtype) return DatetimeArray._simple_new(dt64_values, dtype=dtype) elif lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype): from pandas.core.arrays import TimedeltaArray td64_values = arr.view(dtype) return TimedeltaArray._simple_new(td64_values, dtype=dtype) # error: Argument "dtype" to "view" of "_ArrayOrScalarCommon" has incompatible # type "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None, # type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" return arr.view(dtype=dtype) # type: ignore[arg-type]
(self, dtype: Union[pandas.core.dtypes.base.ExtensionDtype, str, numpy.dtype, Type[Union[str, complex, bool, object]], NoneType] = None) -> Union[pandas.core.arrays.base.ExtensionArray, numpy.ndarray]
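The docstring field is empty; a hedged sketch, assuming the datetime-backed arrays share this mixin implementation (the datetime64/timedelta64/period branches above suggest they do):

import pandas as pd

dta = pd.array(pd.date_range("2024-01-01", periods=3))  # DatetimeArray backed by an M8[ns] ndarray
print(type(dta.view()))   # expected DatetimeArray: no dtype re-wraps the same backing data
print(dta.view("int64"))  # expected an int64 ndarray of nanosecond ticks (a view, not a copy)

cat = pd.Categorical(["a", "b"])
print(type(cat.view()))   # expected Categorical: same codes, same dtype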
65,357
pandas.core.dtypes.dtypes
CategoricalDtype
Type for categorical data with the categories and orderedness. Parameters ---------- categories : sequence, optional Must be unique, and must not contain any nulls. The categories are stored in an Index, and if an index is provided the dtype of that index will be used. ordered : bool or None, default False Whether or not this categorical is treated as a ordered categorical. None can be used to maintain the ordered value of existing categoricals when used in operations that combine categoricals, e.g. astype, and will resolve to False if there is no existing ordered to maintain. Attributes ---------- categories ordered Methods ------- None See Also -------- Categorical : Represent a categorical variable in classic R / S-plus fashion. Notes ----- This class is useful for specifying the type of a ``Categorical`` independent of the values. See :ref:`categorical.categoricaldtype` for more. Examples -------- >>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True) >>> pd.Series(['a', 'b', 'a', 'c'], dtype=t) 0 a 1 b 2 a 3 NaN dtype: category Categories (2, object): ['b' < 'a'] An empty CategoricalDtype with a specific dtype can be created by providing an empty index. As follows, >>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype dtype('<M8[ns]')
class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): """ Type for categorical data with the categories and orderedness. Parameters ---------- categories : sequence, optional Must be unique, and must not contain any nulls. The categories are stored in an Index, and if an index is provided the dtype of that index will be used. ordered : bool or None, default False Whether or not this categorical is treated as a ordered categorical. None can be used to maintain the ordered value of existing categoricals when used in operations that combine categoricals, e.g. astype, and will resolve to False if there is no existing ordered to maintain. Attributes ---------- categories ordered Methods ------- None See Also -------- Categorical : Represent a categorical variable in classic R / S-plus fashion. Notes ----- This class is useful for specifying the type of a ``Categorical`` independent of the values. See :ref:`categorical.categoricaldtype` for more. Examples -------- >>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True) >>> pd.Series(['a', 'b', 'a', 'c'], dtype=t) 0 a 1 b 2 a 3 NaN dtype: category Categories (2, object): ['b' < 'a'] An empty CategoricalDtype with a specific dtype can be created by providing an empty index. As follows, >>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype dtype('<M8[ns]') """ # TODO: Document public vs. private API name = "category" type: type[CategoricalDtypeType] = CategoricalDtypeType kind: str_type = "O" str = "|O08" base = np.dtype("O") _metadata = ("categories", "ordered") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} _supports_2d = False _can_fast_transpose = False def __init__(self, categories=None, ordered: Ordered = False) -> None: self._finalize(categories, ordered, fastpath=False) @classmethod def _from_fastpath( cls, categories=None, ordered: bool | None = None ) -> CategoricalDtype: self = cls.__new__(cls) self._finalize(categories, ordered, fastpath=True) return self @classmethod def _from_categorical_dtype( cls, dtype: CategoricalDtype, categories=None, ordered: Ordered | None = None ) -> CategoricalDtype: if categories is ordered is None: return dtype if categories is None: categories = dtype.categories if ordered is None: ordered = dtype.ordered return cls(categories, ordered) @classmethod def _from_values_or_dtype( cls, values=None, categories=None, ordered: bool | None = None, dtype: Dtype | None = None, ) -> CategoricalDtype: """ Construct dtype from the input parameters used in :class:`Categorical`. This constructor method specifically does not do the factorization step, if that is needed to find the categories. This constructor may therefore return ``CategoricalDtype(categories=None, ordered=None)``, which may not be useful. Additional steps may therefore have to be taken to create the final dtype. The return dtype is specified from the inputs in this prioritized order: 1. if dtype is a CategoricalDtype, return dtype 2. if dtype is the string 'category', create a CategoricalDtype from the supplied categories and ordered parameters, and return that. 3. if values is a categorical, use value.dtype, but override it with categories and ordered if either/both of those are not None. 4. if dtype is None and values is not a categorical, construct the dtype from categories and ordered, even if either of those is None. Parameters ---------- values : list-like, optional The list-like must be 1-dimensional. categories : list-like, optional Categories for the CategoricalDtype. 
ordered : bool, optional Designating if the categories are ordered. dtype : CategoricalDtype or the string "category", optional If ``CategoricalDtype``, cannot be used together with `categories` or `ordered`. Returns ------- CategoricalDtype Examples -------- >>> pd.CategoricalDtype._from_values_or_dtype() CategoricalDtype(categories=None, ordered=None, categories_dtype=None) >>> pd.CategoricalDtype._from_values_or_dtype( ... categories=['a', 'b'], ordered=True ... ) CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object) >>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True) >>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False) >>> c = pd.Categorical([0, 1], dtype=dtype1) >>> pd.CategoricalDtype._from_values_or_dtype( ... c, ['x', 'y'], ordered=True, dtype=dtype2 ... ) Traceback (most recent call last): ... ValueError: Cannot specify `categories` or `ordered` together with `dtype`. The supplied dtype takes precedence over values' dtype: >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2) CategoricalDtype(categories=['x', 'y'], ordered=False, categories_dtype=object) """ if dtype is not None: # The dtype argument takes precedence over values.dtype (if any) if isinstance(dtype, str): if dtype == "category": if ordered is None and cls.is_dtype(values): # GH#49309 preserve orderedness ordered = values.dtype.ordered dtype = CategoricalDtype(categories, ordered) else: raise ValueError(f"Unknown dtype {repr(dtype)}") elif categories is not None or ordered is not None: raise ValueError( "Cannot specify `categories` or `ordered` together with `dtype`." ) elif not isinstance(dtype, CategoricalDtype): raise ValueError(f"Cannot not construct CategoricalDtype from {dtype}") elif cls.is_dtype(values): # If no "dtype" was passed, use the one from "values", but honor # the "ordered" and "categories" arguments dtype = values.dtype._from_categorical_dtype( values.dtype, categories, ordered ) else: # If dtype=None and values is not categorical, create a new dtype. # Note: This could potentially have categories=None and # ordered=None. dtype = CategoricalDtype(categories, ordered) return cast(CategoricalDtype, dtype) @classmethod def construct_from_string(cls, string: str_type) -> CategoricalDtype: """ Construct a CategoricalDtype from a string. Parameters ---------- string : str Must be the string "category" in order to be successfully constructed. Returns ------- CategoricalDtype Instance of the dtype. Raises ------ TypeError If a CategoricalDtype cannot be constructed from the input. """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) if string != cls.name: raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'") # need ordered=None to ensure that operations specifying dtype="category" don't # override the ordered value for existing categoricals return cls(ordered=None) def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None: if ordered is not None: self.validate_ordered(ordered) if categories is not None: categories = self.validate_categories(categories, fastpath=fastpath) self._categories = categories self._ordered = ordered def __setstate__(self, state: MutableMapping[str_type, Any]) -> None: # for pickle compat. 
__get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._categories = state.pop("categories", None) self._ordered = state.pop("ordered", False) def __hash__(self) -> int: # _hash_categories returns a uint64, so use the negative # space for when we have unknown categories to avoid a conflict if self.categories is None: if self.ordered: return -1 else: return -2 # We *do* want to include the real self.ordered here return int(self._hash_categories) def __eq__(self, other: object) -> bool: """ Rules for CDT equality: 1) Any CDT is equal to the string 'category' 2) Any CDT is equal to itself 3) Any CDT is equal to a CDT with categories=None regardless of ordered 4) A CDT with ordered=True is only equal to another CDT with ordered=True and identical categories in the same order 5) A CDT with ordered={False, None} is only equal to another CDT with ordered={False, None} and identical categories, but same order is not required. There is no distinction between False/None. 6) Any other comparison returns False """ if isinstance(other, str): return other == self.name elif other is self: return True elif not (hasattr(other, "ordered") and hasattr(other, "categories")): return False elif self.categories is None or other.categories is None: # For non-fully-initialized dtypes, these are only equal to # - the string "category" (handled above) # - other CategoricalDtype with categories=None return self.categories is other.categories elif self.ordered or other.ordered: # At least one has ordered=True; equal if both have ordered=True # and the same values for categories in the same order. return (self.ordered == other.ordered) and self.categories.equals( other.categories ) else: # Neither has ordered=True; equal if both have the same categories, # but same order is not necessary. There is no distinction between # ordered=False and ordered=None: CDT(., False) and CDT(., None) # will be equal if they have the same categories. left = self.categories right = other.categories # GH#36280 the ordering of checks here is for performance if not left.dtype == right.dtype: return False if len(left) != len(right): return False if self.categories.equals(other.categories): # Check and see if they happen to be identical categories return True if left.dtype != object: # Faster than calculating hash indexer = left.get_indexer(right) # Because left and right have the same length and are unique, # `indexer` not having any -1s implies that there is a # bijection between `left` and `right`. return (indexer != -1).all() # With object-dtype we need a comparison that identifies # e.g. int(2) as distinct from float(2) return set(left) == set(right) def __repr__(self) -> str_type: if self.categories is None: data = "None" dtype = "None" else: data = self.categories._format_data(name=type(self).__name__) if isinstance(self.categories, ABCRangeIndex): data = str(self.categories._range) data = data.rstrip(", ") dtype = self.categories.dtype return ( f"CategoricalDtype(categories={data}, ordered={self.ordered}, " f"categories_dtype={dtype})" ) @cache_readonly def _hash_categories(self) -> int: from pandas.core.util.hashing import ( combine_hash_arrays, hash_array, hash_tuples, ) categories = self.categories ordered = self.ordered if len(categories) and isinstance(categories[0], tuple): # assumes if any individual category is a tuple, then all our. 
ATM # I don't really want to support just some of the categories being # tuples. cat_list = list(categories) # breaks if a np.array of categories cat_array = hash_tuples(cat_list) else: if categories.dtype == "O" and len({type(x) for x in categories}) != 1: # TODO: hash_array doesn't handle mixed types. It casts # everything to a str first, which means we treat # {'1', '2'} the same as {'1', 2} # find a better solution hashed = hash((tuple(categories), ordered)) return hashed if DatetimeTZDtype.is_dtype(categories.dtype): # Avoid future warning. categories = categories.view("datetime64[ns]") cat_array = hash_array(np.asarray(categories), categorize=False) if ordered: cat_array = np.vstack( [cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)] ) else: cat_array = np.array([cat_array]) combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) return np.bitwise_xor.reduce(combined_hashed) @classmethod def construct_array_type(cls) -> type_t[Categorical]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas import Categorical return Categorical @staticmethod def validate_ordered(ordered: Ordered) -> None: """ Validates that we have a valid ordered parameter. If it is not a boolean, a TypeError will be raised. Parameters ---------- ordered : object The parameter to be verified. Raises ------ TypeError If 'ordered' is not a boolean. """ if not is_bool(ordered): raise TypeError("'ordered' must either be 'True' or 'False'") @staticmethod def validate_categories(categories, fastpath: bool = False) -> Index: """ Validates that we have good categories Parameters ---------- categories : array-like fastpath : bool Whether to skip nan and uniqueness checks Returns ------- categories : Index """ from pandas.core.indexes.base import Index if not fastpath and not is_list_like(categories): raise TypeError( f"Parameter 'categories' must be list-like, was {repr(categories)}" ) if not isinstance(categories, ABCIndex): categories = Index._with_infer(categories, tupleize_cols=False) if not fastpath: if categories.hasnans: raise ValueError("Categorical categories cannot be null") if not categories.is_unique: raise ValueError("Categorical categories must be unique") if isinstance(categories, ABCCategoricalIndex): categories = categories.categories return categories def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype: """ Returns a CategoricalDtype with categories and ordered taken from dtype if specified, otherwise falling back to self if unspecified Parameters ---------- dtype : CategoricalDtype Returns ------- new_dtype : CategoricalDtype """ if isinstance(dtype, str) and dtype == "category": # dtype='category' should not change anything return self elif not self.is_dtype(dtype): raise ValueError( f"a CategoricalDtype must be passed to perform an update, " f"got {repr(dtype)}" ) else: # from here on, dtype is a CategoricalDtype dtype = cast(CategoricalDtype, dtype) # update categories/ordered unless they've been explicitly passed as None new_categories = ( dtype.categories if dtype.categories is not None else self.categories ) new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered return CategoricalDtype(new_categories, new_ordered) @property def categories(self) -> Index: """ An ``Index`` containing the unique categories allowed. 
Examples -------- >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True) >>> cat_type.categories Index(['a', 'b'], dtype='object') """ return self._categories @property def ordered(self) -> Ordered: """ Whether the categories have an ordered relationship. Examples -------- >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True) >>> cat_type.ordered True >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=False) >>> cat_type.ordered False """ return self._ordered @property def _is_boolean(self) -> bool: from pandas.core.dtypes.common import is_bool_dtype return is_bool_dtype(self.categories) def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: # check if we have all categorical dtype with identical categories if all(isinstance(x, CategoricalDtype) for x in dtypes): first = dtypes[0] if all(first == other for other in dtypes[1:]): return first # special case non-initialized categorical # TODO we should figure out the expected return value in general non_init_cats = [ isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes ] if all(non_init_cats): return self elif any(non_init_cats): return None # categorical is aware of Sparse -> extract sparse subdtypes dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes] # extract the categories' dtype non_cat_dtypes = [ x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes ] # TODO should categorical always give an answer? from pandas.core.dtypes.cast import find_common_type return find_common_type(non_cat_dtypes) @cache_readonly def index_class(self) -> type_t[CategoricalIndex]: from pandas import CategoricalIndex return CategoricalIndex
(categories=None, ordered: 'Ordered' = False) -> 'None'
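A common round trip that the class docstring only hints at, sketched under standard pandas behaviour: build the dtype once, reuse it with astype, and let out-of-category values become NaN.

import pandas as pd

sizes = pd.CategoricalDtype(categories=["S", "M", "L"], ordered=True)
ser = pd.Series(["M", "L", "S", "XL"]).astype(sizes)
print(ser.isna().tolist())  # expected [False, False, False, True]: 'XL' is not a category
print(ser.min())            # expected 'S': comparisons follow the declared order
print(ser.dtype.ordered)    # expected True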
65,358
pandas.core.dtypes.dtypes
__eq__
Rules for CDT equality: 1) Any CDT is equal to the string 'category' 2) Any CDT is equal to itself 3) Any CDT is equal to a CDT with categories=None regardless of ordered 4) A CDT with ordered=True is only equal to another CDT with ordered=True and identical categories in the same order 5) A CDT with ordered={False, None} is only equal to another CDT with ordered={False, None} and identical categories, but same order is not required. There is no distinction between False/None. 6) Any other comparison returns False
def __eq__(self, other: object) -> bool: """ Rules for CDT equality: 1) Any CDT is equal to the string 'category' 2) Any CDT is equal to itself 3) Any CDT is equal to a CDT with categories=None regardless of ordered 4) A CDT with ordered=True is only equal to another CDT with ordered=True and identical categories in the same order 5) A CDT with ordered={False, None} is only equal to another CDT with ordered={False, None} and identical categories, but same order is not required. There is no distinction between False/None. 6) Any other comparison returns False """ if isinstance(other, str): return other == self.name elif other is self: return True elif not (hasattr(other, "ordered") and hasattr(other, "categories")): return False elif self.categories is None or other.categories is None: # For non-fully-initialized dtypes, these are only equal to # - the string "category" (handled above) # - other CategoricalDtype with categories=None return self.categories is other.categories elif self.ordered or other.ordered: # At least one has ordered=True; equal if both have ordered=True # and the same values for categories in the same order. return (self.ordered == other.ordered) and self.categories.equals( other.categories ) else: # Neither has ordered=True; equal if both have the same categories, # but same order is not necessary. There is no distinction between # ordered=False and ordered=None: CDT(., False) and CDT(., None) # will be equal if they have the same categories. left = self.categories right = other.categories # GH#36280 the ordering of checks here is for performance if not left.dtype == right.dtype: return False if len(left) != len(right): return False if self.categories.equals(other.categories): # Check and see if they happen to be identical categories return True if left.dtype != object: # Faster than calculating hash indexer = left.get_indexer(right) # Because left and right have the same length and are unique, # `indexer` not having any -1s implies that there is a # bijection between `left` and `right`. return (indexer != -1).all() # With object-dtype we need a comparison that identifies # e.g. int(2) as distinct from float(2) return set(left) == set(right)
(self, other: object) -> bool
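A sketch of the stated rules in action (expected results in comments; rules 2, 3 and 6 follow the same pattern and are not restated):

from pandas import CategoricalDtype

print(CategoricalDtype(["a", "b"]) == "category")                    # expected True (rule 1)
print(CategoricalDtype(["a", "b"]) == CategoricalDtype(["b", "a"]))  # expected True (rule 5: order ignored)
print(
    CategoricalDtype(["a", "b"], ordered=True)
    == CategoricalDtype(["b", "a"], ordered=True)
)                                                                    # expected False (rule 4: order matters)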
65,359
pandas.core.dtypes.dtypes
__getstate__
null
def __getstate__(self) -> dict[str_type, Any]: # pickle support; we don't want to pickle the cache return {k: getattr(self, k, None) for k in self._metadata}
(self) -> dict[str, typing.Any]
65,360
pandas.core.dtypes.dtypes
__hash__
null
def __hash__(self) -> int: # _hash_categories returns a uint64, so use the negative # space for when we have unknown categories to avoid a conflict if self.categories is None: if self.ordered: return -1 else: return -2 # We *do* want to include the real self.ordered here return int(self._hash_categories)
(self) -> int
65,361
pandas.core.dtypes.dtypes
__init__
null
def __init__(self, categories=None, ordered: Ordered = False) -> None: self._finalize(categories, ordered, fastpath=False)
(self, categories=None, ordered: 'Ordered' = False) -> 'None'
65,363
pandas.core.dtypes.dtypes
__repr__
null
def __repr__(self) -> str_type: if self.categories is None: data = "None" dtype = "None" else: data = self.categories._format_data(name=type(self).__name__) if isinstance(self.categories, ABCRangeIndex): data = str(self.categories._range) data = data.rstrip(", ") dtype = self.categories.dtype return ( f"CategoricalDtype(categories={data}, ordered={self.ordered}, " f"categories_dtype={dtype})" )
(self) -> str
65,364
pandas.core.dtypes.dtypes
__setstate__
null
def __setstate__(self, state: MutableMapping[str_type, Any]) -> None: # for pickle compat. __get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._categories = state.pop("categories", None) self._ordered = state.pop("ordered", False)
(self, state: 'MutableMapping[str_type, Any]') -> 'None'
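Together with __getstate__ above this gives plain pickle support; a quick sketch of the round trip:

import pickle

import pandas as pd

dtype = pd.CategoricalDtype(["a", "b"], ordered=True)
restored = pickle.loads(pickle.dumps(dtype))
print(restored == dtype, restored.ordered)   # expected True True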
65,366
pandas.core.dtypes.dtypes
_finalize
null
def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None: if ordered is not None: self.validate_ordered(ordered) if categories is not None: categories = self.validate_categories(categories, fastpath=fastpath) self._categories = categories self._ordered = ordered
(self, categories, ordered: 'Ordered', fastpath: 'bool' = False) -> 'None'
65,367
pandas.core.dtypes.dtypes
_get_common_dtype
null
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: # check if we have all categorical dtype with identical categories if all(isinstance(x, CategoricalDtype) for x in dtypes): first = dtypes[0] if all(first == other for other in dtypes[1:]): return first # special case non-initialized categorical # TODO we should figure out the expected return value in general non_init_cats = [ isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes ] if all(non_init_cats): return self elif any(non_init_cats): return None # categorical is aware of Sparse -> extract sparse subdtypes dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes] # extract the categories' dtype non_cat_dtypes = [ x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes ] # TODO should categorical always give an answer? from pandas.core.dtypes.cast import find_common_type return find_common_type(non_cat_dtypes)
(self, dtypes: 'list[DtypeObj]') -> 'DtypeObj | None'
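This hook is what concat-style operations end up consulting through find_common_type; a sketch of the observable effect through the public API (expected dtypes in comments):

import pandas as pd

shared = pd.CategoricalDtype(["x", "y"])
a = pd.Series(["x", "y"], dtype=shared)
b = pd.Series(["y", "x"], dtype=shared)
print(pd.concat([a, b]).dtype)  # expected category: identical categorical dtypes are preserved

c = pd.Series(["z"], dtype=pd.CategoricalDtype(["z"]))
print(pd.concat([a, c]).dtype)  # expected object: differing categories fall back to the categories' common dtype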
65,369
pandas.core.dtypes.dtypes
update_dtype
Returns a CategoricalDtype with categories and ordered taken from dtype if specified, otherwise falling back to self if unspecified Parameters ---------- dtype : CategoricalDtype Returns ------- new_dtype : CategoricalDtype
def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype: """ Returns a CategoricalDtype with categories and ordered taken from dtype if specified, otherwise falling back to self if unspecified Parameters ---------- dtype : CategoricalDtype Returns ------- new_dtype : CategoricalDtype """ if isinstance(dtype, str) and dtype == "category": # dtype='category' should not change anything return self elif not self.is_dtype(dtype): raise ValueError( f"a CategoricalDtype must be passed to perform an update, " f"got {repr(dtype)}" ) else: # from here on, dtype is a CategoricalDtype dtype = cast(CategoricalDtype, dtype) # update categories/ordered unless they've been explicitly passed as None new_categories = ( dtype.categories if dtype.categories is not None else self.categories ) new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered return CategoricalDtype(new_categories, new_ordered)
(self, dtype: str | pandas.core.dtypes.dtypes.CategoricalDtype) -> pandas.core.dtypes.dtypes.CategoricalDtype
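A sketch of the fallback behaviour described above (update_dtype is not a user-facing entry point, so treat this as illustrative):

from pandas import CategoricalDtype

base = CategoricalDtype(["a", "b", "c"])
updated = base.update_dtype(CategoricalDtype(ordered=True))
print(updated.categories.tolist(), updated.ordered)  # expected ['a', 'b', 'c'] True
print(base.update_dtype("category") == base)         # expected True: the bare string changes nothing
print(base.update_dtype(CategoricalDtype(["x", "y"])).categories.tolist())  # expected ['x', 'y']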
65,370
pandas.core.dtypes.dtypes
validate_categories
Validates that we have good categories Parameters ---------- categories : array-like fastpath : bool Whether to skip nan and uniqueness checks Returns ------- categories : Index
@staticmethod def validate_categories(categories, fastpath: bool = False) -> Index: """ Validates that we have good categories Parameters ---------- categories : array-like fastpath : bool Whether to skip nan and uniqueness checks Returns ------- categories : Index """ from pandas.core.indexes.base import Index if not fastpath and not is_list_like(categories): raise TypeError( f"Parameter 'categories' must be list-like, was {repr(categories)}" ) if not isinstance(categories, ABCIndex): categories = Index._with_infer(categories, tupleize_cols=False) if not fastpath: if categories.hasnans: raise ValueError("Categorical categories cannot be null") if not categories.is_unique: raise ValueError("Categorical categories must be unique") if isinstance(categories, ABCCategoricalIndex): categories = categories.categories return categories
(categories, fastpath: 'bool' = False) -> 'Index'
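The same checks run from the CategoricalDtype constructor, which is the easiest place to see them; a sketch with the error messages I would expect:

import pandas as pd

print(pd.CategoricalDtype.validate_categories(["a", "b", "c"]))  # expected Index(['a', 'b', 'c'], dtype='object')

try:
    pd.CategoricalDtype(["a", "a"])
except ValueError as err:
    print(err)   # expected: Categorical categories must be unique

try:
    pd.CategoricalDtype(["a", None])
except ValueError as err:
    print(err)   # expected: Categorical categories cannot be null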
65,371
pandas.core.dtypes.dtypes
validate_ordered
Validates that we have a valid ordered parameter. If it is not a boolean, a TypeError will be raised. Parameters ---------- ordered : object The parameter to be verified. Raises ------ TypeError If 'ordered' is not a boolean.
@staticmethod def validate_ordered(ordered: Ordered) -> None: """ Validates that we have a valid ordered parameter. If it is not a boolean, a TypeError will be raised. Parameters ---------- ordered : object The parameter to be verified. Raises ------ TypeError If 'ordered' is not a boolean. """ if not is_bool(ordered): raise TypeError("'ordered' must either be 'True' or 'False'")
(ordered: 'Ordered') -> 'None'
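And the corresponding check for ordered, again visible through the constructor:

import pandas as pd

try:
    pd.CategoricalDtype(["a", "b"], ordered="yes")
except TypeError as err:
    print(err)   # expected: 'ordered' must either be 'True' or 'False'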
65,372
pandas.core.indexes.category
CategoricalIndex
Index based on an underlying :class:`Categorical`. CategoricalIndex, like Categorical, can only take on a limited, and usually fixed, number of possible values (`categories`). Also, like Categorical, it might have an order, but numerical operations (additions, divisions, ...) are not possible. Parameters ---------- data : array-like (1-dimensional) The values of the categorical. If `categories` are given, values not in `categories` will be replaced with NaN. categories : index-like, optional The categories for the categorical. Items need to be unique. If the categories are not given here (and also not in `dtype`), they will be inferred from the `data`. ordered : bool, optional Whether or not this categorical is treated as an ordered categorical. If not given here or in `dtype`, the resulting categorical will be unordered. dtype : CategoricalDtype or "category", optional If :class:`CategoricalDtype`, cannot be used together with `categories` or `ordered`. copy : bool, default False Make a copy of input ndarray. name : object, optional Name to be stored in the index. Attributes ---------- codes categories ordered Methods ------- rename_categories reorder_categories add_categories remove_categories remove_unused_categories set_categories as_ordered as_unordered map Raises ------ ValueError If the categories do not validate. TypeError If an explicit ``ordered=True`` is given but no `categories` and the `values` are not sortable. See Also -------- Index : The base pandas Index type. Categorical : A categorical array. CategoricalDtype : Type for categorical data. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`__ for more. Examples -------- >>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"]) CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') ``CategoricalIndex`` can also be instantiated from a ``Categorical``: >>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"]) >>> pd.CategoricalIndex(c) CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') Ordered ``CategoricalIndex`` can have a min and max value. >>> ci = pd.CategoricalIndex( ... ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"] ... ) >>> ci CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['c', 'b', 'a'], ordered=True, dtype='category') >>> ci.min() 'c'
class CategoricalIndex(NDArrayBackedExtensionIndex): """ Index based on an underlying :class:`Categorical`. CategoricalIndex, like Categorical, can only take on a limited, and usually fixed, number of possible values (`categories`). Also, like Categorical, it might have an order, but numerical operations (additions, divisions, ...) are not possible. Parameters ---------- data : array-like (1-dimensional) The values of the categorical. If `categories` are given, values not in `categories` will be replaced with NaN. categories : index-like, optional The categories for the categorical. Items need to be unique. If the categories are not given here (and also not in `dtype`), they will be inferred from the `data`. ordered : bool, optional Whether or not this categorical is treated as an ordered categorical. If not given here or in `dtype`, the resulting categorical will be unordered. dtype : CategoricalDtype or "category", optional If :class:`CategoricalDtype`, cannot be used together with `categories` or `ordered`. copy : bool, default False Make a copy of input ndarray. name : object, optional Name to be stored in the index. Attributes ---------- codes categories ordered Methods ------- rename_categories reorder_categories add_categories remove_categories remove_unused_categories set_categories as_ordered as_unordered map Raises ------ ValueError If the categories do not validate. TypeError If an explicit ``ordered=True`` is given but no `categories` and the `values` are not sortable. See Also -------- Index : The base pandas Index type. Categorical : A categorical array. CategoricalDtype : Type for categorical data. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`__ for more. Examples -------- >>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"]) CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') ``CategoricalIndex`` can also be instantiated from a ``Categorical``: >>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"]) >>> pd.CategoricalIndex(c) CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') Ordered ``CategoricalIndex`` can have a min and max value. >>> ci = pd.CategoricalIndex( ... ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"] ... ) >>> ci CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['c', 'b', 'a'], ordered=True, dtype='category') >>> ci.min() 'c' """ _typ = "categoricalindex" _data_cls = Categorical @property def _can_hold_strings(self): return self.categories._can_hold_strings @cache_readonly def _should_fallback_to_positional(self) -> bool: return self.categories._should_fallback_to_positional codes: np.ndarray categories: Index ordered: bool | None _data: Categorical _values: Categorical @property def _engine_type(self) -> type[libindex.IndexEngine]: # self.codes can have dtype int8, int16, int32 or int64, so we need # to return the corresponding engine type (libindex.Int8Engine, etc.). 
return { np.int8: libindex.Int8Engine, np.int16: libindex.Int16Engine, np.int32: libindex.Int32Engine, np.int64: libindex.Int64Engine, }[self.codes.dtype.type] # -------------------------------------------------------------------- # Constructors def __new__( cls, data=None, categories=None, ordered=None, dtype: Dtype | None = None, copy: bool = False, name: Hashable | None = None, ) -> Self: name = maybe_extract_name(name, data, cls) if is_scalar(data): # GH#38944 include None here, which pre-2.0 subbed in [] cls._raise_scalar_data_error(data) data = Categorical( data, categories=categories, ordered=ordered, dtype=dtype, copy=copy ) return cls._simple_new(data, name=name) # -------------------------------------------------------------------- def _is_dtype_compat(self, other: Index) -> Categorical: """ *this is an internal non-public method* provide a comparison between the dtype of self and other (coercing if needed) Parameters ---------- other : Index Returns ------- Categorical Raises ------ TypeError if the dtypes are not compatible """ if isinstance(other.dtype, CategoricalDtype): cat = extract_array(other) cat = cast(Categorical, cat) if not cat._categories_match_up_to_permutation(self._values): raise TypeError( "categories must match existing categories when appending" ) elif other._is_multi: # preempt raising NotImplementedError in isna call raise TypeError("MultiIndex is not dtype-compatible with CategoricalIndex") else: values = other cat = Categorical(other, dtype=self.dtype) other = CategoricalIndex(cat) if not other.isin(values).all(): raise TypeError( "cannot append a non-category item to a CategoricalIndex" ) cat = other._values if not ((cat == values) | (isna(cat) & isna(values))).all(): # GH#37667 see test_equals_non_category raise TypeError( "categories must match existing categories when appending" ) return cat def equals(self, other: object) -> bool: """ Determine if two CategoricalIndex objects contain the same elements. Returns ------- bool ``True`` if two :class:`pandas.CategoricalIndex` objects have equal elements, ``False`` otherwise. Examples -------- >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c']) >>> ci2 = pd.CategoricalIndex(pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])) >>> ci.equals(ci2) True The order of elements matters. >>> ci3 = pd.CategoricalIndex(['c', 'b', 'a', 'a', 'b', 'c']) >>> ci.equals(ci3) False The orderedness also matters. >>> ci4 = ci.as_ordered() >>> ci.equals(ci4) False The categories matter, but the order of the categories matters only when ``ordered=True``. >>> ci5 = ci.set_categories(['a', 'b', 'c', 'd']) >>> ci.equals(ci5) False >>> ci6 = ci.set_categories(['b', 'c', 'a']) >>> ci.equals(ci6) True >>> ci_ordered = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], ... 
ordered=True) >>> ci2_ordered = ci_ordered.set_categories(['b', 'c', 'a']) >>> ci_ordered.equals(ci2_ordered) False """ if self.is_(other): return True if not isinstance(other, Index): return False try: other = self._is_dtype_compat(other) except (TypeError, ValueError): return False return self._data.equals(other) # -------------------------------------------------------------------- # Rendering Methods @property def _formatter_func(self): return self.categories._formatter_func def _format_attrs(self): """ Return a list of tuples of the (attr, formatted_value) """ attrs: list[tuple[str, str | int | bool | None]] attrs = [ ( "categories", f"[{', '.join(self._data._repr_categories())}]", ), ("ordered", self.ordered), ] extra = super()._format_attrs() return attrs + extra # -------------------------------------------------------------------- @property def inferred_type(self) -> str: return "categorical" @doc(Index.__contains__) def __contains__(self, key: Any) -> bool: # if key is a NaN, check if any NaN is in self. if is_valid_na_for_dtype(key, self.categories.dtype): return self.hasnans return contains(self, key, container=self._engine) def reindex( self, target, method=None, level=None, limit: int | None = None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values (move/add/delete values as necessary) Returns ------- new_index : pd.Index Resulting index indexer : np.ndarray[np.intp] or None Indices of output values in original index """ if method is not None: raise NotImplementedError( "argument method is not implemented for CategoricalIndex.reindex" ) if level is not None: raise NotImplementedError( "argument level is not implemented for CategoricalIndex.reindex" ) if limit is not None: raise NotImplementedError( "argument limit is not implemented for CategoricalIndex.reindex" ) return super().reindex(target) # -------------------------------------------------------------------- # Indexing Methods def _maybe_cast_indexer(self, key) -> int: # GH#41933: we have to do this instead of self._data._validate_scalar # because this will correctly get partial-indexing on Interval categories try: return self._data._unbox_scalar(key) except KeyError: if is_valid_na_for_dtype(key, self.categories.dtype): return -1 raise def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex: if isinstance(values, CategoricalIndex): values = values._data if isinstance(values, Categorical): # Indexing on codes is more efficient if categories are the same, # so we can apply some optimizations based on the degree of # dtype-matching. cat = self._data._encode_with_my_categories(values) codes = cat._codes else: codes = self.categories.get_indexer(values) codes = codes.astype(self.codes.dtype, copy=False) cat = self._data._from_backing_data(codes) return type(self)._simple_new(cat) # -------------------------------------------------------------------- def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: return self.categories._is_comparable_dtype(dtype) def map(self, mapper, na_action: Literal["ignore"] | None = None): """ Map values using an input mapping or function. Maps the values (their categories, not the codes) of the index to new categories. If the mapping correspondence is one-to-one the result is a :class:`~pandas.CategoricalIndex` which has the same order property as the original, otherwise an :class:`~pandas.Index` is returned. If a `dict` or :class:`~pandas.Series` is used any unmapped category is mapped to `NaN`. 
Note that if this happens an :class:`~pandas.Index` will be returned. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. Returns ------- pandas.CategoricalIndex or pandas.Index Mapped index. See Also -------- Index.map : Apply a mapping correspondence on an :class:`~pandas.Index`. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Series.apply : Apply more complex functions on a :class:`~pandas.Series`. Examples -------- >>> idx = pd.CategoricalIndex(['a', 'b', 'c']) >>> idx CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') >>> idx.map(lambda x: x.upper()) CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'], ordered=False, dtype='category') >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'}) CategoricalIndex(['first', 'second', 'third'], categories=['first', 'second', 'third'], ordered=False, dtype='category') If the mapping is one-to-one the ordering of the categories is preserved: >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True) >>> idx CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=True, dtype='category') >>> idx.map({'a': 3, 'b': 2, 'c': 1}) CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True, dtype='category') If the mapping is not one-to-one an :class:`~pandas.Index` is returned: >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'}) Index(['first', 'second', 'first'], dtype='object') If a `dict` is used, all unmapped categories are mapped to `NaN` and the result is an :class:`~pandas.Index`: >>> idx.map({'a': 'first', 'b': 'second'}) Index(['first', 'second', nan], dtype='object') """ mapped = self._values.map(mapper, na_action=na_action) return Index(mapped, name=self.name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: # if calling index is category, don't check dtype of others try: cat = Categorical._concat_same_type( [self._is_dtype_compat(c) for c in to_concat] ) except TypeError: # not all to_concat elements are among our categories (or NA) res = concat_compat([x._values for x in to_concat]) return Index(res, name=name) else: return type(self)._simple_new(cat, name=name)
(data=None, categories=None, ordered=None, dtype: 'Dtype | None' = None, copy: 'bool' = False, name: 'Hashable | None' = None) -> 'Self'
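The CategoricalIndex entry above documents the constructor, `map`, and the restricted `reindex`. Below is a minimal usage sketch of that documented behaviour; variable names and sample values are illustrative, not taken from the source.

import pandas as pd

# Ordered CategoricalIndex as in the docstring example: min()/max() follow
# the category order ('c' < 'b' < 'a'), not lexical order.
ci = pd.CategoricalIndex(
    ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]
)
assert ci.min() == "c"

# A one-to-one mapping keeps the categorical structure and its ordering.
mapped = ci.map({"a": 3, "b": 2, "c": 1})
assert isinstance(mapped, pd.CategoricalIndex)

# reindex() only supports the default behaviour; method=, level= and limit=
# raise NotImplementedError, as in the code above.
try:
    ci.reindex(["a", "b"], method="ffill")
except NotImplementedError:
    pass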
65,373
pandas.core.indexes.base
__abs__
null
def __abs__(self) -> Index: return self._unary_method(operator.abs)
(self) -> pandas.core.indexes.base.Index
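A small sketch of the unary dunder above: the built-in abs() on a numeric Index applies operator.abs element-wise and returns a new Index (sample values are illustrative).

import pandas as pd

idx = pd.Index([-2, -1, 3])
result = abs(idx)          # dispatches to Index.__abs__
assert list(result) == [2, 1, 3]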
65,374
pandas.core.arraylike
__add__
Get Addition of DataFrame and other, column-wise. Equivalent to ``DataFrame.add(other)``. Parameters ---------- other : scalar, sequence, Series, dict or DataFrame Object to be added to the DataFrame. Returns ------- DataFrame The result of adding ``other`` to DataFrame. See Also -------- DataFrame.add : Add a DataFrame and another object, with option for index- or column-oriented addition. Examples -------- >>> df = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]}, ... index=['elk', 'moose']) >>> df height weight elk 1.5 500 moose 2.6 800 Adding a scalar affects all rows and columns. >>> df[['height', 'weight']] + 1.5 height weight elk 3.0 501.5 moose 4.1 801.5 Each element of a list is added to a column of the DataFrame, in order. >>> df[['height', 'weight']] + [0.5, 1.5] height weight elk 2.0 501.5 moose 3.1 801.5 Keys of a dictionary are aligned to the DataFrame, based on column names; each value in the dictionary is added to the corresponding column. >>> df[['height', 'weight']] + {'height': 0.5, 'weight': 1.5} height weight elk 2.0 501.5 moose 3.1 801.5 When `other` is a :class:`Series`, the index of `other` is aligned with the columns of the DataFrame. >>> s1 = pd.Series([0.5, 1.5], index=['weight', 'height']) >>> df[['height', 'weight']] + s1 height weight elk 3.0 500.5 moose 4.1 800.5 Even when the index of `other` is the same as the index of the DataFrame, the :class:`Series` will not be reoriented. If index-wise alignment is desired, :meth:`DataFrame.add` should be used with `axis='index'`. >>> s2 = pd.Series([0.5, 1.5], index=['elk', 'moose']) >>> df[['height', 'weight']] + s2 elk height moose weight elk NaN NaN NaN NaN moose NaN NaN NaN NaN >>> df[['height', 'weight']].add(s2, axis='index') height weight elk 2.0 500.5 moose 4.1 801.5 When `other` is a :class:`DataFrame`, both columns names and the index are aligned. >>> other = pd.DataFrame({'height': [0.2, 0.4, 0.6]}, ... index=['elk', 'moose', 'deer']) >>> df[['height', 'weight']] + other height weight deer NaN NaN elk 1.7 NaN moose 3.0 NaN
@unpack_zerodim_and_defer("__ge__") def __ge__(self, other): return self._cmp_method(other, operator.ge)
(self, other)
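The docstring above explains how `+` aligns a Series with the DataFrame's columns and how index-wise alignment requires the named method. A minimal sketch of those rules, using the same sample frame as the docstring:

import pandas as pd

df = pd.DataFrame({"height": [1.5, 2.6], "weight": [500, 800]},
                  index=["elk", "moose"])

# A Series on the right-hand side is aligned with the *columns*:
# 1.5 is added to 'height' and 0.5 to 'weight'.
s1 = pd.Series([0.5, 1.5], index=["weight", "height"])
col_aligned = df + s1

# For index-wise (row) alignment, use the named method with axis='index'.
s2 = pd.Series([0.5, 1.5], index=["elk", "moose"])
row_aligned = df.add(s2, axis="index")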
65,375
pandas.core.arraylike
__and__
null
@unpack_zerodim_and_defer("__ge__") def __ge__(self, other): return self._cmp_method(other, operator.ge)
(self, other)
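Assuming the corrected `__and__` above (element-wise logical/bitwise `&`, not the Python `and` keyword), a small illustrative sketch:

import pandas as pd

left = pd.Series([True, True, False])
right = pd.Series([True, False, False])

both = left & right        # dispatches to __and__ -> the logical method
assert both.tolist() == [True, False, False]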
65,376
pandas.core.indexes.base
__array__
The array interface, return my values.
def __array__(self, dtype=None, copy=None) -> np.ndarray: """ The array interface, return my values. """ return np.asarray(self._data, dtype=dtype)
(self, dtype=None, copy=None) -> numpy.ndarray
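A quick sketch of the array interface above: numpy conversion functions go through Index.__array__ and return the underlying values, optionally cast to the requested dtype (sample values are illustrative).

import numpy as np
import pandas as pd

idx = pd.Index([1, 2, 3])
arr = np.asarray(idx, dtype="float64")   # uses Index.__array__
assert arr.dtype == np.dtype("float64")
assert arr.tolist() == [1.0, 2.0, 3.0]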
65,377
pandas.core.indexes.base
__array_ufunc__
null
def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. test_dti_isub_tdi return arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result new_inputs = [x if x is not self else x._values for x in inputs] result = getattr(ufunc, method)(*new_inputs, **kwargs) if ufunc.nout == 2: # i.e. np.divmod, np.modf, np.frexp return tuple(self.__array_wrap__(x) for x in result) elif method == "reduce": result = lib.item_from_zerodim(result) return result if result.dtype == np.float16: result = result.astype(np.float32) return self.__array_wrap__(result)
(self, ufunc: numpy.ufunc, method: str, *inputs, **kwargs)
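A sketch of the ufunc dispatch above: a ufunc applied to an Index is re-dispatched to the underlying values and wrapped back into an Index, and two-output ufuncs (nout == 2) return a tuple of wrapped results. Sample values are illustrative.

import numpy as np
import pandas as pd

idx = pd.Index([1.0, 4.0, 9.0])
roots = np.sqrt(idx)                           # goes through __array_ufunc__
assert isinstance(roots, pd.Index)

frac, whole = np.modf(pd.Index([1.5, 2.25]))   # nout == 2 -> tuple of Index
assert isinstance(frac, pd.Index) and isinstance(whole, pd.Index)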
65,378
pandas.core.indexes.base
__array_wrap__
Gets called after a ufunc and other functions e.g. np.split.
@final def __array_wrap__(self, result, context=None, return_scalar=False): """ Gets called after a ufunc and other functions e.g. np.split. """ result = lib.item_from_zerodim(result) if (not isinstance(result, Index) and is_bool_dtype(result.dtype)) or np.ndim( result ) > 1: # exclude Index to avoid warning from is_bool_dtype deprecation; # in the Index case it doesn't matter which path we go down. # reached in plotting tests with e.g. np.nonzero(index) return result return Index(result, name=self.name)
(self, result, context=None, return_scalar=False)
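A sketch of the wrapping rule above: ufunc results are wrapped back into an Index and keep the name, while boolean (and >1-dimensional) results are deliberately returned as plain ndarrays. Sample values are illustrative.

import numpy as np
import pandas as pd

idx = pd.Index([1.0, 2.0, 3.0], name="x")

wrapped = np.exp(idx)                    # wrapped via __array_wrap__
assert isinstance(wrapped, pd.Index) and wrapped.name == "x"

mask = np.isnan(idx)                     # bool result -> plain ndarray
assert isinstance(mask, np.ndarray) and not isinstance(mask, pd.Index)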