index | package | name | docstring | code | signature |
---|---|---|---|---|---|
67,745 |
pandas.core.series
|
_flex_method
| null |
def _flex_method(self, other, op, *, level=None, fill_value=None, axis: Axis = 0):
if axis is not None:
self._get_axis_number(axis)
res_name = ops.get_op_result_name(self, other)
if isinstance(other, Series):
return self._binop(other, op, level=level, fill_value=fill_value)
elif isinstance(other, (np.ndarray, list, tuple)):
if len(other) != len(self):
raise ValueError("Lengths must be equal")
other = self._constructor(other, self.index, copy=False)
result = self._binop(other, op, level=level, fill_value=fill_value)
result._name = res_name
return result
else:
if fill_value is not None:
if isna(other):
return op(self, fill_value)
self = self.fillna(fill_value)
return op(self, other)
|
(self, other, op, *, level=None, fill_value=None, axis: 'Axis' = 0)
|
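The three dispatch branches in `_flex_method` (Series, equal-length list-like, scalar) are easiest to see from the public flex wrappers that call it. A minimal sketch, assuming pandas 2.x semantics; not taken from the dataset row itself:

```python
import numpy as np
import pandas as pd

a = pd.Series([1.0, 2.0, np.nan], index=["x", "y", "z"])
b = pd.Series([10.0, np.nan, 30.0], index=["x", "y", "z"])

# Series operand: routed through _binop, with fill_value honored on both sides
print(a.add(b, fill_value=0))    # x=11.0, y=2.0, z=30.0

# list/ndarray operand: lengths must match; it is wrapped with self's index
print(a.add([1, 1, 1]))          # x=2.0, y=3.0, z=NaN

# scalar operand with fill_value: NaNs in self are filled before the op
print(a.add(100, fill_value=0))  # x=101.0, y=102.0, z=100.0
```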
67,749 |
pandas.core.series
|
_get_cacher
|
return my cacher or None
|
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
|
(self)
|
67,754 |
pandas.core.series
|
_get_rows_with_mask
| null |
def _get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> Series:
new_mgr = self._mgr.get_rows_with_mask(indexer)
return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self)
|
(self, indexer: 'npt.NDArray[np.bool_]') -> 'Series'
|
67,755 |
pandas.core.series
|
_get_value
|
Quickly retrieve single value at passed index label.
Parameters
----------
label : object
takeable : interpret the index as indexers, default False
Returns
-------
scalar value
|
def _get_value(self, label, takeable: bool = False):
"""
Quickly retrieve single value at passed index label.
Parameters
----------
label : object
takeable : interpret the index as indexers, default False
Returns
-------
scalar value
"""
if takeable:
return self._values[label]
# Similar to Index.get_value, but we do not fall back to positional
loc = self.index.get_loc(label)
if is_integer(loc):
return self._values[loc]
if isinstance(self.index, MultiIndex):
mi = self.index
new_values = self._values[loc]
if len(new_values) == 1 and mi.nlevels == 1:
# If more than one level left, we can not return a scalar
return new_values[0]
new_index = mi[loc]
new_index = maybe_droplevels(new_index, label)
new_ser = self._constructor(
new_values, index=new_index, name=self.name, copy=False
)
if isinstance(loc, slice):
new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type]
return new_ser.__finalize__(self)
else:
return self.iloc[loc]
|
(self, label, takeable: bool = False)
|
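`_get_value` is the scalar fast path behind the public accessors. A short sketch of the equivalents, including the MultiIndex branch that returns a sub-Series (assuming current pandas behavior):

```python
import pandas as pd

ser = pd.Series([10, 20, 30], index=["a", "b", "c"])
assert ser.at["b"] == 20   # label lookup (takeable=False path)
assert ser.iat[1] == 20    # positional lookup (takeable=True path)

# On a MultiIndex, a partial label hits the non-scalar branch and
# drops the matched level:
mi = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
ms = pd.Series([1.0, 2.0, 3.0], index=mi)
print(ms["a"])   # sub-Series on the remaining level: 1 -> 1.0, 2 -> 2.0
```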
67,756 |
pandas.core.series
|
_get_values_tuple
| null |
def _get_values_tuple(self, key: tuple):
# mpl hackaround
if com.any_none(*key):
# mpl compat if we look up e.g. ser[:, np.newaxis];
# see tests.series.timeseries.test_mpl_compat_hack
# the asarray is needed to avoid returning a 2D DatetimeArray
result = np.asarray(self._values[key])
disallow_ndim_indexing(result)
return result
if not isinstance(self.index, MultiIndex):
raise KeyError("key of type tuple not found and not a MultiIndex")
# If key is contained, would have returned by now
indexer, new_index = self.index.get_loc_level(key)
new_ser = self._constructor(self._values[indexer], index=new_index, copy=False)
if isinstance(indexer, slice):
new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type]
return new_ser.__finalize__(self)
|
(self, key: tuple)
|
67,757 |
pandas.core.series
|
_get_with
| null |
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, ABCDataFrame):
raise TypeError(
"Indexing a Series with DataFrame is not "
"supported, use the appropriate DataFrame column"
)
elif isinstance(key, tuple):
return self._get_values_tuple(key)
elif not is_list_like(key):
# e.g. scalars that aren't recognized by lib.is_scalar, GH#32684
return self.loc[key]
if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)):
key = list(key)
key_type = lib.infer_dtype(key, skipna=False)
# Note: The key_type == "boolean" case should be caught by the
# com.is_bool_indexer check in __getitem__
if key_type == "integer":
# We need to decide whether to treat this as a positional indexer
# (i.e. self.iloc) or label-based (i.e. self.loc)
if not self.index._should_fallback_to_positional:
return self.loc[key]
else:
warnings.warn(
# GH#50617
"Series.__getitem__ treating keys as positions is deprecated. "
"In a future version, integer keys will always be treated "
"as labels (consistent with DataFrame behavior). To access "
"a value by position, use `ser.iloc[pos]`",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.iloc[key]
# handle the dup indexing case GH#4246
return self.loc[key]
|
(self, key)
|
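The GH#50617 deprecation in `_get_with` can be observed directly. A hedged sketch, assuming pandas ≥ 2.1 where the FutureWarning is emitted:

```python
import warnings
import pandas as pd

ser = pd.Series([10, 20, 30], index=["a", "b", "c"])

# Integer list keys on a non-integer index fall back to positions today,
# but emit the GH#50617 FutureWarning:
with warnings.catch_warnings():
    warnings.simplefilter("ignore", FutureWarning)
    deprecated = ser[[0, 2]]

# The forward-compatible spelling:
print(ser.iloc[[0, 2]])   # a=10, c=30
```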
67,759 |
pandas.core.series
|
_gotitem
|
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : {1, 2}
Requested ndim of result.
subset : object, default None
Subset to act on.
|
def _gotitem(self, key, ndim, subset=None) -> Self:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : {1, 2}
Requested ndim of result.
subset : object, default None
Subset to act on.
"""
return self
|
(self, key, ndim, subset=None) -> 'Self'
|
67,761 |
pandas.core.series
|
_init_dict
|
Derive the "_mgr" and "index" attributes of a new Series from a
dictionary input.
Parameters
----------
data : dict or dict-like
Data used to populate the new Series.
index : Index or None, default None
Index for the new Series: if None, use dict keys.
dtype : np.dtype, ExtensionDtype, or None, default None
The dtype for the new Series: if None, infer from data.
Returns
-------
_data : BlockManager for the new Series
index : index for the new Series
|
def _init_dict(
self, data: Mapping, index: Index | None = None, dtype: DtypeObj | None = None
):
"""
Derive the "_mgr" and "index" attributes of a new Series from a
dictionary input.
Parameters
----------
data : dict or dict-like
Data used to populate the new Series.
index : Index or None, default None
Index for the new Series: if None, use dict keys.
dtype : np.dtype, ExtensionDtype, or None, default None
The dtype for the new Series: if None, infer from data.
Returns
-------
_data : BlockManager for the new Series
index : index for the new Series
"""
keys: Index | tuple
# Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
# raises KeyError), so we iterate the entire dict, and align
if data:
# GH#34717: extracting keys and values with zip produced generators,
# which hurt performance; build the key tuple and value list eagerly.
keys = tuple(data.keys())
values = list(data.values())
elif index is not None:
# fastpath for Series(data=None). Just use broadcasting a scalar
# instead of reindexing.
if len(index) or dtype is not None:
values = na_value_for_dtype(pandas_dtype(dtype), compat=False)
else:
values = []
keys = index
else:
keys, values = default_index(0), []
# Input is now list-like, so rely on "standard" construction:
s = Series(values, index=keys, dtype=dtype)
# Now we just make sure the order is respected, if any
if data and index is not None:
s = s.reindex(index, copy=False)
return s._mgr, s.index
|
(self, data: 'Mapping', index: 'Index | None' = None, dtype: 'DtypeObj | None' = None)
|
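A short demonstration of the two `_init_dict` paths, a sketch assuming `Series(data=None, ...)` is normalized to an empty dict before reaching this helper:

```python
import pandas as pd

# Dict keys become the index; an explicit index reorders/aligns afterwards
# via the reindex at the end of _init_dict:
s = pd.Series({"b": 2, "a": 1}, index=["a", "b", "c"])
print(s)   # a=1.0, b=2.0, c=NaN (upcast to float64 by the missing label)

# Empty data with an index takes the broadcast-a-scalar fastpath:
print(pd.Series(None, index=range(3), dtype="float64"))   # three NaNs
```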
67,767 |
pandas.core.series
|
_ixs
|
Return the i-th value or values in the Series by location.
Parameters
----------
i : int
Returns
-------
scalar
|
def _ixs(self, i: int, axis: AxisInt = 0) -> Any:
"""
Return the i-th value or values in the Series by location.
Parameters
----------
i : int
Returns
-------
scalar
"""
return self._values[i]
|
(self, i: 'int', axis: 'AxisInt' = 0) -> 'Any'
|
67,769 |
pandas.core.series
|
_logical_method
| null |
def _logical_method(self, other, op):
res_name = ops.get_op_result_name(self, other)
self, other = self._align_for_op(other, align_asobject=True)
lvalues = self._values
rvalues = extract_array(other, extract_numpy=True, extract_range=True)
res_values = ops.logical_op(lvalues, rvalues, op)
return self._construct_result(res_values, name=res_name)
|
(self, other, op)
|
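The `align_asobject=True` step means logical operators align to the union index before `ops.logical_op` runs; missing positions come out False for `&`. A minimal sketch of the observable effect:

```python
import pandas as pd

a = pd.Series([True, False, True], index=["x", "y", "z"])
b = pd.Series([True, True], index=["y", "z"])

# "x" is missing from b, so its slot aligns to NA and the & result is False
print(a & b)   # x=False, y=False, z=True
```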
67,771 |
pandas.core.series
|
_maybe_update_cacher
|
See NDFrame._maybe_update_cacher.__doc__
|
def _maybe_update_cacher(
self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False
) -> None:
"""
See NDFrame._maybe_update_cacher.__doc__
"""
# for CoW, we never want to update the parent DataFrame cache
# if the Series changed, but don't keep track of any cacher
if using_copy_on_write():
return
cacher = getattr(self, "_cacher", None)
if cacher is not None:
ref: DataFrame = cacher[1]()
# we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
elif len(self) == len(ref) and self.name in ref.columns:
# GH#42530 self.name must be in ref.columns
# to ensure column still in dataframe
# otherwise, either self or ref has swapped in new arrays
ref._maybe_cache_changed(cacher[0], self, inplace=inplace)
else:
# GH#33675 we have swapped in a new array, so parent
# reference to self is now invalid
ref._item_cache.pop(cacher[0], None)
super()._maybe_update_cacher(
clear=clear, verify_is_copy=verify_is_copy, inplace=inplace
)
|
(self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False) -> NoneType
|
67,774 |
pandas.core.series
|
_needs_reindex_multi
|
Check if we do need a multi reindex; this is for compat with
higher dims.
|
def _needs_reindex_multi(self, axes, method, level) -> bool:
"""
Check if we do need a multi reindex; this is for compat with
higher dims.
"""
return False
|
(self, axes, method, level) -> bool
|
67,777 |
pandas.core.series
|
_reduce
|
Perform a reduction operation.
If we have an ndarray as a value, then simply perform the operation,
otherwise delegate to the object.
|
def _reduce(
self,
op,
# error: Variable "pandas.core.series.Series.str" is not valid as a type
name: str, # type: ignore[valid-type]
*,
axis: Axis = 0,
skipna: bool = True,
numeric_only: bool = False,
filter_type=None,
**kwds,
):
"""
Perform a reduction operation.
If we have an ndarray as a value, then simply perform the operation,
otherwise delegate to the object.
"""
delegate = self._values
if axis is not None:
self._get_axis_number(axis)
if isinstance(delegate, ExtensionArray):
# dispatch to ExtensionArray interface
return delegate._reduce(name, skipna=skipna, **kwds)
else:
# dispatch to numpy arrays
if numeric_only and self.dtype.kind not in "iufcb":
# i.e. not is_numeric_dtype(self.dtype)
kwd_name = "numeric_only"
if name in ["any", "all"]:
kwd_name = "bool_only"
# GH#47500 - change to TypeError to match other methods
raise TypeError(
f"Series.{name} does not allow {kwd_name}={numeric_only} "
"with non-numeric dtypes."
)
return op(delegate, skipna=skipna, **kwds)
|
(self, op, name: 'str', *, axis: 'Axis' = 0, skipna: 'bool' = True, numeric_only: 'bool' = False, filter_type=None, **kwds)
|
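The GH#47500 branch of `_reduce` surfaces as a TypeError on the public reductions. A small sketch:

```python
import pandas as pd

s = pd.Series(["a", "b"])
try:
    s.sum(numeric_only=True)   # dtype kind not in "iufcb"
except TypeError as err:
    print(err)
# Series.sum does not allow numeric_only=True with non-numeric dtypes.
```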
67,779 |
pandas.core.series
|
_reindex_indexer
| null |
def _reindex_indexer(
self,
new_index: Index | None,
indexer: npt.NDArray[np.intp] | None,
copy: bool | None,
) -> Series:
# Note: new_index is None iff indexer is None
# if not None, indexer is np.intp
if indexer is None and (
new_index is None or new_index.names == self.index.names
):
if using_copy_on_write():
return self.copy(deep=copy)
if copy or copy is None:
return self.copy(deep=copy)
return self
new_values = algorithms.take_nd(
self._values, indexer, allow_fill=True, fill_value=None
)
return self._constructor(new_values, index=new_index, copy=False)
|
(self, new_index: 'Index | None', indexer: 'npt.NDArray[np.intp] | None', copy: 'bool | None') -> 'Series'
|
67,780 |
pandas.core.generic
|
_reindex_multi
| null |
def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
|
(self, axes, copy, fill_value)
|
67,783 |
pandas.core.series
|
_replace_single
|
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
|
def _replace_single(self, to_replace, method: str, inplace: bool, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
result = self if inplace else self.copy()
values = result._values
mask = missing.mask_missing(values, to_replace)
if isinstance(values, ExtensionArray):
# dispatch to the EA's _pad_mask_inplace method
values._fill_mask_inplace(method, limit, mask)
else:
fill_f = missing.get_fill_func(method)
fill_f(values, limit=limit, mask=mask)
if inplace:
return
return result
|
(self, to_replace, method: str, inplace: bool, limit)
|
67,787 |
pandas.core.series
|
_reset_cacher
|
Reset the cacher.
|
def _reset_cacher(self) -> None:
"""
Reset the cacher.
"""
if hasattr(self, "_cacher"):
del self._cacher
|
(self) -> NoneType
|
67,788 |
pandas.core.series
|
_set_as_cached
|
Set the _cacher attribute on the calling object with a weakref to
cacher.
|
def _set_as_cached(self, item, cacher) -> None:
"""
Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
if using_copy_on_write():
return
self._cacher = (item, weakref.ref(cacher))
|
(self, item, cacher) -> NoneType
|
67,793 |
pandas.core.series
|
_set_labels
| null |
def _set_labels(self, key, value, warn: bool = True) -> None:
key = com.asarray_tuplesafe(key)
indexer: np.ndarray = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise KeyError(f"{key[mask]} not in index")
self._set_values(indexer, value, warn=warn)
|
(self, key, value, warn: bool = True) -> NoneType
|
67,794 |
pandas.core.series
|
_set_name
|
Set the Series name.
Parameters
----------
name : str
inplace : bool
Whether to modify `self` directly or return a copy.
deep : bool|None, default None
Whether to do a deep copy, a shallow copy, or Copy on Write(None)
|
def _set_name(
self, name, inplace: bool = False, deep: bool | None = None
) -> Series:
"""
Set the Series name.
Parameters
----------
name : str
inplace : bool
Whether to modify `self` directly or return a copy.
deep : bool|None, default None
Whether to do a deep copy, a shallow copy, or Copy on Write(None)
"""
inplace = validate_bool_kwarg(inplace, "inplace")
ser = self if inplace else self.copy(deep and not using_copy_on_write())
ser.name = name
return ser
|
(self, name, inplace: bool = False, deep: Optional[bool] = None) -> pandas.core.series.Series
|
67,795 |
pandas.core.series
|
_set_value
|
Quickly set single value at passed label.
If label is not contained, a new object is created with the label
placed at the end of the result index.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed.
value : object
Scalar value.
takeable : interpret the index as indexers, default False
|
def _set_value(self, label, value, takeable: bool = False) -> None:
"""
Quickly set single value at passed label.
If label is not contained, a new object is created with the label
placed at the end of the result index.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed.
value : object
Scalar value.
takeable : interpret the index as indexers, default False
"""
if not takeable:
try:
loc = self.index.get_loc(label)
except KeyError:
# set using a non-recursive method
self.loc[label] = value
return
else:
loc = label
self._set_values(loc, value)
|
(self, label, value, takeable: bool = False) -> NoneType
|
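`_set_value` backs scalar assignment through the public accessors, including the enlarging `loc` fallback for missing labels. A hedged sketch of those paths:

```python
import pandas as pd

ser = pd.Series([10, 20], index=["a", "b"])
ser.at["b"] = 99   # label path (takeable=False)
ser.iat[0] = 11    # positional path (takeable=True)

# A missing label falls back to self.loc[label] = value, enlarging the Series:
ser.at["c"] = 7
print(ser)   # a=11, b=99, c=7
```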
67,796 |
pandas.core.series
|
_set_values
| null |
def _set_values(self, key, value, warn: bool = True) -> None:
if isinstance(key, (Index, Series)):
key = key._values
self._mgr = self._mgr.setitem(indexer=key, value=value, warn=warn)
self._maybe_update_cacher()
|
(self, key, value, warn: bool = True) -> NoneType
|
67,797 |
pandas.core.series
|
_set_with
| null |
def _set_with(self, key, value, warn: bool = True) -> None:
# We got here via exception-handling off of InvalidIndexError, so
# key should always be listlike at this point.
assert not isinstance(key, tuple)
if is_iterator(key):
# Without this, the call to infer_dtype will consume the generator
key = list(key)
if not self.index._should_fallback_to_positional:
# Regardless of the key type, we're treating it as labels
self._set_labels(key, value, warn=warn)
else:
# Note: key_type == "boolean" should not occur because that
# should be caught by the is_bool_indexer check in __setitem__
key_type = lib.infer_dtype(key, skipna=False)
if key_type == "integer":
warnings.warn(
# GH#50617
"Series.__setitem__ treating keys as positions is deprecated. "
"In a future version, integer keys will always be treated "
"as labels (consistent with DataFrame behavior). To set "
"a value by position, use `ser.iloc[pos] = value`",
FutureWarning,
stacklevel=find_stack_level(),
)
self._set_values(key, value, warn=warn)
else:
self._set_labels(key, value, warn=warn)
|
(self, key, value, warn: bool = True) -> NoneType
|
67,798 |
pandas.core.series
|
_set_with_engine
| null |
def _set_with_engine(self, key, value, warn: bool = True) -> None:
loc = self.index.get_loc(key)
# this is equivalent to self._values[key] = value
self._mgr.setitem_inplace(loc, value, warn=warn)
|
(self, key, value, warn: bool = True) -> NoneType
|
67,800 |
pandas.core.series
|
_slice
| null |
def _slice(self, slobj: slice, axis: AxisInt = 0) -> Series:
# axis kwarg is retained for compat with NDFrame method
# _slice is *always* positional
mgr = self._mgr.get_slice(slobj, axis=axis)
out = self._constructor_from_mgr(mgr, axes=mgr.axes)
out._name = self._name
return out.__finalize__(self)
|
(self, slobj: 'slice', axis: 'AxisInt' = 0) -> 'Series'
|
67,808 |
pandas.core.series
|
add
|
Return Addition of series and other, element-wise (binary operator `add`).
Equivalent to ``series + other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
See Also
--------
Series.radd : Reverse of the Addition operator, see
`Python documentation
<https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_
for more details.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.add(b, fill_value=0)
a 2.0
b 1.0
c 1.0
d 1.0
e NaN
dtype: float64
|
@Appender(ops.make_flex_doc("add", "series"))
def add(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, operator.add, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
67,811 |
pandas.core.series
|
aggregate
|
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a Series or when passed to Series.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
See Also
--------
Series.apply : Invoke function on a Series.
Series.transform : Transform function producing a Series with like indexes.
Notes
-----
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
A passed user-defined-function will be passed a Series for evaluation.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.agg('min')
1
>>> s.agg(['min', 'max'])
min 1
max 4
dtype: int64
|
@doc(
_shared_docs["aggregate"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
)
def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):
# Validate the axis parameter
self._get_axis_number(axis)
# if func is None, will switch to user-provided "named aggregation" kwargs
if func is None:
func = dict(kwargs.items())
op = SeriesApply(self, func, args=args, kwargs=kwargs)
result = op.agg()
return result
|
(self, func=None, axis: 'Axis' = 0, *args, **kwargs)
|
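Beyond the docstring's string and list forms, the dict form (which the `func=None` branch builds from keyword arguments) and plain callables are worth illustrating. A sketch, assuming dict keys become output labels for Series.agg:

```python
import pandas as pd

s = pd.Series([1, 2, 3, 4])

# dict of output-label -> function
print(s.agg({"minimum": "min", "maximum": "max"}))
# minimum    1
# maximum    4
# dtype: int64

# a plain callable reduces to a scalar
print(s.agg(lambda x: x.max() - x.min()))   # 3
```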
67,814 |
pandas.core.series
|
all
|
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a Series or
along a DataFrame axis that is False or equivalent (e.g. zero or
empty).
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced. For `Series` this parameter
is unused and defaults to 0.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default False
Include only boolean columns. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be True, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
scalar or Series
If level is specified, then a Series is returned; otherwise, a scalar
is returned.
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([], dtype="float64").all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if values in each column all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if values in each row all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
|
@Appender(make_doc("all", ndim=1))
def all(
self,
axis: Axis = 0,
bool_only: bool = False,
skipna: bool = True,
**kwargs,
) -> bool:
nv.validate_logical_func((), kwargs, fname="all")
validate_bool_kwarg(skipna, "skipna", none_allowed=False)
return self._reduce(
nanops.nanall,
name="all",
axis=axis,
numeric_only=bool_only,
skipna=skipna,
filter_type="bool",
)
|
(self, axis: 'Axis' = 0, bool_only: 'bool' = False, skipna: 'bool' = True, **kwargs) -> 'bool'
|
67,815 |
pandas.core.series
|
any
|
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a Series or
along a DataFrame axis that is True or equivalent (e.g. non-zero or
non-empty).
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced. For `Series` this parameter
is unused and defaults to 0.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default False
Include only boolean columns. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be False, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
scalar or Series
If level is specified, then a Series is returned; otherwise, a scalar
is returned.
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([], dtype="float64").any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
|
@Appender(make_doc("any", ndim=1))
# error: Signature of "any" incompatible with supertype "NDFrame"
def any( # type: ignore[override]
self,
*,
axis: Axis = 0,
bool_only: bool = False,
skipna: bool = True,
**kwargs,
) -> bool:
nv.validate_logical_func((), kwargs, fname="any")
validate_bool_kwarg(skipna, "skipna", none_allowed=False)
return self._reduce(
nanops.nanany,
name="any",
axis=axis,
numeric_only=bool_only,
skipna=skipna,
filter_type="bool",
)
|
(self, *, axis: 'Axis' = 0, bool_only: 'bool' = False, skipna: 'bool' = True, **kwargs) -> 'bool'
|
67,816 |
pandas.core.series
|
apply
|
Invoke function on values of Series.
Can be ufunc (a NumPy function that applies to the entire Series)
or a Python function that only works on single values.
Parameters
----------
func : function
Python function or NumPy ufunc to apply.
convert_dtype : bool, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object. Note that the dtype is always
preserved for some extension array dtypes, such as Categorical.
.. deprecated:: 2.1.0
``convert_dtype`` has been deprecated. Do ``ser.astype(object).apply()``
instead if you want ``convert_dtype=False``.
args : tuple
Positional arguments passed to func after the series value.
by_row : False or "compat", default "compat"
If ``"compat"`` and func is a callable, func will be passed each element of
the Series, like ``Series.map``. If func is a list or dict of
callables, will first try to translate each func into pandas methods. If
that doesn't work, will try call to apply again with ``by_row="compat"``
and if that fails, will call apply again with ``by_row=False``
(backward compatible).
If False, the func will be passed the whole Series at once.
``by_row`` has no effect when ``func`` is a string.
.. versionadded:: 2.1.0
**kwargs
Additional keyword arguments passed to func.
Returns
-------
Series or DataFrame
If func returns a Series object the result will be a DataFrame.
See Also
--------
Series.map: For element-wise operations.
Series.agg: Only perform aggregating type operations.
Series.transform: Only perform transforming type operations.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
Create a series with typical summer temperatures for each city.
>>> s = pd.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> s.apply(lambda x: x ** 2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> s.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
|
def apply(
self,
func: AggFuncType,
convert_dtype: bool | lib.NoDefault = lib.no_default,
args: tuple[Any, ...] = (),
*,
by_row: Literal[False, "compat"] = "compat",
**kwargs,
) -> DataFrame | Series:
"""
Invoke function on values of Series.
Can be ufunc (a NumPy function that applies to the entire Series)
or a Python function that only works on single values.
Parameters
----------
func : function
Python function or NumPy ufunc to apply.
convert_dtype : bool, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object. Note that the dtype is always
preserved for some extension array dtypes, such as Categorical.
.. deprecated:: 2.1.0
``convert_dtype`` has been deprecated. Do ``ser.astype(object).apply()``
instead if you want ``convert_dtype=False``.
args : tuple
Positional arguments passed to func after the series value.
by_row : False or "compat", default "compat"
If ``"compat"`` and func is a callable, func will be passed each element of
the Series, like ``Series.map``. If func is a list or dict of
callables, will first try to translate each func into pandas methods. If
that doesn't work, will try call to apply again with ``by_row="compat"``
and if that fails, will call apply again with ``by_row=False``
(backward compatible).
If False, the func will be passed the whole Series at once.
``by_row`` has no effect when ``func`` is a string.
.. versionadded:: 2.1.0
**kwargs
Additional keyword arguments passed to func.
Returns
-------
Series or DataFrame
If func returns a Series object the result will be a DataFrame.
See Also
--------
Series.map: For element-wise operations.
Series.agg: Only perform aggregating type operations.
Series.transform: Only perform transforming type operations.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
Create a series with typical summer temperatures for each city.
>>> s = pd.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> s.apply(lambda x: x ** 2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> s.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
return SeriesApply(
self,
func,
convert_dtype=convert_dtype,
by_row=by_row,
args=args,
kwargs=kwargs,
).apply()
|
(self, func: 'AggFuncType', convert_dtype: 'bool | lib.NoDefault' = <no_default>, args: 'tuple[Any, ...]' = (), *, by_row: "Literal[False, 'compat']" = 'compat', **kwargs) -> 'DataFrame | Series'
|
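The docstring's examples all use the default `by_row="compat"`; the `by_row=False` mode (new in 2.1.0) is the one that changes the calling convention. A hedged sketch, assuming a callable returning a Series is passed through unchanged in that mode:

```python
import pandas as pd

s = pd.Series([20, 21, 12], index=["London", "New York", "Helsinki"])

# default by_row="compat": the function sees one element at a time
print(s.apply(lambda x: x ** 2))

# by_row=False: the function receives the whole Series once
print(s.apply(lambda ser: ser - ser.mean(), by_row=False))
```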
67,817 |
pandas.core.base
|
argmax
|
Return int position of the largest value in the Series.
If the maximum is achieved in multiple locations,
the first row position is returned.
Parameters
----------
axis : {None}
Unused. Parameter needed for compatibility with DataFrame.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
int
Row position of the maximum value.
See Also
--------
Series.argmax : Return position of the maximum value.
Series.argmin : Return position of the minimum value.
numpy.ndarray.argmax : Equivalent method for numpy arrays.
Series.idxmax : Return index label of the maximum values.
Series.idxmin : Return index label of the minimum values.
Examples
--------
Consider dataset containing cereal calories
>>> s = pd.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})
>>> s
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmax()
2
>>> s.argmin()
0
The maximum cereal calories is the third element and
the minimum cereal calories is the first element,
since series is zero-indexed.
|
@doc(op="max", oppose="min", value="largest")
def argmax(
self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
) -> int:
"""
Return int position of the {value} value in the Series.
If the {op}imum is achieved in multiple locations,
the first row position is returned.
Parameters
----------
axis : {{None}}
Unused. Parameter needed for compatibility with DataFrame.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
int
Row position of the {op}imum value.
See Also
--------
Series.arg{op} : Return position of the {op}imum value.
Series.arg{oppose} : Return position of the {oppose}imum value.
numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
Series.idxmax : Return index label of the maximum values.
Series.idxmin : Return index label of the minimum values.
Examples
--------
Consider dataset containing cereal calories
>>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
>>> s
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmax()
2
>>> s.argmin()
0
The maximum cereal calories is the third element and
the minimum cereal calories is the first element,
since series is zero-indexed.
"""
delegate = self._values
nv.validate_minmax_axis(axis)
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
if isinstance(delegate, ExtensionArray):
if not skipna and delegate.isna().any():
warnings.warn(
f"The behavior of {type(self).__name__}.argmax/argmin "
"with skipna=False and NAs, or with all-NAs is deprecated. "
"In a future version this will raise ValueError.",
FutureWarning,
stacklevel=find_stack_level(),
)
return -1
else:
return delegate.argmax()
else:
result = nanops.nanargmax(delegate, skipna=skipna)
if result == -1:
warnings.warn(
f"The behavior of {type(self).__name__}.argmax/argmin "
"with skipna=False and NAs, or with all-NAs is deprecated. "
"In a future version this will raise ValueError.",
FutureWarning,
stacklevel=find_stack_level(),
)
# error: Incompatible return value type (got "Union[int, ndarray]", expected
# "int")
return result # type: ignore[return-value]
|
(self, axis: Optional[int] = None, skipna: bool = True, *args, **kwargs) -> int
|
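The deprecated `-1` return for `skipna=False` with NAs can be seen directly. A short sketch, assuming the FutureWarning is still a warning (not yet a ValueError):

```python
import warnings
import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 3.0])
print(s.argmax())   # 2 -- the NaN is skipped by default

with warnings.catch_warnings():
    warnings.simplefilter("ignore", FutureWarning)
    print(s.argmax(skipna=False))   # -1 today; slated to raise ValueError
```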
67,818 |
pandas.core.base
|
argmin
|
Return int position of the smallest value in the Series.
If the minimum is achieved in multiple locations,
the first row position is returned.
Parameters
----------
axis : {None}
Unused. Parameter needed for compatibility with DataFrame.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
int
Row position of the minimum value.
See Also
--------
Series.argmin : Return position of the minimum value.
Series.argmax : Return position of the maximum value.
numpy.ndarray.argmin : Equivalent method for numpy arrays.
Series.idxmax : Return index label of the maximum values.
Series.idxmin : Return index label of the minimum values.
Examples
--------
Consider dataset containing cereal calories
>>> s = pd.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})
>>> s
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmax()
2
>>> s.argmin()
0
The maximum cereal calories is the third element and
the minimum cereal calories is the first element,
since series is zero-indexed.
|
@doc(argmax, op="min", oppose="max", value="smallest")
def argmin(
self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
) -> int:
delegate = self._values
nv.validate_minmax_axis(axis)
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
if isinstance(delegate, ExtensionArray):
if not skipna and delegate.isna().any():
warnings.warn(
f"The behavior of {type(self).__name__}.argmax/argmin "
"with skipna=False and NAs, or with all-NAs is deprecated. "
"In a future version this will raise ValueError.",
FutureWarning,
stacklevel=find_stack_level(),
)
return -1
else:
return delegate.argmin()
else:
result = nanops.nanargmin(delegate, skipna=skipna)
if result == -1:
warnings.warn(
f"The behavior of {type(self).__name__}.argmax/argmin "
"with skipna=False and NAs, or with all-NAs is deprecated. "
"In a future version this will raise ValueError.",
FutureWarning,
stacklevel=find_stack_level(),
)
# error: Incompatible return value type (got "Union[int, ndarray]", expected
# "int")
return result # type: ignore[return-value]
|
(self, axis: Optional[int] = None, skipna: bool = True, *args, **kwargs) -> int
|
67,819 |
pandas.core.series
|
argsort
|
Return the integer indices that would sort the Series values.
Override ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values.
Parameters
----------
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable algorithms.
order : None
Has no effect but is accepted for compatibility with numpy.
stable : None
Has no effect but is accepted for compatibility with numpy.
Returns
-------
Series[np.intp]
Positions of values within the sort order with -1 indicating
nan values.
See Also
--------
numpy.ndarray.argsort : Returns the indices that would sort this array.
Examples
--------
>>> s = pd.Series([3, 2, 1])
>>> s.argsort()
0 2
1 1
2 0
dtype: int64
|
def argsort(
self,
axis: Axis = 0,
kind: SortKind = "quicksort",
order: None = None,
stable: None = None,
) -> Series:
"""
Return the integer indices that would sort the Series values.
Override ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values.
Parameters
----------
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable algorithms.
order : None
Has no effect but is accepted for compatibility with numpy.
stable : None
Has no effect but is accepted for compatibility with numpy.
Returns
-------
Series[np.intp]
Positions of values within the sort order with -1 indicating
nan values.
See Also
--------
numpy.ndarray.argsort : Returns the indices that would sort this array.
Examples
--------
>>> s = pd.Series([3, 2, 1])
>>> s.argsort()
0 2
1 1
2 0
dtype: int64
"""
if axis != -1:
# GH#54257 We allow -1 here so that np.argsort(series) works
self._get_axis_number(axis)
values = self._values
mask = isna(values)
if mask.any():
# TODO(3.0): once this deprecation is enforced we can call
# self.array.argsort directly, which will close GH#43840 and
# GH#12694
warnings.warn(
"The behavior of Series.argsort in the presence of NA values is "
"deprecated. In a future version, NA values will be ordered "
"last instead of set to -1.",
FutureWarning,
stacklevel=find_stack_level(),
)
result = np.full(len(self), -1, dtype=np.intp)
notmask = ~mask
result[notmask] = np.argsort(values[notmask], kind=kind)
else:
result = np.argsort(values, kind=kind)
res = self._constructor(
result, index=self.index, name=self.name, dtype=np.intp, copy=False
)
return res.__finalize__(self, method="argsort")
|
(self, axis: 'Axis' = 0, kind: 'SortKind' = 'quicksort', order: 'None' = None, stable: 'None' = None) -> 'Series'
|
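The NA branch of `argsort` (the GH#43840/GH#12694 deprecation) places `-1` in the NA slots and argsorts only the non-NA values. A sketch:

```python
import warnings
import numpy as np
import pandas as pd

s = pd.Series([3.0, np.nan, 1.0])
with warnings.catch_warnings():
    warnings.simplefilter("ignore", FutureWarning)
    print(s.argsort())   # [1, -1, 0]: the NA slot gets the -1 sentinel
```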
67,824 |
pandas.core.series
|
autocorr
|
Compute the lag-N autocorrelation.
This method computes the Pearson correlation between
the Series and its shifted self.
Parameters
----------
lag : int, default 1
Number of lags to apply before performing autocorrelation.
Returns
-------
float
The Pearson correlation between self and self.shift(lag).
See Also
--------
Series.corr : Compute the correlation between two Series.
Series.shift : Shift index by desired number of periods.
DataFrame.corr : Compute pairwise correlation of columns.
DataFrame.corrwith : Compute pairwise correlation between rows or
columns of two DataFrame objects.
Notes
-----
If the Pearson correlation is not well defined, 'NaN' is returned.
Examples
--------
>>> s = pd.Series([0.25, 0.5, 0.2, -0.05])
>>> s.autocorr() # doctest: +ELLIPSIS
0.10355...
>>> s.autocorr(lag=2) # doctest: +ELLIPSIS
-0.99999...
If the Pearson correlation is not well defined, then 'NaN' is returned.
>>> s = pd.Series([1, 0, 0, 0])
>>> s.autocorr()
nan
|
def autocorr(self, lag: int = 1) -> float:
"""
Compute the lag-N autocorrelation.
This method computes the Pearson correlation between
the Series and its shifted self.
Parameters
----------
lag : int, default 1
Number of lags to apply before performing autocorrelation.
Returns
-------
float
The Pearson correlation between self and self.shift(lag).
See Also
--------
Series.corr : Compute the correlation between two Series.
Series.shift : Shift index by desired number of periods.
DataFrame.corr : Compute pairwise correlation of columns.
DataFrame.corrwith : Compute pairwise correlation between rows or
columns of two DataFrame objects.
Notes
-----
If the Pearson correlation is not well defined, 'NaN' is returned.
Examples
--------
>>> s = pd.Series([0.25, 0.5, 0.2, -0.05])
>>> s.autocorr() # doctest: +ELLIPSIS
0.10355...
>>> s.autocorr(lag=2) # doctest: +ELLIPSIS
-0.99999...
If the Pearson correlation is not well defined, then 'NaN' is returned.
>>> s = pd.Series([1, 0, 0, 0])
>>> s.autocorr()
nan
"""
return self.corr(cast(Series, self.shift(lag)))
|
(self, lag: int = 1) -> float
|
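Since the body is a one-liner, the equivalence is directly checkable; the method is literally `corr` against the shifted self:

```python
import pandas as pd

s = pd.Series([0.25, 0.5, 0.2, -0.05])
assert s.autocorr(lag=1) == s.corr(s.shift(1))
```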
67,826 |
pandas.core.series
|
between
|
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar or list-like
Left boundary.
right : scalar or list-like
Right boundary.
inclusive : {"both", "neither", "left", "right"}
Include boundaries. Whether to set each bound as closed or open.
.. versionchanged:: 1.3.0
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = pd.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``"neither"`` boundary values are excluded:
>>> s.between(1, 4, inclusive="neither")
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
|
def between(
self,
left,
right,
inclusive: Literal["both", "neither", "left", "right"] = "both",
) -> Series:
"""
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar or list-like
Left boundary.
right : scalar or list-like
Right boundary.
inclusive : {"both", "neither", "left", "right"}
Include boundaries. Whether to set each bound as closed or open.
.. versionchanged:: 1.3.0
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = pd.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``"neither"`` boundary values are excluded:
>>> s.between(1, 4, inclusive="neither")
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
"""
if inclusive == "both":
lmask = self >= left
rmask = self <= right
elif inclusive == "left":
lmask = self >= left
rmask = self < right
elif inclusive == "right":
lmask = self > left
rmask = self <= right
elif inclusive == "neither":
lmask = self > left
rmask = self < right
else:
raise ValueError(
"Inclusive has to be either string of 'both',"
"'left', 'right', or 'neither'."
)
return lmask & rmask
|
(self, left, right, inclusive: Literal['both', 'neither', 'left', 'right'] = 'both') -> pandas.core.series.Series
|
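The "equivalent to ``(left <= ser) & (ser <= right)``" note can be verified, including the NA-treated-as-False behavior. A short sketch:

```python
import numpy as np
import pandas as pd

s = pd.Series([2, 0, 4, 8, np.nan])
manual = (s >= 1) & (s <= 4)          # NA compares False on both sides
assert manual.equals(s.between(1, 4))
```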
67,830 |
pandas.core.series
|
case_when
|
Replace values where the conditions are True.
Parameters
----------
caselist : A list of tuples of conditions and expected replacements
Takes the form: ``(condition0, replacement0)``,
``(condition1, replacement1)``, ... .
``condition`` should be a 1-D boolean array-like object
or a callable. If ``condition`` is a callable,
it is computed on the Series
and should return a boolean Series or array.
The callable must not change the input Series
(though pandas doesn't check it). ``replacement`` should be a
1-D array-like object, a scalar or a callable.
If ``replacement`` is a callable, it is computed on the Series
and should return a scalar or Series. The callable
must not change the input Series
(though pandas doesn't check it).
.. versionadded:: 2.2.0
Returns
-------
Series
See Also
--------
Series.mask : Replace values where the condition is True.
Examples
--------
>>> c = pd.Series([6, 7, 8, 9], name='c')
>>> a = pd.Series([0, 0, 1, 2])
>>> b = pd.Series([0, 3, 4, 5])
>>> c.case_when(caselist=[(a.gt(0), a), # condition, replacement
... (b.gt(0), b)])
0 6
1 3
2 1
3 2
Name: c, dtype: int64
|
def case_when(
self,
caselist: list[
tuple[
ArrayLike | Callable[[Series], Series | np.ndarray | Sequence[bool]],
ArrayLike | Scalar | Callable[[Series], Series | np.ndarray],
],
],
) -> Series:
"""
Replace values where the conditions are True.
Parameters
----------
caselist : A list of tuples of conditions and expected replacements
Takes the form: ``(condition0, replacement0)``,
``(condition1, replacement1)``, ... .
``condition`` should be a 1-D boolean array-like object
or a callable. If ``condition`` is a callable,
it is computed on the Series
and should return a boolean Series or array.
The callable must not change the input Series
(though pandas doesn't check it). ``replacement`` should be a
1-D array-like object, a scalar or a callable.
If ``replacement`` is a callable, it is computed on the Series
and should return a scalar or Series. The callable
must not change the input Series
(though pandas doesn't check it).
.. versionadded:: 2.2.0
Returns
-------
Series
See Also
--------
Series.mask : Replace values where the condition is True.
Examples
--------
>>> c = pd.Series([6, 7, 8, 9], name='c')
>>> a = pd.Series([0, 0, 1, 2])
>>> b = pd.Series([0, 3, 4, 5])
>>> c.case_when(caselist=[(a.gt(0), a), # condition, replacement
... (b.gt(0), b)])
0 6
1 3
2 1
3 2
Name: c, dtype: int64
"""
if not isinstance(caselist, list):
raise TypeError(
f"The caselist argument should be a list; instead got {type(caselist)}"
)
if not caselist:
raise ValueError(
"provide at least one boolean condition, "
"with a corresponding replacement."
)
for num, entry in enumerate(caselist):
if not isinstance(entry, tuple):
raise TypeError(
f"Argument {num} must be a tuple; instead got {type(entry)}."
)
if len(entry) != 2:
raise ValueError(
f"Argument {num} must have length 2; "
"a condition and replacement; "
f"instead got length {len(entry)}."
)
caselist = [
(
com.apply_if_callable(condition, self),
com.apply_if_callable(replacement, self),
)
for condition, replacement in caselist
]
default = self.copy()
conditions, replacements = zip(*caselist)
common_dtypes = [infer_dtype_from(arg)[0] for arg in [*replacements, default]]
if len(set(common_dtypes)) > 1:
common_dtype = find_common_type(common_dtypes)
updated_replacements = []
for condition, replacement in zip(conditions, replacements):
if is_scalar(replacement):
replacement = construct_1d_arraylike_from_scalar(
value=replacement, length=len(condition), dtype=common_dtype
)
elif isinstance(replacement, ABCSeries):
replacement = replacement.astype(common_dtype)
else:
replacement = pd_array(replacement, dtype=common_dtype)
updated_replacements.append(replacement)
replacements = updated_replacements
default = default.astype(common_dtype)
counter = reversed(range(len(conditions)))
for position, condition, replacement in zip(
counter, conditions[::-1], replacements[::-1]
):
try:
default = default.mask(
condition, other=replacement, axis=0, inplace=False, level=None
)
except Exception as error:
raise ValueError(
f"Failed to apply condition{position} and replacement{position}."
) from error
return default
|
(self, caselist: 'list[tuple[ArrayLike | Callable[[Series], Series | np.ndarray | Sequence[bool]], ArrayLike | Scalar | Callable[[Series], Series | np.ndarray]],]') -> 'Series'
|
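The docstring's example uses array conditions; the callable form, which `com.apply_if_callable` evaluates against the Series before the masking loop, is worth a sketch:

```python
import pandas as pd

c = pd.Series([6, 7, 8, 9], name="c")
# conditions (and replacements) may be callables evaluated on the Series
print(c.case_when([(lambda s: s > 7, 0)]))   # 6, 7, 0, 0 -- name preserved
```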
67,832 |
pandas.core.series
|
combine
|
Combine the Series with a Series or scalar according to `func`.
Combine the Series and `other` using `func` to perform elementwise
selection for combined Series.
`fill_value` is assumed when value is missing at some index
from one of the two objects being combined.
Parameters
----------
other : Series or scalar
The value(s) to be combined with the `Series`.
func : function
Function that takes two scalars as inputs and returns an element.
fill_value : scalar, optional
The value to assume when an index is missing from
one Series or the other. The default specifies to use the
appropriate NaN value for the underlying dtype of the Series.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series' values first.
Examples
--------
Consider 2 Datasets ``s1`` and ``s2`` containing
highest clocked speeds of different birds.
>>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
>>> s1
falcon 330.0
eagle 160.0
dtype: float64
>>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
>>> s2
falcon 345.0
eagle 200.0
duck 30.0
dtype: float64
Now, to combine the two datasets and view the highest speeds
of the birds across the two datasets
>>> s1.combine(s2, max)
duck NaN
eagle 200.0
falcon 345.0
dtype: float64
In the previous example, the resulting value for duck is missing,
because the maximum of a NaN and a float is a NaN.
So, in the example, we set ``fill_value=0``,
so the maximum value returned will be the value from some dataset.
>>> s1.combine(s2, max, fill_value=0)
duck 30.0
eagle 200.0
falcon 345.0
dtype: float64
|
def combine(
self,
other: Series | Hashable,
func: Callable[[Hashable, Hashable], Hashable],
fill_value: Hashable | None = None,
) -> Series:
"""
Combine the Series with a Series or scalar according to `func`.
Combine the Series and `other` using `func` to perform elementwise
selection for combined Series.
`fill_value` is assumed when value is missing at some index
from one of the two objects being combined.
Parameters
----------
other : Series or scalar
The value(s) to be combined with the `Series`.
func : function
Function that takes two scalars as inputs and returns an element.
fill_value : scalar, optional
The value to assume when an index is missing from
one Series or the other. The default specifies to use the
appropriate NaN value for the underlying dtype of the Series.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series' values first.
Examples
--------
Consider 2 Datasets ``s1`` and ``s2`` containing
highest clocked speeds of different birds.
>>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
>>> s1
falcon 330.0
eagle 160.0
dtype: float64
>>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
>>> s2
falcon 345.0
eagle 200.0
duck 30.0
dtype: float64
Now, to combine the two datasets and view the highest speeds
of the birds across the two datasets
>>> s1.combine(s2, max)
duck NaN
eagle 200.0
falcon 345.0
dtype: float64
In the previous example, the resulting value for duck is missing,
because the maximum of a NaN and a float is a NaN.
So, in the example, we set ``fill_value=0``,
so the maximum value returned will be the value from some dataset.
>>> s1.combine(s2, max, fill_value=0)
duck 30.0
eagle 200.0
falcon 345.0
dtype: float64
"""
if fill_value is None:
fill_value = na_value_for_dtype(self.dtype, compat=False)
if isinstance(other, Series):
# If other is a Series, result is based on union of Series,
# so do this element by element
new_index = self.index.union(other.index)
new_name = ops.get_op_result_name(self, other)
new_values = np.empty(len(new_index), dtype=object)
with np.errstate(all="ignore"):
for i, idx in enumerate(new_index):
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
new_values[i] = func(lv, rv)
else:
# Assume that other is a scalar, so apply the function for
# each element in the Series
new_index = self.index
new_values = np.empty(len(new_index), dtype=object)
with np.errstate(all="ignore"):
new_values[:] = [func(lv, other) for lv in self._values]
new_name = self.name
# try_float=False is to match agg_series
npvalues = lib.maybe_convert_objects(new_values, try_float=False)
# same_dtype here is a kludge to avoid casting e.g. [True, False] to
# ["True", "False"]
same_dtype = isinstance(self.dtype, (StringDtype, CategoricalDtype))
res_values = maybe_cast_pointwise_result(
npvalues, self.dtype, same_dtype=same_dtype
)
return self._constructor(res_values, index=new_index, name=new_name, copy=False)
|
(self, other: pandas.core.series.Series | collections.abc.Hashable, func: Callable[[collections.abc.Hashable, collections.abc.Hashable], collections.abc.Hashable], fill_value: Optional[collections.abc.Hashable] = None) -> pandas.core.series.Series
|
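A short, hedged illustration (not part of the source row above) of the scalar branch of ``combine``: when ``other`` is not a Series, ``func`` is applied element by element against the scalar.
import pandas as pd

s = pd.Series([1, 5, 3], index=["a", "b", "c"])
# Scalar branch: func(value, other) is evaluated for every element.
clipped = s.combine(4, min)
print(clipped)  # a 1, b 4, c 3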
67,833 |
pandas.core.series
|
combine_first
|
Update null elements with value in the same location in 'other'.
Combine two Series objects by filling null values in one Series with
non-null values from the other Series. Result index will be the union
of the two indexes.
Parameters
----------
other : Series
The value(s) to be used for filling null values.
Returns
-------
Series
The result of combining the provided Series with the other object.
See Also
--------
Series.combine : Perform element-wise operation on two Series
using a given function.
Examples
--------
>>> s1 = pd.Series([1, np.nan])
>>> s2 = pd.Series([3, 4, 5])
>>> s1.combine_first(s2)
0 1.0
1 4.0
2 5.0
dtype: float64
Null values still persist if the location of that null value
does not exist in `other`
>>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0})
>>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0})
>>> s1.combine_first(s2)
duck 30.0
eagle 160.0
falcon NaN
dtype: float64
|
def combine_first(self, other) -> Series:
"""
Update null elements with value in the same location in 'other'.
Combine two Series objects by filling null values in one Series with
non-null values from the other Series. Result index will be the union
of the two indexes.
Parameters
----------
other : Series
The value(s) to be used for filling null values.
Returns
-------
Series
The result of combining the provided Series with the other object.
See Also
--------
Series.combine : Perform element-wise operation on two Series
using a given function.
Examples
--------
>>> s1 = pd.Series([1, np.nan])
>>> s2 = pd.Series([3, 4, 5])
>>> s1.combine_first(s2)
0 1.0
1 4.0
2 5.0
dtype: float64
Null values still persist if the location of that null value
does not exist in `other`
>>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0})
>>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0})
>>> s1.combine_first(s2)
duck 30.0
eagle 160.0
falcon NaN
dtype: float64
"""
from pandas.core.reshape.concat import concat
if self.dtype == other.dtype:
if self.index.equals(other.index):
return self.mask(self.isna(), other)
elif self._can_hold_na and not isinstance(self.dtype, SparseDtype):
this, other = self.align(other, join="outer")
return this.mask(this.isna(), other)
new_index = self.index.union(other.index)
this = self
# identify the index subset to keep for each series
keep_other = other.index.difference(this.index[notna(this)])
keep_this = this.index.difference(keep_other)
this = this.reindex(keep_this, copy=False)
other = other.reindex(keep_other, copy=False)
if this.dtype.kind == "M" and other.dtype.kind != "M":
other = to_datetime(other)
combined = concat([this, other])
combined = combined.reindex(new_index, copy=False)
return combined.__finalize__(self, method="combine_first")
|
(self, other) -> pandas.core.series.Series
|
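A minimal sketch (assuming the usual pandas/numpy imports) of the fast path in the code above: when both Series share a dtype, nulls in the caller are filled from ``other`` and the result index is the union of both indexes.
import numpy as np
import pandas as pd

s1 = pd.Series([1.0, np.nan], index=["a", "b"])
s2 = pd.Series([10.0, 20.0, 30.0], index=["a", "b", "c"])
# b is filled from s2; c exists only in s2, so it is carried over.
print(s1.combine_first(s2))  # a 1.0, b 20.0, c 30.0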
67,834 |
pandas.core.series
|
compare
|
Compare to another Series and show the differences.
Parameters
----------
other : Series
Object to compare with.
align_axis : {0 or 'index', 1 or 'columns'}, default 1
Determine which axis to align the comparison on.
* 0, or 'index' : Resulting differences are stacked vertically
with rows drawn alternately from self and other.
* 1, or 'columns' : Resulting differences are aligned horizontally
with columns drawn alternately from self and other.
keep_shape : bool, default False
If true, all rows and columns are kept.
Otherwise, only the ones with different values are kept.
keep_equal : bool, default False
If true, the result keeps values that are equal.
Otherwise, equal values are shown as NaNs.
result_names : tuple, default ('self', 'other')
Set the dataframes names in the comparison.
.. versionadded:: 1.5.0
Returns
-------
Series or DataFrame
If axis is 0 or 'index' the result will be a Series.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
If axis is 1 or 'columns' the result will be a DataFrame.
It will have two columns namely 'self' and 'other'.
See Also
--------
DataFrame.compare : Compare with another DataFrame and show differences.
Notes
-----
Matching NaNs will not appear as a difference.
Examples
--------
>>> s1 = pd.Series(["a", "b", "c", "d", "e"])
>>> s2 = pd.Series(["a", "a", "c", "b", "e"])
Align the differences on columns
>>> s1.compare(s2)
self other
1 b a
3 d b
Stack the differences on indices
>>> s1.compare(s2, align_axis=0)
1 self b
other a
3 self d
other b
dtype: object
Keep all original rows
>>> s1.compare(s2, keep_shape=True)
self other
0 NaN NaN
1 b a
2 NaN NaN
3 d b
4 NaN NaN
Keep all original rows and also all original values
>>> s1.compare(s2, keep_shape=True, keep_equal=True)
self other
0 a a
1 b a
2 c c
3 d b
4 e e
|
@doc(
_shared_docs["compare"],
dedent(
"""
Returns
-------
Series or DataFrame
If axis is 0 or 'index' the result will be a Series.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
If axis is 1 or 'columns' the result will be a DataFrame.
It will have two columns namely 'self' and 'other'.
See Also
--------
DataFrame.compare : Compare with another DataFrame and show differences.
Notes
-----
Matching NaNs will not appear as a difference.
Examples
--------
>>> s1 = pd.Series(["a", "b", "c", "d", "e"])
>>> s2 = pd.Series(["a", "a", "c", "b", "e"])
Align the differences on columns
>>> s1.compare(s2)
self other
1 b a
3 d b
Stack the differences on indices
>>> s1.compare(s2, align_axis=0)
1 self b
other a
3 self d
other b
dtype: object
Keep all original rows
>>> s1.compare(s2, keep_shape=True)
self other
0 NaN NaN
1 b a
2 NaN NaN
3 d b
4 NaN NaN
Keep all original rows and also all original values
>>> s1.compare(s2, keep_shape=True, keep_equal=True)
self other
0 a a
1 b a
2 c c
3 d b
4 e e
"""
),
klass=_shared_doc_kwargs["klass"],
)
def compare(
self,
other: Series,
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
result_names: Suffixes = ("self", "other"),
) -> DataFrame | Series:
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
result_names=result_names,
)
|
(self, other: 'Series', align_axis: 'Axis' = 1, keep_shape: 'bool' = False, keep_equal: 'bool' = False, result_names: 'Suffixes' = ('self', 'other')) -> 'DataFrame | Series'
|
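A brief usage sketch for the ``result_names`` parameter (available since pandas 1.5); the labels chosen here are illustrative.
import pandas as pd

s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["a", "x", "c"])
# Rename the default 'self'/'other' columns of the comparison.
print(s1.compare(s2, result_names=("left", "right")))
#   left right
# 1    b     x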
67,837 |
pandas.core.series
|
corr
|
Compute correlation with `other` Series, excluding missing values.
The two `Series` objects are not required to be the same length and will be
aligned internally before the correlation function is applied.
Parameters
----------
other : Series
Series with which to compute the correlation.
method : {'pearson', 'kendall', 'spearman'} or callable
Method used to compute correlation:
- pearson : Standard correlation coefficient
- kendall : Kendall Tau correlation coefficient
- spearman : Spearman rank correlation
- callable: Callable with input two 1d ndarrays and returning a float.
.. warning::
Note that the returned matrix from corr will have 1 along the
diagonals and will be symmetric regardless of the callable's
behavior.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Correlation with other.
See Also
--------
DataFrame.corr : Compute pairwise correlation between columns.
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Notes
-----
Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations.
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
* `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_
* `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_
Automatic data alignment: as with all pandas operations, the inputs are aligned before the computation;
``corr()`` only considers values with matching indices.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> s1 = pd.Series([.2, .0, .6, .2])
>>> s2 = pd.Series([.3, .6, .0, .1])
>>> s1.corr(s2, method=histogram_intersection)
0.3
Pandas auto-aligns the values with matching indices
>>> s1 = pd.Series([1, 2, 3], index=[0, 1, 2])
>>> s2 = pd.Series([1, 2, 3], index=[2, 1, 0])
>>> s1.corr(s2)
-1.0
|
def corr(
self,
other: Series,
method: CorrelationMethod = "pearson",
min_periods: int | None = None,
) -> float:
"""
Compute correlation with `other` Series, excluding missing values.
The two `Series` objects are not required to be the same length and will be
aligned internally before the correlation function is applied.
Parameters
----------
other : Series
Series with which to compute the correlation.
method : {'pearson', 'kendall', 'spearman'} or callable
Method used to compute correlation:
- pearson : Standard correlation coefficient
- kendall : Kendall Tau correlation coefficient
- spearman : Spearman rank correlation
- callable: Callable with input two 1d ndarrays and returning a float.
.. warning::
Note that the returned matrix from corr will have 1 along the
diagonals and will be symmetric regardless of the callable's
behavior.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Correlation with other.
See Also
--------
DataFrame.corr : Compute pairwise correlation between columns.
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Notes
-----
Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations.
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
* `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_
* `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_
Automatic data alignment: as with all pandas operations, the inputs are aligned before the computation;
``corr()`` only considers values with matching indices.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> s1 = pd.Series([.2, .0, .6, .2])
>>> s2 = pd.Series([.3, .6, .0, .1])
>>> s1.corr(s2, method=histogram_intersection)
0.3
Pandas auto-aligns the values with matching indices
>>> s1 = pd.Series([1, 2, 3], index=[0, 1, 2])
>>> s2 = pd.Series([1, 2, 3], index=[2, 1, 0])
>>> s1.corr(s2)
-1.0
""" # noqa: E501
this, other = self.align(other, join="inner", copy=False)
if len(this) == 0:
return np.nan
this_values = this.to_numpy(dtype=float, na_value=np.nan, copy=False)
other_values = other.to_numpy(dtype=float, na_value=np.nan, copy=False)
if method in ["pearson", "spearman", "kendall"] or callable(method):
return nanops.nancorr(
this_values, other_values, method=method, min_periods=min_periods
)
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
|
(self, other: 'Series', method: 'CorrelationMethod' = 'pearson', min_periods: 'int | None' = None) -> 'float'
|
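A small sketch contrasting ``method`` and ``min_periods`` (the data are illustrative):
import pandas as pd

s1 = pd.Series([1, 2, 3, 4, 5])
s2 = pd.Series([2, 1, 4, 3, 5])
# Rank-based correlation instead of the default Pearson.
print(s1.corr(s2, method="spearman"))  # 0.8
# With fewer overlapping observations than min_periods, nan is returned.
print(s1.corr(s2, min_periods=10))  # nan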
67,838 |
pandas.core.series
|
count
|
Return number of non-NA/null observations in the Series.
Returns
-------
int
Number of non-null values in the Series.
See Also
--------
DataFrame.count : Count non-NA cells for each column or row.
Examples
--------
>>> s = pd.Series([0.0, 1.0, np.nan])
>>> s.count()
2
|
def count(self) -> int:
"""
Return number of non-NA/null observations in the Series.
Returns
-------
int
Number of non-null values in the Series.
See Also
--------
DataFrame.count : Count non-NA cells for each column or row.
Examples
--------
>>> s = pd.Series([0.0, 1.0, np.nan])
>>> s.count()
2
"""
return notna(self._values).sum().astype("int64")
|
(self) -> int
|
67,839 |
pandas.core.series
|
cov
|
Compute covariance with Series, excluding missing values.
The two `Series` objects are not required to be the same length and
will be aligned internally before the covariance is calculated.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
See Also
--------
DataFrame.cov : Compute pairwise covariance of columns.
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874
|
def cov(
self,
other: Series,
min_periods: int | None = None,
ddof: int | None = 1,
) -> float:
"""
Compute covariance with Series, excluding missing values.
The two `Series` objects are not required to be the same length and
will be aligned internally before the covariance is calculated.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
See Also
--------
DataFrame.cov : Compute pairwise covariance of columns.
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874
"""
this, other = self.align(other, join="inner", copy=False)
if len(this) == 0:
return np.nan
this_values = this.to_numpy(dtype=float, na_value=np.nan, copy=False)
other_values = other.to_numpy(dtype=float, na_value=np.nan, copy=False)
return nanops.nancov(
this_values, other_values, min_periods=min_periods, ddof=ddof
)
|
(self, other: pandas.core.series.Series, min_periods: Optional[int] = None, ddof: int | None = 1) -> float
|
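A short sketch of the ``ddof`` parameter; the numbers are chosen so the result is easy to verify by hand (deviation products sum to 4 over 3 observations):
import pandas as pd

s1 = pd.Series([1.0, 2.0, 3.0])
s2 = pd.Series([2.0, 4.0, 6.0])
print(s1.cov(s2))          # sample covariance (ddof=1): 4/2 = 2.0
print(s1.cov(s2, ddof=0))  # population covariance: 4/3 ~ 1.333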
67,840 |
pandas.core.series
|
cummax
|
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
maximum.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
For `Series` this parameter is unused and defaults to 0.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
scalar or Series
Return cumulative maximum of scalar or Series.
See Also
--------
core.window.expanding.Expanding.max : Similar functionality
but ignores ``NaN`` values.
Series.max : Return the maximum over
Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
|
@doc(make_doc("cummax", ndim=1))
def cummax(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs):
return NDFrame.cummax(self, axis, skipna, *args, **kwargs)
|
(self, axis: 'Axis | None' = None, skipna: 'bool' = True, *args, **kwargs)
|
67,841 |
pandas.core.series
|
cummin
|
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
minimum.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
For `Series` this parameter is unused and defaults to 0.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
scalar or Series
Return cumulative minimum of scalar or Series.
See Also
--------
core.window.expanding.Expanding.min : Similar functionality
but ignores ``NaN`` values.
Series.min : Return the minimum over
Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
|
@doc(make_doc("cummin", ndim=1))
def cummin(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs):
return NDFrame.cummin(self, axis, skipna, *args, **kwargs)
|
(self, axis: 'Axis | None' = None, skipna: 'bool' = True, *args, **kwargs)
|
67,842 |
pandas.core.series
|
cumprod
|
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
product.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
For `Series` this parameter is unused and defaults to 0.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
scalar or Series
Return cumulative product of scalar or Series.
See Also
--------
core.window.expanding.Expanding.prod : Similar functionality
but ignores ``NaN`` values.
Series.prod : Return the product over
Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
|
@doc(make_doc("cumprod", 1))
def cumprod(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs):
return NDFrame.cumprod(self, axis, skipna, *args, **kwargs)
|
(self, axis: 'Axis | None' = None, skipna: 'bool' = True, *args, **kwargs)
|
67,843 |
pandas.core.series
|
cumsum
|
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
sum.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
For `Series` this parameter is unused and defaults to 0.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
scalar or Series
Return cumulative sum of scalar or Series.
See Also
--------
core.window.expanding.Expanding.sum : Similar functionality
but ignores ``NaN`` values.
Series.sum : Return the sum over
Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
|
@doc(make_doc("cumsum", ndim=1))
def cumsum(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs):
return NDFrame.cumsum(self, axis, skipna, *args, **kwargs)
|
(self, axis: 'Axis | None' = None, skipna: 'bool' = True, *args, **kwargs)
|
67,845 |
pandas.core.series
|
diff
|
First discrete difference of element.
Calculates the difference of a Series element compared with another
element in the Series (default is element in previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
Returns
-------
Series
First differences of the Series.
See Also
--------
Series.pct_change: Percent change over given number of periods.
Series.shift: Shift index by desired number of periods with an
optional time freq.
DataFrame.diff: First discrete difference of object.
Notes
-----
For boolean dtypes, this uses :meth:`operator.xor` rather than
:meth:`operator.sub`.
The result is calculated according to current dtype in Series,
however dtype of the result is always float64.
Examples
--------
Difference with previous row
>>> s = pd.Series([1, 1, 2, 3, 5, 8])
>>> s.diff()
0 NaN
1 0.0
2 1.0
3 1.0
4 2.0
5 3.0
dtype: float64
Difference with 3rd previous row
>>> s.diff(periods=3)
0 NaN
1 NaN
2 NaN
3 2.0
4 4.0
5 6.0
dtype: float64
Difference with following row
>>> s.diff(periods=-1)
0 0.0
1 -1.0
2 -1.0
3 -2.0
4 -3.0
5 NaN
dtype: float64
Overflow in input dtype
>>> s = pd.Series([1, 0], dtype=np.uint8)
>>> s.diff()
0 NaN
1 255.0
dtype: float64
|
@doc(
klass="Series",
extra_params="",
other_klass="DataFrame",
examples=dedent(
"""
Difference with previous row
>>> s = pd.Series([1, 1, 2, 3, 5, 8])
>>> s.diff()
0 NaN
1 0.0
2 1.0
3 1.0
4 2.0
5 3.0
dtype: float64
Difference with 3rd previous row
>>> s.diff(periods=3)
0 NaN
1 NaN
2 NaN
3 2.0
4 4.0
5 6.0
dtype: float64
Difference with following row
>>> s.diff(periods=-1)
0 0.0
1 -1.0
2 -1.0
3 -2.0
4 -3.0
5 NaN
dtype: float64
Overflow in input dtype
>>> s = pd.Series([1, 0], dtype=np.uint8)
>>> s.diff()
0 NaN
1 255.0
dtype: float64"""
),
)
def diff(self, periods: int = 1) -> Series:
"""
First discrete difference of element.
Calculates the difference of a {klass} element compared with another
element in the {klass} (default is element in previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
{extra_params}
Returns
-------
{klass}
First differences of the Series.
See Also
--------
{klass}.pct_change: Percent change over given number of periods.
{klass}.shift: Shift index by desired number of periods with an
optional time freq.
{other_klass}.diff: First discrete difference of object.
Notes
-----
For boolean dtypes, this uses :meth:`operator.xor` rather than
:meth:`operator.sub`.
The result is calculated according to current dtype in {klass},
however dtype of the result is always float64.
Examples
--------
{examples}
"""
result = algorithms.diff(self._values, periods)
return self._constructor(result, index=self.index, copy=False).__finalize__(
self, method="diff"
)
|
(self, periods: int = 1) -> pandas.core.series.Series
|
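A quick sketch of the boolean behaviour noted above (XOR rather than subtraction); the result has object dtype because of the leading NaN:
import pandas as pd

b = pd.Series([True, True, False, True])
# Each element is XOR-ed with the previous one.
print(b.diff())  # NaN, False, True, True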
67,846 |
pandas.core.series
|
truediv
|
Return Floating division of series and other, element-wise (binary operator `truediv`).
Equivalent to ``series / other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
See Also
--------
Series.rtruediv : Reverse of the Floating division operator, see
`Python documentation
<https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_
for more details.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.divide(b, fill_value=0)
a 1.0
b inf
c inf
d 0.0
e NaN
dtype: float64
|
@Appender(ops.make_flex_doc("truediv", "series"))
def truediv(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, operator.truediv, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
67,848 |
pandas.core.series
|
divmod
|
Return Integer division and modulo of series and other, element-wise (binary operator `divmod`).
Equivalent to ``divmod(series, other)``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
2-Tuple of Series
The result of the operation.
See Also
--------
Series.rdivmod : Reverse of the Integer division and modulo operator, see
`Python documentation
<https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_
for more details.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.divmod(b, fill_value=0)
(a 1.0
b inf
c inf
d 0.0
e NaN
dtype: float64,
a 0.0
b NaN
c NaN
d 0.0
e NaN
dtype: float64)
|
@Appender(ops.make_flex_doc("divmod", "series"))
def divmod(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, divmod, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
67,849 |
pandas.core.series
|
dot
|
Compute the dot product between the Series and the columns of other.
This method computes the dot product between the Series and another
one, or the Series and each column of a DataFrame, or the Series and
each column of an array.
It can also be called using `self @ other`.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the dot product with its columns.
Returns
-------
scalar, Series or numpy.ndarray
Return the dot product of the Series and other if other is a
Series, a Series of the dot products between the Series and each
column of other if other is a DataFrame, or a numpy.ndarray of the
dot products between the Series and each column of the numpy array.
See Also
--------
DataFrame.dot: Compute the matrix product with the DataFrame.
Series.mul: Multiplication of series and other, element-wise.
Notes
-----
The Series and other have to share the same index if other is a Series
or a DataFrame.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3])
>>> other = pd.Series([-1, 2, -3, 4])
>>> s.dot(other)
8
>>> s @ other
8
>>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(df)
0 24
1 14
dtype: int64
>>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(arr)
array([24, 14])
|
def dot(self, other: AnyArrayLike) -> Series | np.ndarray:
"""
Compute the dot product between the Series and the columns of other.
This method computes the dot product between the Series and another
one, or the Series and each column of a DataFrame, or the Series and
each column of an array.
It can also be called using `self @ other`.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the dot product with its columns.
Returns
-------
scalar, Series or numpy.ndarray
Return the dot product of the Series and other if other is a
Series, a Series of the dot products between the Series and each
column of other if other is a DataFrame, or a numpy.ndarray of the
dot products between the Series and each column of the numpy array.
See Also
--------
DataFrame.dot: Compute the matrix product with the DataFrame.
Series.mul: Multiplication of series and other, element-wise.
Notes
-----
The Series and other have to share the same index if other is a Series
or a DataFrame.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3])
>>> other = pd.Series([-1, 2, -3, 4])
>>> s.dot(other)
8
>>> s @ other
8
>>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(df)
0 24
1 14
dtype: int64
>>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(arr)
array([24, 14])
"""
if isinstance(other, (Series, ABCDataFrame)):
common = self.index.union(other.index)
if len(common) > len(self.index) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(index=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[0] != rvals.shape[0]:
raise Exception(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, ABCDataFrame):
return self._constructor(
np.dot(lvals, rvals), index=other.columns, copy=False
).__finalize__(self, method="dot")
elif isinstance(other, Series):
return np.dot(lvals, rvals)
elif isinstance(rvals, np.ndarray):
return np.dot(lvals, rvals)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
|
(self, other: 'AnyArrayLike') -> 'Series | np.ndarray'
|
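A minimal sketch of the DataFrame branch, which requires matching index labels and labels the result by the DataFrame's columns (names here are illustrative):
import pandas as pd

s = pd.Series([1, 2, 3], index=["x", "y", "z"])
df = pd.DataFrame({"a": [1, 0, 0], "b": [0, 1, 1]}, index=["x", "y", "z"])
# Equivalent to s.dot(df); raises ValueError if the indexes do not align.
print(s @ df)  # a 1, b 5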
67,850 |
pandas.core.series
|
drop
|
Return Series with specified index labels removed.
Remove elements of a Series based on specifying the index labels.
When using a multi-index, labels on different levels can be removed
by specifying the level.
Parameters
----------
labels : single label or list-like
Index labels to drop.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
index : single label or list-like
Redundant for application on Series, but 'index' can be used instead
of 'labels'.
columns : single label or list-like
No change is made to the Series; use 'index' or 'labels' instead.
level : int or level name, optional
For MultiIndex, level for which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are dropped.
Returns
-------
Series or None
Series with specified index labels removed or None if ``inplace=True``.
Raises
------
KeyError
If none of the labels are found in the index.
See Also
--------
Series.reindex : Return only specified index labels of Series.
Series.dropna : Return series without null values.
Series.drop_duplicates : Return Series with duplicate values removed.
DataFrame.drop : Drop specified labels from rows or columns.
Examples
--------
>>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s
A 0
B 1
C 2
dtype: int64
Drop labels B and C
>>> s.drop(labels=['B', 'C'])
A 0
dtype: int64
Drop 2nd level label in MultiIndex Series
>>> midx = pd.MultiIndex(levels=[['llama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
llama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop(labels='weight', level=1)
llama speed 45.0
length 1.2
cow speed 30.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
|
def drop(
self,
labels: IndexLabel | None = None,
*,
axis: Axis = 0,
index: IndexLabel | None = None,
columns: IndexLabel | None = None,
level: Level | None = None,
inplace: bool = False,
errors: IgnoreRaise = "raise",
) -> Series | None:
"""
Return Series with specified index labels removed.
Remove elements of a Series based on specifying the index labels.
When using a multi-index, labels on different levels can be removed
by specifying the level.
Parameters
----------
labels : single label or list-like
Index labels to drop.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
index : single label or list-like
Redundant for application on Series, but 'index' can be used instead
of 'labels'.
columns : single label or list-like
No change is made to the Series; use 'index' or 'labels' instead.
level : int or level name, optional
For MultiIndex, level for which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are dropped.
Returns
-------
Series or None
Series with specified index labels removed or None if ``inplace=True``.
Raises
------
KeyError
If none of the labels are found in the index.
See Also
--------
Series.reindex : Return only specified index labels of Series.
Series.dropna : Return series without null values.
Series.drop_duplicates : Return Series with duplicate values removed.
DataFrame.drop : Drop specified labels from rows or columns.
Examples
--------
>>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s
A 0
B 1
C 2
dtype: int64
Drop labels B and C
>>> s.drop(labels=['B', 'C'])
A 0
dtype: int64
Drop 2nd level label in MultiIndex Series
>>> midx = pd.MultiIndex(levels=[['llama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
llama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop(labels='weight', level=1)
llama speed 45.0
length 1.2
cow speed 30.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
|
(self, labels: 'IndexLabel | None' = None, *, axis: 'Axis' = 0, index: 'IndexLabel | None' = None, columns: 'IndexLabel | None' = None, level: 'Level | None' = None, inplace: 'bool' = False, errors: 'IgnoreRaise' = 'raise') -> 'Series | None'
|
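A short sketch of ``errors="ignore"``, which skips labels that are absent from the index instead of raising ``KeyError``:
import pandas as pd

s = pd.Series([0, 1, 2], index=["A", "B", "C"])
# "Z" is not in the index; with errors="ignore" it is silently skipped.
print(s.drop(labels=["B", "Z"], errors="ignore"))  # A 0, C 2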
67,851 |
pandas.core.series
|
drop_duplicates
|
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
ignore_index : bool, default ``False``
If ``True``, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 2.0.0
Returns
-------
Series or None
Series with duplicates dropped or None if ``inplace=True``.
See Also
--------
Index.drop_duplicates : Equivalent method on Index.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Series.duplicated : Related method on Series, indicating duplicate
Series values.
Series.unique : Return unique values as an array.
Examples
--------
Generate a Series with duplicated entries.
>>> s = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama', 'hippo'],
... name='animal')
>>> s
0 llama
1 cow
2 llama
3 beetle
4 llama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behaviour of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates()
0 llama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep='last')
1 cow
3 beetle
4 llama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries.
>>> s.drop_duplicates(keep=False)
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
|
def drop_duplicates(
self,
*,
keep: DropKeep = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> Series | None:
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
ignore_index : bool, default ``False``
If ``True``, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 2.0.0
Returns
-------
Series or None
Series with duplicates dropped or None if ``inplace=True``.
See Also
--------
Index.drop_duplicates : Equivalent method on Index.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Series.duplicated : Related method on Series, indicating duplicate
Series values.
Series.unique : Return unique values as an array.
Examples
--------
Generate a Series with duplicated entries.
>>> s = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama', 'hippo'],
... name='animal')
>>> s
0 llama
1 cow
2 llama
3 beetle
4 llama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behaviour of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates()
0 llama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep='last')
1 cow
3 beetle
4 llama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries.
>>> s.drop_duplicates(keep=False)
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
result = super().drop_duplicates(keep=keep)
if ignore_index:
result.index = default_index(len(result))
if inplace:
self._update_inplace(result)
return None
else:
return result
|
(self, *, keep: 'DropKeep' = 'first', inplace: 'bool' = False, ignore_index: 'bool' = False) -> 'Series | None'
|
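A brief sketch of ``ignore_index`` (added in pandas 2.0), which relabels the result with a default 0..n-1 index instead of keeping the original labels:
import pandas as pd

s = pd.Series(["a", "b", "a"], index=[10, 11, 12])
print(s.drop_duplicates(ignore_index=True))  # 0 a, 1 b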
67,853 |
pandas.core.series
|
dropna
|
Return a new Series with missing values removed.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
inplace : bool, default False
If True, do operation inplace and return None.
how : str, optional
Not in use. Kept for compatibility.
ignore_index : bool, default ``False``
If ``True``, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 2.0.0
Returns
-------
Series or None
Series with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
Series.isna: Indicate missing values.
Series.notna : Indicate existing (non-missing) values.
Series.fillna : Replace missing values.
DataFrame.dropna : Drop rows or columns which contain NA values.
Index.dropna : Drop missing indices.
Examples
--------
>>> ser = pd.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
dtype: float64
Empty strings are not considered NA values. ``None`` is considered an
NA value.
>>> ser = pd.Series([np.nan, 2, pd.NaT, '', None, 'I stay'])
>>> ser
0 NaN
1 2
2 NaT
3
4 None
5 I stay
dtype: object
>>> ser.dropna()
1 2
3
5 I stay
dtype: object
|
def dropna(
self,
*,
axis: Axis = 0,
inplace: bool = False,
how: AnyAll | None = None,
ignore_index: bool = False,
) -> Series | None:
"""
Return a new Series with missing values removed.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
inplace : bool, default False
If True, do operation inplace and return None.
how : str, optional
Not in use. Kept for compatibility.
ignore_index : bool, default ``False``
If ``True``, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 2.0.0
Returns
-------
Series or None
Series with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
Series.isna: Indicate missing values.
Series.notna : Indicate existing (non-missing) values.
Series.fillna : Replace missing values.
DataFrame.dropna : Drop rows or columns which contain NA values.
Index.dropna : Drop missing indices.
Examples
--------
>>> ser = pd.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
dtype: float64
Empty strings are not considered NA values. ``None`` is considered an
NA value.
>>> ser = pd.Series([np.nan, 2, pd.NaT, '', None, 'I stay'])
>>> ser
0 NaN
1 2
2 NaT
3
4 None
5 I stay
dtype: object
>>> ser.dropna()
1 2
3
5 I stay
dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
ignore_index = validate_bool_kwarg(ignore_index, "ignore_index")
# Validate the axis parameter
self._get_axis_number(axis or 0)
if self._can_hold_na:
result = remove_na_arraylike(self)
else:
if not inplace:
result = self.copy(deep=None)
else:
result = self
if ignore_index:
result.index = default_index(len(result))
if inplace:
return self._update_inplace(result)
else:
return result
|
(self, *, axis: 'Axis' = 0, inplace: 'bool' = False, how: 'AnyAll | None' = None, ignore_index: 'bool' = False) -> 'Series | None'
|
67,854 |
pandas.core.series
|
duplicated
|
Indicate duplicate Series values.
Duplicated values are indicated as ``True`` values in the resulting
Series. Either all duplicates, all except the first or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
Method to handle dropping duplicates:
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
Series[bool]
Series indicating whether each value has occurred in the
preceding values.
See Also
--------
Index.duplicated : Equivalent method on pandas.Index.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Series.drop_duplicates : Remove duplicate values from Series.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set on False and all others on True:
>>> animals = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama'])
>>> animals.duplicated()
0 False
1 False
2 True
3 False
4 True
dtype: bool
which is equivalent to
>>> animals.duplicated(keep='first')
0 False
1 False
2 True
3 False
4 True
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
>>> animals.duplicated(keep='last')
0 True
1 False
2 True
3 False
4 False
dtype: bool
By setting keep on ``False``, all duplicates are True:
>>> animals.duplicated(keep=False)
0 True
1 False
2 True
3 False
4 True
dtype: bool
|
def duplicated(self, keep: DropKeep = "first") -> Series:
"""
Indicate duplicate Series values.
Duplicated values are indicated as ``True`` values in the resulting
Series. Either all duplicates, all except the first or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
Method to handle dropping duplicates:
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
Series[bool]
Series indicating whether each value has occurred in the
preceding values.
See Also
--------
Index.duplicated : Equivalent method on pandas.Index.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Series.drop_duplicates : Remove duplicate values from Series.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set on False and all others on True:
>>> animals = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama'])
>>> animals.duplicated()
0 False
1 False
2 True
3 False
4 True
dtype: bool
which is equivalent to
>>> animals.duplicated(keep='first')
0 False
1 False
2 True
3 False
4 True
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
>>> animals.duplicated(keep='last')
0 True
1 False
2 True
3 False
4 False
dtype: bool
By setting keep on ``False``, all duplicates are True:
>>> animals.duplicated(keep=False)
0 True
1 False
2 True
3 False
4 True
dtype: bool
"""
res = self._duplicated(keep=keep)
result = self._constructor(res, index=self.index, copy=False)
return result.__finalize__(self, method="duplicated")
|
(self, keep: 'DropKeep' = 'first') -> 'Series'
|
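A small sketch showing the boolean result used directly as a mask, which is how ``duplicated`` relates to ``drop_duplicates``:
import pandas as pd

animals = pd.Series(["llama", "cow", "llama"])
# Inverting the mask keeps only the first occurrence of each value.
print(animals[~animals.duplicated()])  # 0 llama, 1 cow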
67,855 |
pandas.core.series
|
eq
|
Return Equal to of series and other, element-wise (binary operator `eq`).
Equivalent to ``series == other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.eq(b, fill_value=0)
a True
b False
c False
d False
e False
dtype: bool
|
@Appender(ops.make_flex_doc("eq", "series"))
def eq(
self,
other,
level: Level | None = None,
fill_value: float | None = None,
axis: Axis = 0,
) -> Series:
return self._flex_method(
other, operator.eq, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level: 'Level | None' = None, fill_value: 'float | None' = None, axis: 'Axis' = 0) -> 'Series'
|
67,859 |
pandas.core.series
|
explode
|
Transform each element of a list-like to a row.
Parameters
----------
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
Returns
-------
Series
Exploded lists to rows; index will be duplicated for these rows.
See Also
--------
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in a np.nan for that row. In addition, the ordering of elements in
the output will be non-deterministic when exploding sets.
Reference :ref:`the user guide <reshaping.explode>` for more examples.
Examples
--------
>>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])
>>> s
0 [1, 2, 3]
1 foo
2 []
3 [3, 4]
dtype: object
>>> s.explode()
0 1
0 2
0 3
1 foo
2 NaN
3 3
3 4
dtype: object
|
def explode(self, ignore_index: bool = False) -> Series:
"""
Transform each element of a list-like to a row.
Parameters
----------
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
Returns
-------
Series
Exploded lists to rows; index will be duplicated for these rows.
See Also
--------
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in a np.nan for that row. In addition, the ordering of elements in
the output will be non-deterministic when exploding sets.
Reference :ref:`the user guide <reshaping.explode>` for more examples.
Examples
--------
>>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])
>>> s
0 [1, 2, 3]
1 foo
2 []
3 [3, 4]
dtype: object
>>> s.explode()
0 1
0 2
0 3
1 foo
2 NaN
3 3
3 4
dtype: object
"""
if isinstance(self.dtype, ExtensionDtype):
values, counts = self._values._explode()
elif len(self) and is_object_dtype(self.dtype):
values, counts = reshape.explode(np.asarray(self._values))
else:
result = self.copy()
return result.reset_index(drop=True) if ignore_index else result
if ignore_index:
index: Index = default_index(len(values))
else:
index = self.index.repeat(counts)
return self._constructor(values, index=index, name=self.name, copy=False)
|
(self, ignore_index: bool = False) -> pandas.core.series.Series
|
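A brief sketch of ``ignore_index``, which replaces the repeated source labels with a fresh default index:
import pandas as pd

s = pd.Series([[1, 2], [3]])
print(s.explode(ignore_index=True))  # 0 1, 1 2, 2 3  (dtype object)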
67,866 |
pandas.core.series
|
floordiv
|
Return Integer division of series and other, element-wise (binary operator `floordiv`).
Equivalent to ``series // other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
See Also
--------
Series.rfloordiv : Reverse of the Integer division operator, see
`Python documentation
<https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_
for more details.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.floordiv(b, fill_value=0)
a 1.0
b inf
c inf
d 0.0
e NaN
dtype: float64
|
@Appender(ops.make_flex_doc("floordiv", "series"))
def floordiv(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, operator.floordiv, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
67,867 |
pandas.core.series
|
ge
|
Return Greater than or equal to of series and other, element-wise (binary operator `ge`).
Equivalent to ``series >= other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
e 1.0
dtype: float64
>>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f'])
>>> b
a 0.0
b 1.0
c 2.0
d NaN
f 1.0
dtype: float64
>>> a.ge(b, fill_value=0)
a True
b True
c False
d False
e True
f False
dtype: bool
|
@Appender(ops.make_flex_doc("ge", "series"))
def ge(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, operator.ge, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
67,869 |
pandas.core.series
|
groupby
|
Group Series using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, pd.Grouper or list of such
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If a list or ndarray of length
equal to the selected axis is passed (see the `groupby user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#splitting-an-object-into-groups>`_),
the values are used as-is to determine the groups. A label or list
of labels may be passed to group by the columns in ``self``.
Notice that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1). For `Series` this parameter
is unused and defaults to 0.
.. deprecated:: 2.1.0
Will be removed and behave like axis=0 in a future version.
For ``axis=1``, do ``frame.T.groupby(...)`` instead.
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels. Do not specify both ``by`` and ``level``.
as_index : bool, default True
Return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output. This argument has no effect
on filtrations (see the `filtrations in the user guide
<https://pandas.pydata.org/docs/dev/user_guide/groupby.html#filtration>`_),
such as ``head()``, ``tail()``, ``nth()`` and in transformations
(see the `transformations in the user guide
<https://pandas.pydata.org/docs/dev/user_guide/groupby.html#transformation>`_).
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group. If False,
the groups will appear in the same order as they did in the original DataFrame.
This argument has no effect on filtrations (see the `filtrations in the user guide
<https://pandas.pydata.org/docs/dev/user_guide/groupby.html#filtration>`_),
such as ``head()``, ``tail()``, ``nth()`` and in transformations
(see the `transformations in the user guide
<https://pandas.pydata.org/docs/dev/user_guide/groupby.html#transformation>`_).
.. versionchanged:: 2.0.0
Specifying ``sort=False`` with an ordered categorical grouper will no
longer sort the values.
group_keys : bool, default True
When calling apply and the ``by`` argument produces a like-indexed
(i.e. :ref:`a transform <groupby.transform>`) result, add group keys to
index to identify pieces. By default group keys are not included
when the result's index (and column) labels match the inputs, and
are included otherwise.
.. versionchanged:: 1.5.0
Warns that ``group_keys`` will no longer be ignored when the
result from ``apply`` is a like-indexed Series or DataFrame.
Specify ``group_keys`` explicitly to include the group keys or
not.
.. versionchanged:: 2.0.0
``group_keys`` now defaults to ``True``.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. deprecated:: 2.1.0
The default value will change to True in a future version of pandas.
dropna : bool, default True
If True, and if group keys contain NA values, NA values together
with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
pandas.api.typing.SeriesGroupBy
Returns a groupby object that contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`__ for more
detailed usage and examples, including splitting an object into groups,
iterating through groups, selecting a group, aggregation, and more.
Examples
--------
>>> ser = pd.Series([390., 350., 30., 20.],
... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... name="Max Speed")
>>> ser
Falcon 390.0
Falcon 350.0
Parrot 30.0
Parrot 20.0
Name: Max Speed, dtype: float64
>>> ser.groupby(["a", "b", "a", "b"]).mean()
a 210.0
b 185.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).mean()
Falcon 370.0
Parrot 25.0
Name: Max Speed, dtype: float64
>>> ser.groupby(ser > 100).mean()
Max Speed
False 25.0
True 370.0
Name: Max Speed, dtype: float64
**Grouping by Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed")
>>> ser
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).mean()
Animal
Falcon 370.0
Parrot 25.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level="Type").mean()
Type
Captive 210.0
Wild 185.0
Name: Max Speed, dtype: float64
We can also choose to include `NA` in group keys or not by defining
`dropna` parameter, the default setting is `True`.
>>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan])
>>> ser.groupby(level=0).sum()
a 3
b 3
dtype: int64
>>> ser.groupby(level=0, dropna=False).sum()
a 3
b 3
NaN 3
dtype: int64
>>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot']
>>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed")
>>> ser.groupby(["a", "b", "a", np.nan]).mean()
a 210.0
b 350.0
Name: Max Speed, dtype: float64
>>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()
a 210.0
b 350.0
NaN 20.0
Name: Max Speed, dtype: float64
|
@Appender(
dedent(
"""
Examples
--------
>>> ser = pd.Series([390., 350., 30., 20.],
... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... name="Max Speed")
>>> ser
Falcon 390.0
Falcon 350.0
Parrot 30.0
Parrot 20.0
Name: Max Speed, dtype: float64
>>> ser.groupby(["a", "b", "a", "b"]).mean()
a 210.0
b 185.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).mean()
Falcon 370.0
Parrot 25.0
Name: Max Speed, dtype: float64
>>> ser.groupby(ser > 100).mean()
Max Speed
False 25.0
True 370.0
Name: Max Speed, dtype: float64
**Grouping by Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed")
>>> ser
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).mean()
Animal
Falcon 370.0
Parrot 25.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level="Type").mean()
Type
Captive 210.0
Wild 185.0
Name: Max Speed, dtype: float64
We can also choose to include `NA` in group keys or not by defining
`dropna` parameter, the default setting is `True`.
>>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan])
>>> ser.groupby(level=0).sum()
a 3
b 3
dtype: int64
>>> ser.groupby(level=0, dropna=False).sum()
a 3
b 3
NaN 3
dtype: int64
>>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot']
>>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed")
>>> ser.groupby(["a", "b", "a", np.nan]).mean()
a 210.0
b 350.0
Name: Max Speed, dtype: float64
>>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()
a 210.0
b 350.0
NaN 20.0
Name: Max Speed, dtype: float64
"""
)
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis: Axis = 0,
level: IndexLabel | None = None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
observed: bool | lib.NoDefault = lib.no_default,
dropna: bool = True,
) -> SeriesGroupBy:
from pandas.core.groupby.generic import SeriesGroupBy
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
if not as_index:
raise TypeError("as_index=False only valid with DataFrame")
axis = self._get_axis_number(axis)
return SeriesGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
observed=observed,
dropna=dropna,
)
|
(self, by=None, axis: 'Axis' = 0, level: 'IndexLabel | None' = None, as_index: 'bool' = True, sort: 'bool' = True, group_keys: 'bool' = True, observed: 'bool | lib.NoDefault' = <no_default>, dropna: 'bool' = True) -> 'SeriesGroupBy'
|
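The returned ``SeriesGroupBy`` supports the usual aggregation and transformation API; a minimal sketch (illustrative):
import pandas as pd

ser = pd.Series([390.0, 350.0, 30.0, 20.0],
                index=["Falcon", "Falcon", "Parrot", "Parrot"],
                name="Max Speed")
gb = ser.groupby(level=0)

print(gb.agg(["min", "max", "mean"]))  # one row per group, one column per stat
print(gb.transform("mean"))            # group means broadcast back to the original index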
67,870 |
pandas.core.series
|
gt
|
Return Greater than of series and other, element-wise (binary operator `gt`).
Equivalent to ``series > other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
e 1.0
dtype: float64
>>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f'])
>>> b
a 0.0
b 1.0
c 2.0
d NaN
f 1.0
dtype: float64
>>> a.gt(b, fill_value=0)
a True
b False
c False
d False
e True
f False
dtype: bool
|
@Appender(ops.make_flex_doc("gt", "series"))
def gt(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, operator.gt, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
67,872 |
pandas.plotting._core
|
hist_series
|
Draw histogram of the input series using matplotlib.
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups.
ax : matplotlib axis object
If not passed, uses gca().
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels.
figsize : tuple, default None
Figure size in inches by default.
bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
legend : bool, default False
Whether to show the legend.
**kwargs
To be passed to the actual plotting function.
Returns
-------
matplotlib.AxesSubplot
A histogram plot.
See Also
--------
matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
Examples
--------
For Series:
.. plot::
:context: close-figs
>>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
>>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst)
>>> hist = ser.hist()
For Groupby:
.. plot::
:context: close-figs
>>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
>>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst)
>>> hist = ser.groupby(level=0).hist()
|
def hist_series(
self: Series,
by=None,
ax=None,
grid: bool = True,
xlabelsize: int | None = None,
xrot: float | None = None,
ylabelsize: int | None = None,
yrot: float | None = None,
figsize: tuple[int, int] | None = None,
bins: int | Sequence[int] = 10,
backend: str | None = None,
legend: bool = False,
**kwargs,
):
"""
Draw histogram of the input series using matplotlib.
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups.
ax : matplotlib axis object
If not passed, uses gca().
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels.
figsize : tuple, default None
Figure size in inches by default.
bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
legend : bool, default False
Whether to show the legend.
**kwargs
To be passed to the actual plotting function.
Returns
-------
matplotlib.AxesSubplot
A histogram plot.
See Also
--------
matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
Examples
--------
For Series:
.. plot::
:context: close-figs
>>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
>>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst)
>>> hist = ser.hist()
For Groupby:
.. plot::
:context: close-figs
>>> lst = ['a', 'a', 'a', 'b', 'b', 'b']
>>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst)
>>> hist = ser.groupby(level=0).hist()
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.hist_series(
self,
by=by,
ax=ax,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
figsize=figsize,
bins=bins,
legend=legend,
**kwargs,
)
|
(self: 'Series', by=None, ax=None, grid: 'bool' = True, xlabelsize: 'int | None' = None, xrot: 'float | None' = None, ylabelsize: 'int | None' = None, yrot: 'float | None' = None, figsize: 'tuple[int, int] | None' = None, bins: 'int | Sequence[int]' = 10, backend: 'str | None' = None, legend: 'bool' = False, **kwargs)
|
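A small usage sketch (assumes matplotlib is installed): pass an explicit Axes via ``ax`` and a sequence of bin edges via ``bins``, which is then used unmodified.
import matplotlib.pyplot as plt
import pandas as pd

ser = pd.Series([1, 2, 2, 4, 6, 6])
fig, ax = plt.subplots(figsize=(4, 3))
ser.hist(ax=ax, bins=[0, 2, 4, 6, 8], grid=False)  # explicit edges, no grid lines
ax.set_xlabel("value")
plt.show()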
67,873 |
pandas.core.series
|
idxmax
|
Return the row label of the maximum value.
If multiple values equal the maximum, the first row label with that
value is returned.
Parameters
----------
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Index
Label of the maximum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmax : Return indices of the maximum values
along the given axis.
DataFrame.idxmax : Return index of first occurrence of maximum
over requested axis.
Series.idxmin : Return index *label* of the first occurrence
of minimum of values.
Notes
-----
This method is the Series version of ``ndarray.argmax``. This method
returns the label of the maximum, while ``ndarray.argmax`` returns
the position. To get the position, use ``series.values.argmax()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 3, 4],
... index=['A', 'B', 'C', 'D', 'E'])
>>> s
A 1.0
B NaN
C 4.0
D 3.0
E 4.0
dtype: float64
>>> s.idxmax()
'C'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmax(skipna=False)
nan
|
def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable:
"""
Return the row label of the maximum value.
If multiple values equal the maximum, the first row label with that
value is returned.
Parameters
----------
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Index
Label of the maximum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmax : Return indices of the maximum values
along the given axis.
DataFrame.idxmax : Return index of first occurrence of maximum
over requested axis.
Series.idxmin : Return index *label* of the first occurrence
of minimum of values.
Notes
-----
This method is the Series version of ``ndarray.argmax``. This method
returns the label of the maximum, while ``ndarray.argmax`` returns
the position. To get the position, use ``series.values.argmax()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 3, 4],
... index=['A', 'B', 'C', 'D', 'E'])
>>> s
A 1.0
B NaN
C 4.0
D 3.0
E 4.0
dtype: float64
>>> s.idxmax()
'C'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmax(skipna=False)
nan
"""
axis = self._get_axis_number(axis)
with warnings.catch_warnings():
# TODO(3.0): this catching/filtering can be removed
# ignore warning produced by argmax since we will issue a different
# warning for idxmax
warnings.simplefilter("ignore")
i = self.argmax(axis, skipna, *args, **kwargs)
if i == -1:
# GH#43587 give correct NA value for Index.
warnings.warn(
f"The behavior of {type(self).__name__}.idxmax with all-NA "
"values, or any-NA and skipna=False, is deprecated. In a future "
"version this will raise ValueError",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.index._na_value
return self.index[i]
|
(self, axis: 'Axis' = 0, skipna: 'bool' = True, *args, **kwargs) -> 'Hashable'
|
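To make the label-versus-position distinction concrete, a short sketch (illustrative): ``Series.argmax`` returns the integer position and skips NaN like ``idxmax``, whereas plain ``ndarray.argmax`` would report the position of a NaN.
import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 4.0, 3.0, 4.0], index=list("ABCDE"))
print(s.idxmax())  # 'C' -- first *label* holding the maximum
print(s.argmax())  # 2   -- its integer *position*, NaN skipped
# Note: s.values.argmax() would return 1 here, because numpy's argmax
# treats NaN as the maximum; prefer Series.argmax when NaNs may be present.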
67,874 |
pandas.core.series
|
idxmin
|
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
|
def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable:
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
"""
axis = self._get_axis_number(axis)
with warnings.catch_warnings():
# TODO(3.0): this catching/filtering can be removed
# ignore warning produced by argmin since we will issue a different
# warning for idxmin
warnings.simplefilter("ignore")
i = self.argmin(axis, skipna, *args, **kwargs)
if i == -1:
# GH#43587 give correct NA value for Index.
warnings.warn(
f"The behavior of {type(self).__name__}.idxmin with all-NA "
"values, or any-NA and skipna=False, is deprecated. In a future "
"version this will raise ValueError",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.index._na_value
return self.index[i]
|
(self, axis: 'Axis' = 0, skipna: 'bool' = True, *args, **kwargs) -> 'Hashable'
|
67,876 |
pandas.core.series
|
info
|
Print a concise summary of a Series.
This method prints information about a Series including
the index dtype, non-null values and memory usage.
.. versionadded:: 1.4.0
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
memory_usage : bool, str, optional
Specifies whether total memory usage of the Series
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always show memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
made based on the column dtype and number of rows, assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources. See the
:ref:`Frequently Asked Questions <df-memory-usage>` for more
details.
show_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the Series is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a Series and returns None.
See Also
--------
Series.describe: Generate descriptive statistics of Series.
Series.memory_usage: Memory usage of Series.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> s = pd.Series(text_values, index=int_values)
>>> s.info()
<class 'pandas.core.series.Series'>
Index: 5 entries, 1 to 5
Series name: None
Non-Null Count Dtype
-------------- -----
5 non-null object
dtypes: object(1)
memory usage: 80.0+ bytes
Prints a summary excluding information about its values:
>>> s.info(verbose=False)
<class 'pandas.core.series.Series'>
Index: 5 entries, 1 to 5
dtypes: object(1)
memory usage: 80.0+ bytes
Pipe output of Series.info to buffer instead of sys.stdout, get
buffer content and writes to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> s.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, especially
useful for big Series and for fine-tuning memory optimization:
>>> s = pd.Series(np.random.choice(['a', 'b', 'c'], 10 ** 6))
>>> s.info()
<class 'pandas.core.series.Series'>
RangeIndex: 1000000 entries, 0 to 999999
Series name: None
Non-Null Count Dtype
-------------- -----
1000000 non-null object
dtypes: object(1)
memory usage: 7.6+ MB
>>> s.info(memory_usage='deep')
<class 'pandas.core.series.Series'>
RangeIndex: 1000000 entries, 0 to 999999
Series name: None
Non-Null Count Dtype
-------------- -----
1000000 non-null object
dtypes: object(1)
memory usage: 55.3 MB
|
@doc(INFO_DOCSTRING, **series_sub_kwargs)
def info(
self,
verbose: bool | None = None,
buf: IO[str] | None = None,
max_cols: int | None = None,
memory_usage: bool | str | None = None,
show_counts: bool = True,
) -> None:
return SeriesInfo(self, memory_usage).render(
buf=buf,
max_cols=max_cols,
verbose=verbose,
show_counts=show_counts,
)
|
(self, verbose: Optional[bool] = None, buf: Optional[IO[str]] = None, max_cols: Optional[int] = None, memory_usage: Union[bool, str, NoneType] = None, show_counts: bool = True) -> NoneType
|
67,878 |
pandas.core.series
|
isin
|
Whether elements in Series are contained in `values`.
Return a boolean Series showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
Series
Series of booleans indicating if each element is in values.
Raises
------
TypeError
* If `values` is a string
See Also
--------
DataFrame.isin : Equivalent method on DataFrame.
Examples
--------
>>> s = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'llama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
To invert the boolean values, use the ``~`` operator:
>>> ~s.isin(['cow', 'llama'])
0 False
1 False
2 False
3 True
4 False
5 True
Name: animal, dtype: bool
Passing a single string as ``s.isin('llama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['llama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Strings and integers are distinct and are therefore not comparable:
>>> pd.Series([1]).isin(['1'])
0 False
dtype: bool
>>> pd.Series([1.1]).isin(['1.1'])
0 False
dtype: bool
|
def isin(self, values) -> Series:
"""
Whether elements in Series are contained in `values`.
Return a boolean Series showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
Series
Series of booleans indicating if each element is in values.
Raises
------
TypeError
* If `values` is a string
See Also
--------
DataFrame.isin : Equivalent method on DataFrame.
Examples
--------
>>> s = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'llama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
To invert the boolean values, use the ``~`` operator:
>>> ~s.isin(['cow', 'llama'])
0 False
1 False
2 False
3 True
4 False
5 True
Name: animal, dtype: bool
Passing a single string as ``s.isin('llama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['llama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Strings and integers are distinct and are therefore not comparable:
>>> pd.Series([1]).isin(['1'])
0 False
dtype: bool
>>> pd.Series([1.1]).isin(['1.1'])
0 False
dtype: bool
"""
result = algorithms.isin(self._values, values)
return self._constructor(result, index=self.index, copy=False).__finalize__(
self, method="isin"
)
|
(self, values) -> pandas.core.series.Series
|
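Because the comparison is exact, mixed string/number lookups silently return all-False; cast first if that is not what you want. A minimal sketch:
import pandas as pd

s = pd.Series([1, 2, 3])
print(s.isin(["1", "2"]))              # all False: int values never equal str values
print(s.astype(str).isin(["1", "2"]))  # True, True, False after casting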
67,879 |
pandas.core.series
|
isna
|
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
Series
Mask of bool values for each element in Series that
indicates whether an element is an NA value.
See Also
--------
Series.isnull : Alias of isna.
Series.notna : Boolean inverse of isna.
Series.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame(dict(age=[5, 6, np.nan],
... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... name=['Alfred', 'Batman', ''],
... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.nan])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
|
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type]
def isna(self) -> Series:
return NDFrame.isna(self)
|
(self) -> pandas.core.series.Series
|
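The boolean mask composes directly with indexing; a small sketch:
import numpy as np
import pandas as pd

ser = pd.Series([5.0, np.nan, 7.0, np.nan])
print(ser[ser.isna()])   # only the missing entries
print(ser[ser.notna()])  # the complement, equivalent to ser.dropna() here
print(ser.isna().sum())  # count of missing values: 2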
67,880 |
pandas.core.series
|
isnull
|
Series.isnull is an alias for Series.isna.
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
Series
Mask of bool values for each element in Series that
indicates whether an element is an NA value.
See Also
--------
Series.isnull : Alias of isna.
Series.notna : Boolean inverse of isna.
Series.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame(dict(age=[5, 6, np.nan],
... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... name=['Alfred', 'Batman', ''],
... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.nan])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
|
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type]
def isnull(self) -> Series:
"""
Series.isnull is an alias for Series.isna.
"""
return super().isnull()
|
(self) -> pandas.core.series.Series
|
67,882 |
pandas.core.series
|
items
|
Lazily iterate over (index, value) tuples.
This method returns an iterable tuple (index, value). This is
convenient if you want to create a lazy iterator.
Returns
-------
iterable
Iterable of tuples containing the (index, value) pairs from a
Series.
See Also
--------
DataFrame.items : Iterate over (column name, Series) pairs.
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.
Examples
--------
>>> s = pd.Series(['A', 'B', 'C'])
>>> for index, value in s.items():
... print(f"Index : {index}, Value : {value}")
Index : 0, Value : A
Index : 1, Value : B
Index : 2, Value : C
|
def items(self) -> Iterable[tuple[Hashable, Any]]:
"""
Lazily iterate over (index, value) tuples.
This method returns an iterable tuple (index, value). This is
convenient if you want to create a lazy iterator.
Returns
-------
iterable
Iterable of tuples containing the (index, value) pairs from a
Series.
See Also
--------
DataFrame.items : Iterate over (column name, Series) pairs.
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.
Examples
--------
>>> s = pd.Series(['A', 'B', 'C'])
>>> for index, value in s.items():
... print(f"Index : {index}, Value : {value}")
Index : 0, Value : A
Index : 1, Value : B
Index : 2, Value : C
"""
return zip(iter(self.index), iter(self))
|
(self) -> collections.abc.Iterable[tuple[collections.abc.Hashable, typing.Any]]
|
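Since the iterator is lazy, materialize it only when needed; for example, a plain dict falls out directly:
import pandas as pd

s = pd.Series([10, 20, 30], index=["a", "b", "c"])
pairs = s.items()   # a zip object, consumed on iteration
print(dict(pairs))  # {'a': 10, 'b': 20, 'c': 30}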
67,883 |
pandas.core.series
|
keys
|
Return alias for index.
Returns
-------
Index
Index of the Series.
Examples
--------
>>> s = pd.Series([1, 2, 3], index=[0, 1, 2])
>>> s.keys()
Index([0, 1, 2], dtype='int64')
|
def keys(self) -> Index:
"""
Return alias for index.
Returns
-------
Index
Index of the Series.
Examples
--------
>>> s = pd.Series([1, 2, 3], index=[0, 1, 2])
>>> s.keys()
Index([0, 1, 2], dtype='int64')
"""
return self.index
|
(self) -> pandas.core.indexes.base.Index
|
67,884 |
pandas.core.series
|
kurt
|
Return unbiased kurtosis over requested axis.
Kurtosis obtained using Fisher's definition of
kurtosis (kurtosis of normal == 0.0). Normalized by N-1.
Parameters
----------
axis : {index (0)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
scalar
Examples
--------
>>> s = pd.Series([1, 2, 2, 3], index=['cat', 'dog', 'dog', 'mouse'])
>>> s
cat 1
dog 2
dog 2
mouse 3
dtype: int64
>>> s.kurt()
1.5
With a DataFrame
>>> df = pd.DataFrame({'a': [1, 2, 2, 3], 'b': [3, 4, 4, 4]},
... index=['cat', 'dog', 'dog', 'mouse'])
>>> df
a b
cat 1 3
dog 2 4
dog 2 4
mouse 3 4
>>> df.kurt()
a 1.5
b 4.0
dtype: float64
With axis=None
>>> df.kurt(axis=None).round(6)
-0.988693
Using axis=1
>>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [3, 4], 'd': [1, 2]},
... index=['cat', 'dog'])
>>> df.kurt(axis=1)
cat -6.0
dog -6.0
dtype: float64
|
@doc(make_doc("kurt", ndim=1))
def kurt(
self,
axis: Axis | None = 0,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
):
return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs)
|
(self, axis: 'Axis | None' = 0, skipna: 'bool' = True, numeric_only: 'bool' = False, **kwargs)
|
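As an illustrative cross-check of the documented value 1.5, the bias-corrected (adjusted Fisher-Pearson) sample kurtosis can be computed by hand; this reimplements the standard estimator, not pandas' internal code path.
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 2, 3])
x = s.to_numpy(dtype=float)
n = x.size
m2 = ((x - x.mean()) ** 2).mean()  # second central moment
m4 = ((x - x.mean()) ** 4).mean()  # fourth central moment
g2 = m4 / m2**2 - 3                # biased excess kurtosis
G2 = (n - 1) / ((n - 2) * (n - 3)) * ((n + 1) * g2 + 6)  # bias-corrected

print(G2)        # 1.5
print(s.kurt())  # 1.5 -- matches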
67,888 |
pandas.core.series
|
le
|
Return Less than or equal to of series and other, element-wise (binary operator `le`).
Equivalent to ``series <= other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
e 1.0
dtype: float64
>>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f'])
>>> b
a 0.0
b 1.0
c 2.0
d NaN
f 1.0
dtype: float64
>>> a.le(b, fill_value=0)
a False
b True
c True
d False
e False
f True
dtype: bool
|
@Appender(ops.make_flex_doc("le", "series"))
def le(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, operator.le, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
67,889 |
pandas.core.series
|
lt
|
Return Less than of series and other, element-wise (binary operator `lt`).
Equivalent to ``series < other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
e 1.0
dtype: float64
>>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f'])
>>> b
a 0.0
b 1.0
c 2.0
d NaN
f 1.0
dtype: float64
>>> a.lt(b, fill_value=0)
a False
b False
c True
d False
e False
f True
dtype: bool
|
@Appender(ops.make_flex_doc("lt", "series"))
def lt(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, operator.lt, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
67,890 |
pandas.core.series
|
map
|
Map values of Series according to an input mapping or function.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict`` or
a :class:`Series`.
Parameters
----------
arg : function, collections.abc.Mapping subclass or Series
Mapping correspondence.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values, without passing them to the
mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
Series.replace: Replace values given in `to_replace` with `value`.
DataFrame.apply : Apply a function row-/column-wise.
DataFrame.map : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``NaN``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``NaN``.
Examples
--------
>>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
>>> s
0 cat
1 dog
2 NaN
3 rabbit
dtype: object
``map`` accepts a ``dict`` or a ``Series``. Values that are not found
in the ``dict`` are converted to ``NaN``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 NaN
3 NaN
dtype: object
It also accepts a function:
>>> s.map('I am a {}'.format)
0 I am a cat
1 I am a dog
2 I am a nan
3 I am a rabbit
dtype: object
To avoid applying the function to missing values (and keep them as
``NaN``) ``na_action='ignore'`` can be used:
>>> s.map('I am a {}'.format, na_action='ignore')
0 I am a cat
1 I am a dog
2 NaN
3 I am a rabbit
dtype: object
|
def map(
self,
arg: Callable | Mapping | Series,
na_action: Literal["ignore"] | None = None,
) -> Series:
"""
Map values of Series according to an input mapping or function.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict`` or
a :class:`Series`.
Parameters
----------
arg : function, collections.abc.Mapping subclass or Series
Mapping correspondence.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values, without passing them to the
mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
Series.replace: Replace values given in `to_replace` with `value`.
DataFrame.apply : Apply a function row-/column-wise.
DataFrame.map : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``NaN``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``NaN``.
Examples
--------
>>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
>>> s
0 cat
1 dog
2 NaN
3 rabbit
dtype: object
``map`` accepts a ``dict`` or a ``Series``. Values that are not found
in the ``dict`` are converted to ``NaN``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 NaN
3 NaN
dtype: object
It also accepts a function:
>>> s.map('I am a {}'.format)
0 I am a cat
1 I am a dog
2 I am a nan
3 I am a rabbit
dtype: object
To avoid applying the function to missing values (and keep them as
``NaN``) ``na_action='ignore'`` can be used:
>>> s.map('I am a {}'.format, na_action='ignore')
0 I am a cat
1 I am a dog
2 NaN
3 I am a rabbit
dtype: object
"""
new_values = self._map_values(arg, na_action=na_action)
return self._constructor(new_values, index=self.index, copy=False).__finalize__(
self, method="map"
)
|
(self, arg: Union[Callable, collections.abc.Mapping, pandas.core.series.Series], na_action: Optional[Literal['ignore']] = None) -> pandas.core.series.Series
|
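The ``__missing__`` behavior mentioned in the Notes is easiest to see with a ``defaultdict``; a short sketch:
from collections import defaultdict

import numpy as np
import pandas as pd

s = pd.Series(["cat", "dog", np.nan, "rabbit"])
mapping = defaultdict(lambda: "unknown", {"cat": "kitten", "dog": "puppy"})
# "rabbit" and the NaN both fall back to "unknown"; pass
# na_action="ignore" to keep the NaN instead.
print(s.map(mapping))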
67,892 |
pandas.core.series
|
max
|
Return the maximum of the values over the requested axis.
If you want the *index* of the maximum, use ``idxmax``. This is the equivalent of the ``numpy.ndarray`` method ``argmax``.
Parameters
----------
axis : {index (0)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
scalar
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis.
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.max()
8
|
@doc(make_doc("max", ndim=1))
def max(
self,
axis: Axis | None = 0,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
):
return NDFrame.max(self, axis, skipna, numeric_only, **kwargs)
|
(self, axis: 'Axis | None' = 0, skipna: 'bool' = True, numeric_only: 'bool' = False, **kwargs)
|
67,893 |
pandas.core.series
|
mean
|
Return the mean of the values over the requested axis.
Parameters
----------
axis : {index (0)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
scalar
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.mean()
2.0
With a DataFrame
>>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra'])
>>> df
a b
tiger 1 2
zebra 2 3
>>> df.mean()
a 1.5
b 2.5
dtype: float64
Using axis=1
>>> df.mean(axis=1)
tiger 1.5
zebra 2.5
dtype: float64
In this case, `numeric_only` should be set to `True` to avoid
getting an error.
>>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']},
... index=['tiger', 'zebra'])
>>> df.mean(numeric_only=True)
a 1.5
dtype: float64
|
@doc(make_doc("mean", ndim=1))
def mean(
self,
axis: Axis | None = 0,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
):
return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs)
|
(self, axis: 'Axis | None' = 0, skipna: 'bool' = True, numeric_only: 'bool' = False, **kwargs)
|
67,894 |
pandas.core.series
|
median
|
Return the median of the values over the requested axis.
Parameters
----------
axis : {index (0)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
scalar
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.median()
2.0
With a DataFrame
>>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra'])
>>> df
a b
tiger 1 2
zebra 2 3
>>> df.median()
a 1.5
b 2.5
dtype: float64
Using axis=1
>>> df.median(axis=1)
tiger 1.5
zebra 2.5
dtype: float64
In this case, `numeric_only` should be set to `True`
to avoid getting an error.
>>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']},
... index=['tiger', 'zebra'])
>>> df.median(numeric_only=True)
a 1.5
dtype: float64
|
@doc(make_doc("median", ndim=1))
def median(
self,
axis: Axis | None = 0,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
):
return NDFrame.median(self, axis, skipna, numeric_only, **kwargs)
|
(self, axis: 'Axis | None' = 0, skipna: 'bool' = True, numeric_only: 'bool' = False, **kwargs)
|
67,895 |
pandas.core.series
|
memory_usage
|
Return the memory usage of the Series.
The memory usage can optionally include the contribution of
the index and of elements of `object` dtype.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the Series index.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned value.
Returns
-------
int
Bytes of memory consumed.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
DataFrame.memory_usage : Bytes consumed by a DataFrame.
Examples
--------
>>> s = pd.Series(range(3))
>>> s.memory_usage()
152
Not including the index gives the size of the rest of the data, which
is necessarily smaller:
>>> s.memory_usage(index=False)
24
The memory footprint of `object` values is ignored by default:
>>> s = pd.Series(["a", "b"])
>>> s.values
array(['a', 'b'], dtype=object)
>>> s.memory_usage()
144
>>> s.memory_usage(deep=True)
244
|
def memory_usage(self, index: bool = True, deep: bool = False) -> int:
"""
Return the memory usage of the Series.
The memory usage can optionally include the contribution of
the index and of elements of `object` dtype.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the Series index.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned value.
Returns
-------
int
Bytes of memory consumed.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
DataFrame.memory_usage : Bytes consumed by a DataFrame.
Examples
--------
>>> s = pd.Series(range(3))
>>> s.memory_usage()
152
Not including the index gives the size of the rest of the data, which
is necessarily smaller:
>>> s.memory_usage(index=False)
24
The memory footprint of `object` values is ignored by default:
>>> s = pd.Series(["a", "b"])
>>> s.values
array(['a', 'b'], dtype=object)
>>> s.memory_usage()
144
>>> s.memory_usage(deep=True)
244
"""
v = self._memory_usage(deep=deep)
if index:
v += self.index.memory_usage(deep=deep)
return v
|
(self, index: bool = True, deep: bool = False) -> int
|
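A rough sketch of what ``deep=True`` adds for object dtype (illustrative; exact per-object sizes vary by Python version): on top of the 8-byte pointer array, it counts roughly ``sys.getsizeof`` per element.
import sys

import pandas as pd

s = pd.Series(["a", "b"])
shallow = s.memory_usage()  # index plus an 8-byte pointer per element
payload = sum(sys.getsizeof(v) for v in s)
print(shallow + payload)          # ~= the deep figure below
print(s.memory_usage(deep=True))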
67,896 |
pandas.core.series
|
min
|
Return the minimum of the values over the requested axis.
If you want the *index* of the minimum, use ``idxmin``. This is the equivalent of the ``numpy.ndarray`` method ``argmin``.
Parameters
----------
axis : {index (0)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
scalar
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis.
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.min()
0
|
@doc(make_doc("min", ndim=1))
def min(
self,
axis: Axis | None = 0,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
):
return NDFrame.min(self, axis, skipna, numeric_only, **kwargs)
|
(self, axis: 'Axis | None' = 0, skipna: 'bool' = True, numeric_only: 'bool' = False, **kwargs)
|
67,897 |
pandas.core.series
|
mod
|
Return Modulo of series and other, element-wise (binary operator `mod`).
Equivalent to ``series % other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
See Also
--------
Series.rmod : Reverse of the Modulo operator, see
`Python documentation
<https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_
for more details.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.mod(b, fill_value=0)
a 0.0
b NaN
c NaN
d 0.0
e NaN
dtype: float64
|
@Appender(ops.make_flex_doc("mod", "series"))
def mod(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, operator.mod, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
67,898 |
pandas.core.series
|
mode
|
Return the mode(s) of the Series.
The mode is the value that appears most often. There can be multiple modes.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
Series
Modes of the Series in sorted order.
Examples
--------
>>> s = pd.Series([2, 4, 2, 2, 4, None])
>>> s.mode()
0 2.0
dtype: float64
More than one mode:
>>> s = pd.Series([2, 4, 8, 2, 4, None])
>>> s.mode()
0 2.0
1 4.0
dtype: float64
With and without considering null value:
>>> s = pd.Series([2, 4, None, None, 4, None])
>>> s.mode(dropna=False)
0 NaN
dtype: float64
>>> s = pd.Series([2, 4, None, None, 4, None])
>>> s.mode()
0 4.0
dtype: float64
|
def mode(self, dropna: bool = True) -> Series:
"""
Return the mode(s) of the Series.
The mode is the value that appears most often. There can be multiple modes.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
Series
Modes of the Series in sorted order.
Examples
--------
>>> s = pd.Series([2, 4, 2, 2, 4, None])
>>> s.mode()
0 2.0
dtype: float64
More than one mode:
>>> s = pd.Series([2, 4, 8, 2, 4, None])
>>> s.mode()
0 2.0
1 4.0
dtype: float64
With and without considering null value:
>>> s = pd.Series([2, 4, None, None, 4, None])
>>> s.mode(dropna=False)
0 NaN
dtype: float64
>>> s = pd.Series([2, 4, None, None, 4, None])
>>> s.mode()
0 4.0
dtype: float64
"""
# TODO: Add option for bins like value_counts()
values = self._values
if isinstance(values, np.ndarray):
res_values = algorithms.mode(values, dropna=dropna)
else:
res_values = values._mode(dropna=dropna)
# Ensure index is type stable (should always use int index)
return self._constructor(
res_values,
index=range(len(res_values)),
name=self.name,
copy=False,
dtype=self.dtype,
).__finalize__(self, method="mode")
|
(self, dropna: bool = True) -> pandas.core.series.Series
|
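Since ties come back sorted, taking the first element yields a single deterministic value; a minimal sketch:
import pandas as pd

s = pd.Series([2, 4, 8, 2, 4])
modes = s.mode()      # 0 -> 2, 1 -> 4 (both appear twice)
print(modes.iloc[0])  # 2, the smallest of the tied modes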
67,899 |
pandas.core.series
|
mul
|
Return Multiplication of series and other, element-wise (binary operator `mul`).
Equivalent to ``series * other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
See Also
--------
Series.rmul : Reverse of the Multiplication operator, see
`Python documentation
<https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_
for more details.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.multiply(b, fill_value=0)
a 1.0
b 0.0
c 0.0
d 0.0
e NaN
dtype: float64
|
@Appender(ops.make_flex_doc("mul", "series"))
def mul(
self,
other,
level: Level | None = None,
fill_value: float | None = None,
axis: Axis = 0,
) -> Series:
return self._flex_method(
other, operator.mul, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level: 'Level | None' = None, fill_value: 'float | None' = None, axis: 'Axis' = 0) -> 'Series'
|
67,901 |
pandas.core.series
|
ne
|
Return Not equal to of series and other, element-wise (binary operator `ne`).
Equivalent to ``series != other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.ne(b, fill_value=0)
a False
b True
c True
d True
e True
dtype: bool
|
@Appender(ops.make_flex_doc("ne", "series"))
def ne(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, operator.ne, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
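A small sketch (illustrative data, not from the entry) showing how fill_value interacts with NaN comparisons:

# Hedged sketch: ne() with fill_value=0 first replaces one-sided NaNs,
# so a NaN-vs-0.0 slot flips from True to False after filling.
import numpy as np
import pandas as pd

a = pd.Series([np.nan, 2.0])
b = pd.Series([0.0, 2.0])
print(a.ne(b))                # [True, False]  -> NaN vs 0.0 counts as unequal
print(a.ne(b, fill_value=0))  # [False, False] -> NaN filled to 0, 0 != 0 is False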
67,902 |
pandas.core.series
|
nlargest
|
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3``. Default `keep` value is 'first'
so Malta will be kept.
>>> s.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
The `n` largest elements where ``n=3`` and keeping the last duplicates.
Brunei will be kept since it is the last with value 434000 based on
the index order.
>>> s.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
>>> s.nlargest(3, keep='all')
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
|
def nlargest(
self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
) -> Series:
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3``. Default `keep` value is 'first'
so Malta will be kept.
>>> s.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
The `n` largest elements where ``n=3`` and keeping the last duplicates.
Brunei will be kept since it is the last with value 434000 based on
the index order.
>>> s.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
>>> s.nlargest(3, keep='all')
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
"""
return selectn.SelectNSeries(self, n=n, keep=keep).nlargest()
|
(self, n: int = 5, keep: Literal['first', 'last', 'all'] = 'first') -> pandas.core.series.Series
|
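A quick sketch (illustrative values) tying nlargest to the sort_values equivalent mentioned in the Notes:

# Hedged sketch: nlargest(n) agrees with sort_values(ascending=False).head(n)
# for keep='first', but keep='all' may return more than n rows on ties.
import pandas as pd

s = pd.Series({"a": 3, "b": 3, "c": 2, "d": 1})
assert s.nlargest(1).tolist() == s.sort_values(ascending=False).head(1).tolist()
print(s.nlargest(1, keep="all"))  # both 'a' and 'b' (tied at 3) are kept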
67,903 |
pandas.core.series
|
notna
|
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Values such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
Series
Mask of bool values for each element in Series that
indicates whether an element is not an NA value.
See Also
--------
Series.notnull : Alias of notna.
Series.isna : Boolean inverse of notna.
Series.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame(dict(age=[5, 6, np.nan],
... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... name=['Alfred', 'Batman', ''],
... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.nan])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
|
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type]
def notna(self) -> Series:
return super().notna()
|
(self) -> pandas.core.series.Series
|
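A minimal sketch (assumed names) of the common masking pattern this method supports:

# Hedged sketch: the boolean mask from notna() is the usual way to drop
# missing rows by indexing (equivalent here to dropna()).
import numpy as np
import pandas as pd

ser = pd.Series([5.0, np.nan, 6.0])
print(ser[ser.notna()])                       # keeps rows 0 and 2
assert ser[ser.notna()].equals(ser.dropna())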
67,904 |
pandas.core.series
|
notnull
|
Series.notnull is an alias for Series.notna.
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Values such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
Series
Mask of bool values for each element in Series that
indicates whether an element is not an NA value.
See Also
--------
Series.notnull : Alias of notna.
Series.isna : Boolean inverse of notna.
Series.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame(dict(age=[5, 6, np.nan],
... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... name=['Alfred', 'Batman', ''],
... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.nan])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
|
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type]
def notnull(self) -> Series:
"""
Series.notnull is an alias for Series.notna.
"""
return super().notnull()
|
(self) -> pandas.core.series.Series
|
67,905 |
pandas.core.series
|
nsmallest
|
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Brunei": 434000, "Malta": 434000,
... "Maldives": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Brunei 434000
Malta 434000
Maldives 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
Iceland 337000
dtype: int64
The `n` smallest elements where ``n=3``. Default `keep` value is
'first' so Nauru and Tuvalu will be kept.
>>> s.nsmallest(3)
Montserrat 5200
Nauru 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` and keeping the last
duplicates. Anguilla and Tuvalu will be kept since they are the last
with value 11300 based on the index order.
>>> s.nsmallest(3, keep='last')
Montserrat 5200
Anguilla 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has four elements due to the three duplicates.
>>> s.nsmallest(3, keep='all')
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
dtype: int64
|
def nsmallest(
self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
) -> Series:
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Brunei": 434000, "Malta": 434000,
... "Maldives": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Brunei 434000
Malta 434000
Maldives 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
Iceland 337000
dtype: int64
The `n` smallest elements where ``n=3``. Default `keep` value is
'first' so Nauru and Tuvalu will be kept.
>>> s.nsmallest(3)
Montserrat 5200
Nauru 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` and keeping the last
duplicates. Anguilla and Tuvalu will be kept since they are the last
with value 11300 based on the index order.
>>> s.nsmallest(3, keep='last')
Montserrat 5200
Anguilla 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has four elements due to the three duplicates.
>>> s.nsmallest(3, keep='all')
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
dtype: int64
"""
return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest()
|
(self, n: int = 5, keep: Literal['first', 'last', 'all'] = 'first') -> pandas.core.series.Series
|
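A compact sketch (illustrative data) of how the keep variants resolve ties at the cutoff:

# Hedged sketch: with duplicates at the cutoff, keep='first'/'last' pick
# ties by position; keep='all' never discards a tied value.
import pandas as pd

s = pd.Series({"w": 1, "x": 2, "y": 2, "z": 3})
print(s.nsmallest(2).index.tolist())               # ['w', 'x']
print(s.nsmallest(2, keep="last").index.tolist())  # ['w', 'y']
print(s.nsmallest(2, keep="all").index.tolist())   # ['w', 'x', 'y']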
67,910 |
pandas.core.series
|
pop
|
Return item and drop it from the Series. Raise KeyError if not found.
Parameters
----------
item : label
Index of the element that needs to be removed.
Returns
-------
Value that is popped from series.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> ser.pop(0)
1
>>> ser
1 2
2 3
dtype: int64
|
def pop(self, item: Hashable) -> Any:
"""
Return item and drop it from the Series. Raise KeyError if not found.
Parameters
----------
item : label
Index of the element that needs to be removed.
Returns
-------
Value that is popped from series.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> ser.pop(0)
1
>>> ser
1 2
2 3
dtype: int64
"""
return super().pop(item=item)
|
(self, item: collections.abc.Hashable) -> Any
|
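A small sketch (assumed names) showing the in-place mutation and the KeyError path:

# Hedged sketch: pop mutates the Series in place and raises KeyError for
# labels that are absent, mirroring dict.pop without a default.
import pandas as pd

ser = pd.Series([1, 2, 3])
value = ser.pop(0)     # returns 1; ser now has index [1, 2]
try:
    ser.pop(0)         # label 0 is gone
except KeyError:
    print("already popped")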
67,911 |
pandas.core.series
|
pow
|
Return Exponential power of series and other, element-wise (binary operator `pow`).
Equivalent to ``series ** other``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
See Also
--------
Series.rpow : Reverse of the Exponential power operator, see
`Python documentation
<https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_
for more details.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.pow(b, fill_value=0)
a 1.0
b 1.0
c 1.0
d 0.0
e NaN
dtype: float64
|
@Appender(ops.make_flex_doc("pow", "series"))
def pow(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, operator.pow, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
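A short sketch (illustrative values) of how fill_value feeds into ** semantics:

# Hedged sketch: pow follows Python's ** after filling, so a filled
# exponent of 0 yields 1.0 and a filled base of 0 yields 0.0.
import numpy as np
import pandas as pd

base = pd.Series([2.0, np.nan])
exp = pd.Series([np.nan, 3.0])
print(base.pow(exp, fill_value=0))   # 2**0 = 1.0, 0**3 = 0.0
assert base.pow(2).equals(base ** 2)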
67,912 |
pandas.core.series
|
prod
|
Return the product of the values over the requested axis.
Parameters
----------
axis : {index (0)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
.. warning::
The behavior of DataFrame.prod with ``axis=None`` is deprecated,
in a future version this will reduce over both axes and return a scalar
To retain the old behavior, pass axis=0 (or do not pass axis).
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
scalar
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis.
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([], dtype="float64").prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([], dtype="float64").prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan
|
@doc(make_doc("prod", ndim=1))
def prod(
self,
axis: Axis | None = None,
skipna: bool = True,
numeric_only: bool = False,
min_count: int = 0,
**kwargs,
):
return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs)
|
(self, axis: 'Axis | None' = None, skipna: 'bool' = True, numeric_only: 'bool' = False, min_count: 'int' = 0, **kwargs)
|
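A minimal sketch (assumed names) of the min_count behavior documented above:

# Hedged sketch: min_count is the knob that distinguishes "empty product
# is 1" from "not enough data is NaN".
import numpy as np
import pandas as pd

s = pd.Series([np.nan])
print(s.prod())              # 1.0 -> skipna drops the NaN, empty product
print(s.prod(min_count=1))   # nan -> 0 valid values < min_count
print(pd.Series([2.0, 3.0]).prod(min_count=2))  # 6.0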
67,914 |
pandas.core.series
|
quantile
|
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
The quantile(s) to compute, which can lie in range: 0 <= q <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * (x-i)/(j-i)`, where `(x-i)/(j-i)` is
the fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
See Also
--------
core.window.Rolling.quantile : Calculate the rolling quantile.
numpy.percentile : Returns the q-th percentile(s) of the array elements.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
|
def quantile(
self,
q: float | Sequence[float] | AnyArrayLike = 0.5,
interpolation: QuantileInterpolation = "linear",
) -> float | Series:
"""
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
The quantile(s) to compute, which can lie in range: 0 <= q <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * (x-i)/(j-i)`, where `(x-i)/(j-i)` is
the fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
See Also
--------
core.window.Rolling.quantile : Calculate the rolling quantile.
numpy.percentile : Returns the q-th percentile(s) of the array elements.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
validate_percentile(q)
# We dispatch to DataFrame so that core.internals only has to worry
# about 2D cases.
df = self.to_frame()
result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)
if result.ndim == 2:
result = result.iloc[:, 0]
if is_list_like(q):
result.name = self.name
idx = Index(q, dtype=np.float64)
return self._constructor(result, index=idx, name=self.name)
else:
# scalar
return result.iloc[0]
|
(self, q: 'float | Sequence[float] | AnyArrayLike' = 0.5, interpolation: 'QuantileInterpolation' = 'linear') -> 'float | Series'
|
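A quick cross-check sketch (illustrative data) against the numpy.percentile relationship noted in See Also:

# Hedged sketch: for 'linear' interpolation the result matches
# numpy.percentile; a list of q's returns a Series indexed by q.
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3, 4])
assert s.quantile(0.5) == np.percentile(s.to_numpy(), 50)  # both 2.5
print(s.quantile([0.25, 0.75]))  # index is [0.25, 0.75]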
67,915 |
pandas.core.series
|
radd
|
Return Addition of series and other, element-wise (binary operator `radd`).
Equivalent to ``other + series``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
See Also
--------
Series.add : Element-wise Addition, see
`Python documentation
<https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_
for more details.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.add(b, fill_value=0)
a 2.0
b 1.0
c 1.0
d 1.0
e NaN
dtype: float64
|
@Appender(ops.make_flex_doc("radd", "series"))
def radd(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, roperator.radd, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
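A small sketch (illustrative data) of where operand order actually matters:

# Hedged sketch: radd(other) is add with the operands swapped, which only
# matters for non-commutative types such as strings.
import pandas as pd

s = pd.Series(["b", "c"])
print(s.radd("a"))  # 'ab', 'ac'  -- i.e. "a" + element
print(s.add("a"))   # 'ba', 'ca'  -- i.e. element + "a"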
67,917 |
pandas.core.series
|
ravel
|
Return the flattened underlying data as an ndarray or ExtensionArray.
.. deprecated:: 2.2.0
Series.ravel is deprecated. The underlying array is already 1D, so
ravel is not necessary. Use :meth:`to_numpy` for conversion to a numpy
array instead.
Returns
-------
numpy.ndarray or ExtensionArray
Flattened data of the Series.
See Also
--------
numpy.ndarray.ravel : Return a flattened array.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.ravel() # doctest: +SKIP
array([1, 2, 3])
|
def ravel(self, order: str = "C") -> ArrayLike:
"""
Return the flattened underlying data as an ndarray or ExtensionArray.
.. deprecated:: 2.2.0
Series.ravel is deprecated. The underlying array is already 1D, so
ravel is not necessary. Use :meth:`to_numpy` for conversion to a numpy
array instead.
Returns
-------
numpy.ndarray or ExtensionArray
Flattened data of the Series.
See Also
--------
numpy.ndarray.ravel : Return a flattened array.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.ravel() # doctest: +SKIP
array([1, 2, 3])
"""
warnings.warn(
"Series.ravel is deprecated. The underlying array is already 1D, so "
"ravel is not necessary. Use `to_numpy()` for conversion to a numpy "
"array instead.",
FutureWarning,
stacklevel=2,
)
arr = self._values.ravel(order=order)
if isinstance(arr, np.ndarray) and using_copy_on_write():
arr.flags.writeable = False
return arr
|
(self, order: 'str' = 'C') -> 'ArrayLike'
|
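A minimal sketch (assumed names) of the non-deprecated spellings suggested by the deprecation note:

# Hedged sketch: since the deprecation, the non-warning spelling is
# to_numpy() (for an ndarray) or .array (for the backing ExtensionArray).
import pandas as pd

s = pd.Series([1, 2, 3])
arr = s.to_numpy()   # array([1, 2, 3]), already 1-D; no ravel needed
backing = s.array    # the underlying ExtensionArray / NumpyExtensionArray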
67,918 |
pandas.core.series
|
rtruediv
|
Return Floating division of series and other, element-wise (binary operator `rtruediv`).
Equivalent to ``other / series``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
The result of the operation.
See Also
--------
Series.truediv : Element-wise Floating division, see
`Python documentation
<https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_
for more details.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.divide(b, fill_value=0)
a 1.0
b inf
c inf
d 0.0
e NaN
dtype: float64
|
@Appender(ops.make_flex_doc("rtruediv", "series"))
def rtruediv(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, roperator.rtruediv, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
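A short sketch (illustrative values) of the reversed division and the inf result from a filled zero:

# Hedged sketch: rtruediv computes other / series; division by a filled
# zero produces inf rather than NaN, as in the doctest above.
import numpy as np
import pandas as pd

s = pd.Series([2.0, np.nan])
print(s.rtruediv(1))                # 0.5, NaN
print(s.rtruediv(1, fill_value=0))  # 0.5, inf  (1 / 0)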
67,919 |
pandas.core.series
|
rdivmod
|
Return Integer division and modulo of series and other, element-wise (binary operator `rdivmod`).
Equivalent to ``divmod(other, series)``, but with support to substitute a fill_value for
missing data in either one of the inputs.
Parameters
----------
other : Series or scalar value
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : None or float value, default None (NaN)
Fill existing missing (NaN) values, and any new element needed for
successful Series alignment, with this value before computation.
If data in both corresponding Series locations is missing
the result of filling (at that location) will be missing.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
2-Tuple of Series
The result of the operation.
See Also
--------
Series.divmod : Element-wise Integer division and modulo, see
`Python documentation
<https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_
for more details.
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a 1.0
b 1.0
c 1.0
d NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a 1.0
b NaN
d 1.0
e NaN
dtype: float64
>>> a.divmod(b, fill_value=0)
(a 1.0
b inf
c inf
d 0.0
e NaN
dtype: float64,
a 0.0
b NaN
c NaN
d 0.0
e NaN
dtype: float64)
|
@Appender(ops.make_flex_doc("rdivmod", "series"))
def rdivmod(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series:
return self._flex_method(
other, roperator.rdivmod, level=level, fill_value=fill_value, axis=axis
)
|
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
|
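A minimal sketch (assumed names) of the 2-tuple return described above:

# Hedged sketch: divmod-style ops return a 2-tuple of Series
# (quotient, remainder) that can be unpacked directly.
import pandas as pd

s = pd.Series([3, 3])
quot, rem = s.rdivmod(7)   # elementwise divmod(7, s)
print(quot.tolist())       # [2, 2]
print(rem.tolist())        # [1, 1]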
67,920 |
pandas.core.series
|
reindex
|
Conform Series to new index with optional filling logic.
Places NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
index : array-like, optional
New labels for the index. Preferably an Index object to avoid
duplicating data.
axis : int or str, optional
Unused.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: Propagate last valid observation forward to next
valid.
* backfill / bfill: Use next valid observation to fill gap.
* nearest: Use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
.. note::
The `copy` keyword will change behavior in pandas 3.0.
`Copy-on-Write
<https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
will be enabled by default, which means that all methods with a
`copy` keyword will use a lazy copy mechanism to defer the copy and
ignore the `copy` keyword. The `copy` keyword will be removed in a
future version of pandas.
You can already get the future behavior and improvements through
enabling copy on write ``pd.options.mode.copy_on_write = True``
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.nan
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
Series with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to fill the ``NaN`` values by propagating the next valid
observation backward, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
|
@doc(
NDFrame.reindex, # type: ignore[has-type]
klass=_shared_doc_kwargs["klass"],
optional_reindex=_shared_doc_kwargs["optional_reindex"],
)
def reindex( # type: ignore[override]
self,
index=None,
*,
axis: Axis | None = None,
method: ReindexMethod | None = None,
copy: bool | None = None,
level: Level | None = None,
fill_value: Scalar | None = None,
limit: int | None = None,
tolerance=None,
) -> Series:
return super().reindex(
index=index,
method=method,
copy=copy,
level=level,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
|
(self, index=None, *, axis: 'Axis | None' = None, method: 'ReindexMethod | None' = None, copy: 'bool | None' = None, level: 'Level | None' = None, fill_value: 'Scalar | None' = None, limit: 'int | None' = None, tolerance=None) -> 'Series'
|
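A compact sketch (illustrative data) of the label-alignment point made in the docstring:

# Hedged sketch: reindex only aligns labels; 'ffill'/'bfill' here fill
# labels that were absent, never NaNs already present in the data.
import pandas as pd

s = pd.Series([10, 20], index=[1, 3])
print(s.reindex([1, 2, 3]))                  # 2 -> NaN
print(s.reindex([1, 2, 3], method="ffill"))  # 2 -> 10 (carried forward)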
67,922 |
pandas.core.series
|
rename
|
Alter Series index labels or name.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
index : scalar, hashable sequence, dict-like or function, optional
Functions or dict-like are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
attribute.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
copy : bool, default True
Also copy underlying data.
.. note::
The `copy` keyword will change behavior in pandas 3.0.
`Copy-on-Write
<https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
will be enabled by default, which means that all methods with a
`copy` keyword will use a lazy copy mechanism to defer the copy and
ignore the `copy` keyword. The `copy` keyword will be removed in a
future version of pandas.
You can already get the future behavior and improvements through
enabling copy on write ``pd.options.mode.copy_on_write = True``
inplace : bool, default False
Whether to return a new Series. If True the value of copy is ignored.
level : int or level name, default None
In case of MultiIndex, only rename labels in the specified level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise `KeyError` when a `dict-like mapper` or
`index` contains labels that are not present in the index being transformed.
If 'ignore', existing keys will be renamed and extra keys will be ignored.
Returns
-------
Series or None
Series with index labels or name altered or None if ``inplace=True``.
See Also
--------
DataFrame.rename : Corresponding DataFrame method.
Series.rename_axis : Set the name of the axis.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
|
def rename(
self,
index: Renamer | Hashable | None = None,
*,
axis: Axis | None = None,
copy: bool | None = None,
inplace: bool = False,
level: Level | None = None,
errors: IgnoreRaise = "ignore",
) -> Series | None:
"""
Alter Series index labels or name.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
index : scalar, hashable sequence, dict-like or function, optional
Functions or dict-like are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
attribute.
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
copy : bool, default True
Also copy underlying data.
.. note::
The `copy` keyword will change behavior in pandas 3.0.
`Copy-on-Write
<https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
will be enabled by default, which means that all methods with a
`copy` keyword will use a lazy copy mechanism to defer the copy and
ignore the `copy` keyword. The `copy` keyword will be removed in a
future version of pandas.
You can already get the future behavior and improvements through
enabling copy on write ``pd.options.mode.copy_on_write = True``
inplace : bool, default False
Whether to return a new Series. If True the value of copy is ignored.
level : int or level name, default None
In case of MultiIndex, only rename labels in the specified level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise `KeyError` when a `dict-like mapper` or
`index` contains labels that are not present in the index being transformed.
If 'ignore', existing keys will be renamed and extra keys will be ignored.
Returns
-------
Series or None
Series with index labels or name altered or None if ``inplace=True``.
See Also
--------
DataFrame.rename : Corresponding DataFrame method.
Series.rename_axis : Set the name of the axis.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
"""
if axis is not None:
# Make sure we raise if an invalid 'axis' is passed.
axis = self._get_axis_number(axis)
if callable(index) or is_dict_like(index):
# error: Argument 1 to "_rename" of "NDFrame" has incompatible
# type "Union[Union[Mapping[Any, Hashable], Callable[[Any],
# Hashable]], Hashable, None]"; expected "Union[Mapping[Any,
# Hashable], Callable[[Any], Hashable], None]"
return super()._rename(
index, # type: ignore[arg-type]
copy=copy,
inplace=inplace,
level=level,
errors=errors,
)
else:
return self._set_name(index, inplace=inplace, deep=copy)
|
(self, index: 'Renamer | Hashable | None' = None, *, axis: 'Axis | None' = None, copy: 'bool | None' = None, inplace: 'bool' = False, level: 'Level | None' = None, errors: 'IgnoreRaise' = 'ignore') -> 'Series | None'
|
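A short sketch (assumed names) of the scalar-vs-mapper dispatch and the errors='raise' path:

# Hedged sketch: a scalar renames the Series itself, while a mapping or
# callable relabels the index; errors='raise' catches missing keys.
import pandas as pd

s = pd.Series([1, 2], name="old")
print(s.rename("new").name)            # 'new'
print(s.rename({0: "zero"}).index[0])  # 'zero'
try:
    s.rename({9: "x"}, errors="raise")
except KeyError:
    print("label 9 not in index")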
67,923 |
pandas.core.series
|
rename_axis
|
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Note that the ``columns`` parameter is not allowed if the
object is a Series. This parameter applies only to DataFrame
objects.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename. For `Series` this parameter is unused and defaults to 0.
copy : bool, default None
Also copy underlying data.
.. note::
The `copy` keyword will change behavior in pandas 3.0.
`Copy-on-Write
<https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
will be enabled by default, which means that all methods with a
`copy` keyword will use a lazy copy mechanism to defer the copy and
ignore the `copy` keyword. The `copy` keyword will be removed in a
future version of pandas.
You can already get the future behavior and improvements through
enabling copy on write ``pd.options.mode.copy_on_write = True``
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of the
corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
|
@doc(NDFrame.rename_axis)
def rename_axis(
self,
mapper: IndexLabel | lib.NoDefault = lib.no_default,
*,
index=lib.no_default,
axis: Axis = 0,
copy: bool = True,
inplace: bool = False,
) -> Self | None:
return super().rename_axis(
mapper=mapper,
index=index,
axis=axis,
copy=copy,
inplace=inplace,
)
|
(self, mapper: 'IndexLabel | lib.NoDefault' = <no_default>, *, index=<no_default>, axis: 'Axis' = 0, copy: 'bool' = True, inplace: 'bool' = False) -> 'Self | None'
|
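A minimal sketch (illustrative names) distinguishing the axis *name* from the axis labels:

# Hedged sketch: rename_axis names the index object, not the labels; the
# new name then shows up as the header column when resetting the index.
import pandas as pd

s = pd.Series([1, 2], index=["a", "b"]).rename_axis("letter")
print(s.index.name)                       # 'letter'
print(s.reset_index().columns.tolist())   # ['letter', 0]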
67,924 |
pandas.core.series
|
reorder_levels
|
Rearrange index levels using input order.
May not drop or duplicate levels.
Parameters
----------
order : list of int representing new level order
Reference level by number or key.
Returns
-------
type of caller (new object)
Examples
--------
>>> arrays = [np.array(["dog", "dog", "cat", "cat", "bird", "bird"]),
... np.array(["white", "black", "white", "black", "white", "black"])]
>>> s = pd.Series([1, 2, 3, 3, 5, 2], index=arrays)
>>> s
dog white 1
black 2
cat white 3
black 3
bird white 5
black 2
dtype: int64
>>> s.reorder_levels([1, 0])
white dog 1
black dog 2
white cat 3
black cat 3
white bird 5
black bird 2
dtype: int64
|
def reorder_levels(self, order: Sequence[Level]) -> Series:
"""
Rearrange index levels using input order.
May not drop or duplicate levels.
Parameters
----------
order : list of int representing new level order
Reference level by number or key.
Returns
-------
type of caller (new object)
Examples
--------
>>> arrays = [np.array(["dog", "dog", "cat", "cat", "bird", "bird"]),
... np.array(["white", "black", "white", "black", "white", "black"])]
>>> s = pd.Series([1, 2, 3, 3, 5, 2], index=arrays)
>>> s
dog white 1
black 2
cat white 3
black 3
bird white 5
black 2
dtype: int64
>>> s.reorder_levels([1, 0])
white dog 1
black dog 2
white cat 3
black cat 3
white bird 5
black bird 2
dtype: int64
"""
if not isinstance(self.index, MultiIndex): # pragma: no cover
raise Exception("Can only reorder levels on a hierarchical axis.")
result = self.copy(deep=None)
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
return result
|
(self, order: 'Sequence[Level]') -> 'Series'
|
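A small sketch (assumed names) showing that levels can be referenced by name and that no sorting occurs:

# Hedged sketch: reorder_levels permutes MultiIndex levels by position or
# name without sorting; follow with sort_index() if ordering matters.
import pandas as pd

idx = pd.MultiIndex.from_tuples([("dog", "white"), ("cat", "black")],
                                names=["animal", "color"])
s = pd.Series([1, 2], index=idx)
print(s.reorder_levels(["color", "animal"]).index.names)  # ['color', 'animal']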
67,925 |
pandas.core.series
|
repeat
|
Repeat elements of a Series.
Returns a new Series where each element of the current Series
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Series.
axis : None
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
Newly created Series with repeated elements.
See Also
--------
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.repeat(2)
0 a
0 a
1 b
1 b
2 c
2 c
dtype: object
>>> s.repeat([1, 2, 3])
0 a
1 b
1 b
2 c
2 c
2 c
dtype: object
|
def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series:
"""
Repeat elements of a Series.
Returns a new Series where each element of the current Series
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Series.
axis : None
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
Newly created Series with repeated elements.
See Also
--------
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.repeat(2)
0 a
0 a
1 b
1 b
2 c
2 c
dtype: object
>>> s.repeat([1, 2, 3])
0 a
1 b
1 b
2 c
2 c
2 c
dtype: object
"""
nv.validate_repeat((), {"axis": axis})
new_index = self.index.repeat(repeats)
new_values = self._values.repeat(repeats)
return self._constructor(new_values, index=new_index, copy=False).__finalize__(
self, method="repeat"
)
|
(self, repeats: int | collections.abc.Sequence[int], axis: NoneType = None) -> pandas.core.series.Series
|
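A quick sketch (illustrative data) of the duplicated-label behavior seen in the doctests:

# Hedged sketch: the index labels repeat along with the values, so a
# follow-up reset_index(drop=True) is common when unique labels are needed.
import pandas as pd

s = pd.Series(["a", "b"])
r = s.repeat([2, 1])
print(r.index.tolist())                         # [0, 0, 1]
print(r.reset_index(drop=True).index.tolist())  # [0, 1, 2]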
67,928 |
pandas.core.series
|
reset_index
|
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column, or
when the index is meaningless and needs to be reset to the default
before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels
from the index. Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in
the new DataFrame.
name : object, optional
The name to use for the column containing the original Series
values. Uses ``self.name`` by default. This argument is ignored
when `drop` is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
allow_duplicates : bool, default False
Allow duplicate column labels to be created.
.. versionadded:: 1.5.0
Returns
-------
Series or DataFrame or None
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
See Also
--------
DataFrame.reset_index: Analogous function for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], name='foo',
... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx foo
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default index, set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
The `level` parameter is interesting for Series with a multi-level
index.
>>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
... np.array(['one', 'two', 'one', 'two'])]
>>> s2 = pd.Series(
... range(4), name='foo',
... index=pd.MultiIndex.from_arrays(arrays,
... names=['a', 'b']))
To remove a specific level from the Index, use `level`.
>>> s2.reset_index(level='a')
a foo
b
one bar 0
two bar 1
one baz 2
two baz 3
If `level` is not set, all levels are removed from the Index.
>>> s2.reset_index()
a b foo
0 bar one 0
1 bar two 1
2 baz one 2
3 baz two 3
|
def reset_index(
self,
level: IndexLabel | None = None,
*,
drop: bool = False,
name: Level = lib.no_default,
inplace: bool = False,
allow_duplicates: bool = False,
) -> DataFrame | Series | None:
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column, or
when the index is meaningless and needs to be reset to the default
before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels
from the index. Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in
the new DataFrame.
name : object, optional
The name to use for the column containing the original Series
values. Uses ``self.name`` by default. This argument is ignored
when `drop` is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
allow_duplicates : bool, default False
Allow duplicate column labels to be created.
.. versionadded:: 1.5.0
Returns
-------
Series or DataFrame or None
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
See Also
--------
DataFrame.reset_index: Analogous function for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], name='foo',
... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx foo
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default index, set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
The `level` parameter is interesting for Series with a multi-level
index.
>>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
... np.array(['one', 'two', 'one', 'two'])]
>>> s2 = pd.Series(
... range(4), name='foo',
... index=pd.MultiIndex.from_arrays(arrays,
... names=['a', 'b']))
To remove a specific level from the Index, use `level`.
>>> s2.reset_index(level='a')
a foo
b
one bar 0
two bar 1
one baz 2
two baz 3
If `level` is not set, all levels are removed from the Index.
>>> s2.reset_index()
a b foo
0 bar one 0
1 bar two 1
2 baz one 2
3 baz two 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if drop:
new_index = default_index(len(self))
if level is not None:
level_list: Sequence[Hashable]
if not isinstance(level, (tuple, list)):
level_list = [level]
else:
level_list = level
level_list = [self.index._get_level_number(lev) for lev in level_list]
if len(level_list) < self.index.nlevels:
new_index = self.index.droplevel(level_list)
if inplace:
self.index = new_index
elif using_copy_on_write():
new_ser = self.copy(deep=False)
new_ser.index = new_index
return new_ser.__finalize__(self, method="reset_index")
else:
return self._constructor(
self._values.copy(), index=new_index, copy=False, dtype=self.dtype
).__finalize__(self, method="reset_index")
elif inplace:
raise TypeError(
"Cannot reset_index inplace on a Series to create a DataFrame"
)
else:
if name is lib.no_default:
# For backwards compatibility, keep columns as [0] instead of
# [None] when self.name is None
if self.name is None:
name = 0
else:
name = self.name
df = self.to_frame(name)
return df.reset_index(
level=level, drop=drop, allow_duplicates=allow_duplicates
)
return None
|
(self, level: 'IndexLabel | None' = None, *, drop: 'bool' = False, name: 'Level' = <no_default>, inplace: 'bool' = False, allow_duplicates: 'bool' = False) -> 'DataFrame | Series | None'
|
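A minimal sketch (assumed names) of the return-type split described in the Returns section:

# Hedged sketch: with drop=False a named Series becomes a DataFrame whose
# last column holds the old values; drop=True keeps it a Series.
import pandas as pd

s = pd.Series([1, 2], index=["a", "b"], name="foo")
print(type(s.reset_index()))           # DataFrame with columns ['index', 'foo']
print(type(s.reset_index(drop=True)))  # Series with a fresh RangeIndex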