Columns: index (int64, values 0 to 731k); package (string, lengths 2 to 98); name (string, lengths 1 to 76); docstring (string, lengths 0 to 281k); code (string, lengths 4 to 1.07M); signature (string, lengths 2 to 42.8k)
67,929
pandas.core.series
rfloordiv
Return Integer division of series and other, element-wise (binary operator `rfloordiv`). Equivalent to ``other // series``, but with support to substitute a fill_value for missing data in either one of the inputs. Parameters ---------- other : Series or scalar value level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : None or float value, default None (NaN) Fill existing missing (NaN) values, and any new element needed for successful Series alignment, with this value before computation. If data in both corresponding Series locations is missing the result of filling (at that location) will be missing. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series The result of the operation. See Also -------- Series.floordiv : Element-wise Integer division, see `Python documentation <https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_ for more details. Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.floordiv(b, fill_value=0) a 1.0 b inf c inf d 0.0 e NaN dtype: float64
@Appender(ops.make_flex_doc("rfloordiv", "series")) def rfloordiv(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: return self._flex_method( other, roperator.rfloordiv, level=level, fill_value=fill_value, axis=axis )
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
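The docstring's example above only exercises the forward ``floordiv``; a minimal sketch of the reverse form (illustrative values, reusing the same ``a`` and ``b`` as the example, not part of the entry itself):

import numpy as np
import pandas as pd

a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])

# a.rfloordiv(b) computes b // a element-wise after aligning the two indexes,
# so it should match b.floordiv(a) label for label.
left = a.rfloordiv(b, fill_value=0)
right = b.floordiv(a, fill_value=0)
assert left.equals(right)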
67,930
pandas.core.series
rmod
Return Modulo of series and other, element-wise (binary operator `rmod`). Equivalent to ``other % series``, but with support to substitute a fill_value for missing data in either one of the inputs. Parameters ---------- other : Series or scalar value level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : None or float value, default None (NaN) Fill existing missing (NaN) values, and any new element needed for successful Series alignment, with this value before computation. If data in both corresponding Series locations is missing the result of filling (at that location) will be missing. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series The result of the operation. See Also -------- Series.mod : Element-wise Modulo, see `Python documentation <https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_ for more details. Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.mod(b, fill_value=0) a 0.0 b NaN c NaN d 0.0 e NaN dtype: float64
@Appender(ops.make_flex_doc("rmod", "series")) def rmod(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: return self._flex_method( other, roperator.rmod, level=level, fill_value=fill_value, axis=axis )
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
67,931
pandas.core.series
rmul
Return Multiplication of series and other, element-wise (binary operator `rmul`). Equivalent to ``other * series``, but with support to substitute a fill_value for missing data in either one of the inputs. Parameters ---------- other : Series or scalar value level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : None or float value, default None (NaN) Fill existing missing (NaN) values, and any new element needed for successful Series alignment, with this value before computation. If data in both corresponding Series locations is missing the result of filling (at that location) will be missing. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series The result of the operation. See Also -------- Series.mul : Element-wise Multiplication, see `Python documentation <https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_ for more details. Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.multiply(b, fill_value=0) a 1.0 b 0.0 c 0.0 d 0.0 e NaN dtype: float64
@Appender(ops.make_flex_doc("rmul", "series")) def rmul(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: return self._flex_method( other, roperator.rmul, level=level, fill_value=fill_value, axis=axis )
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
67,933
pandas.core.series
round
Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64
def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) new_mgr = self._mgr.round(decimals=decimals, using_cow=using_copy_on_write()) return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__( self, method="round" )
(self, decimals: int = 0, *args, **kwargs) -> pandas.core.series.Series
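A short sketch of the negative-``decimals`` behaviour mentioned in the parameter description but not shown in the example (values are illustrative, not from the entry above):

import pandas as pd

s = pd.Series([1234.567, 89.123])
# decimals=-2 rounds to the nearest hundred, i.e. positions left of the decimal point.
print(s.round(-2))   # expected: 1200.0 and 100.0
print(s.round(1))    # expected: 1234.6 and 89.1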
67,934
pandas.core.series
rpow
Return Exponential power of series and other, element-wise (binary operator `rpow`). Equivalent to ``other ** series``, but with support to substitute a fill_value for missing data in either one of the inputs. Parameters ---------- other : Series or scalar value level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : None or float value, default None (NaN) Fill existing missing (NaN) values, and any new element needed for successful Series alignment, with this value before computation. If data in both corresponding Series locations is missing the result of filling (at that location) will be missing. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series The result of the operation. See Also -------- Series.pow : Element-wise Exponential power, see `Python documentation <https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_ for more details. Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.pow(b, fill_value=0) a 1.0 b 1.0 c 1.0 d 0.0 e NaN dtype: float64
@Appender(ops.make_flex_doc("rpow", "series")) def rpow(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: return self._flex_method( other, roperator.rpow, level=level, fill_value=fill_value, axis=axis )
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
67,935
pandas.core.series
rsub
Return Subtraction of series and other, element-wise (binary operator `rsub`). Equivalent to ``other - series``, but with support to substitute a fill_value for missing data in either one of the inputs. Parameters ---------- other : Series or scalar value level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : None or float value, default None (NaN) Fill existing missing (NaN) values, and any new element needed for successful Series alignment, with this value before computation. If data in both corresponding Series locations is missing the result of filling (at that location) will be missing. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series The result of the operation. See Also -------- Series.sub : Element-wise Subtraction, see `Python documentation <https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_ for more details. Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.subtract(b, fill_value=0) a 0.0 b 1.0 c 1.0 d -1.0 e NaN dtype: float64
@Appender(ops.make_flex_doc("rsub", "series")) def rsub(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: return self._flex_method( other, roperator.rsub, level=level, fill_value=fill_value, axis=axis )
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
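Because subtraction is not commutative, the reverse form also matters for scalars; a minimal sketch (illustrative values, not part of the entry above):

import pandas as pd

s = pd.Series([1, 2, 3])
# s.rsub(10) evaluates 10 - s, the same as the plain expression 10 - s.
assert s.rsub(10).equals(10 - s)   # 9, 8, 7
assert s.sub(10).equals(s - 10)    # -9, -8, -7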
67,938
pandas.core.series
searchsorted
Find indices where elements should be inserted to maintain order. Find the indices into a sorted Series `self` such that, if the corresponding elements in `value` were inserted before the indices, the order of `self` would be preserved. .. note:: The Series *must* be monotonically sorted, otherwise wrong locations will likely be returned. Pandas does *not* check this for you. Parameters ---------- value : array-like or scalar Values to insert into `self`. side : {'left', 'right'}, optional If 'left', the index of the first suitable location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `self`). sorter : 1-D array-like, optional Optional array of integer indices that sort `self` into ascending order. They are typically the result of ``np.argsort``. Returns ------- int or array of int A scalar or array of insertion points with the same shape as `value`. See Also -------- sort_values : Sort by the values along either axis. numpy.searchsorted : Similar method from NumPy. Notes ----- Binary search is used to find the required insertion points. Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> ser 0 1 1 2 2 3 dtype: int64 >>> ser.searchsorted(4) 3 >>> ser.searchsorted([0, 4]) array([0, 3]) >>> ser.searchsorted([1, 3], side='left') array([0, 2]) >>> ser.searchsorted([1, 3], side='right') array([1, 3]) >>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'])) >>> ser 0 2000-03-11 1 2000-03-12 2 2000-03-13 dtype: datetime64[ns] >>> ser.searchsorted('3/14/2000') 3 >>> ser = pd.Categorical( ... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True ... ) >>> ser ['apple', 'bread', 'bread', 'cheese', 'milk'] Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk'] >>> ser.searchsorted('bread') 1 >>> ser.searchsorted(['bread'], side='right') array([3]) If the values are not monotonically sorted, wrong locations may be returned: >>> ser = pd.Series([2, 1, 3]) >>> ser 0 2 1 1 2 3 dtype: int64 >>> ser.searchsorted(1) # doctest: +SKIP 0 # wrong result, correct would be 1
@doc(base.IndexOpsMixin.searchsorted, klass="Series") # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter | None = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter)
(self, value: 'NumpyValueArrayLike | ExtensionArray', side: "Literal['left', 'right']" = 'left', sorter: 'NumpySorter | None' = None) -> 'npt.NDArray[np.intp] | np.intp'
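A common use of the returned insertion point is to splice a value into a sorted Series while keeping it sorted; a rough sketch, assuming a monotonically increasing Series as the docstring requires:

import pandas as pd

ser = pd.Series([1, 3, 5, 7])
pos = ser.searchsorted(4)            # should be 2: insert before position 2
new = pd.concat(
    [ser.iloc[:pos], pd.Series([4]), ser.iloc[pos:]], ignore_index=True
)
assert new.is_monotonic_increasing   # 1, 3, 4, 5, 7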
67,939
pandas.core.series
sem
Return unbiased standard error of the mean over requested axis. Normalized by N-1 by default. This can be changed using the ddof argument Parameters ---------- axis : {index (0)} For `Series` this parameter is unused and defaults to 0. .. warning:: The behavior of DataFrame.sem with ``axis=None`` is deprecated, in a future version this will reduce over both axes and return a scalar To retain the old behavior, pass axis=0 (or do not pass axis). skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. numeric_only : bool, default False Include only float, int, boolean columns. Not implemented for Series. Returns ------- scalar or Series (if level specified) Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.sem().round(6) 0.57735 With a DataFrame >>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra']) >>> df a b tiger 1 2 zebra 2 3 >>> df.sem() a 0.5 b 0.5 dtype: float64 Using axis=1 >>> df.sem(axis=1) tiger 0.5 zebra 0.5 dtype: float64 In this case, `numeric_only` should be set to `True` to avoid getting an error. >>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']}, ... index=['tiger', 'zebra']) >>> df.sem(numeric_only=True) a 0.5 dtype: float64
@doc(make_doc("sem", ndim=1)) def sem( self, axis: Axis | None = None, skipna: bool = True, ddof: int = 1, numeric_only: bool = False, **kwargs, ): return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs)
(self, axis: 'Axis | None' = None, skipna: 'bool' = True, ddof: 'int' = 1, numeric_only: 'bool' = False, **kwargs)
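The relationship to the sample standard deviation makes the result easy to check; a small sketch, assuming the usual identity sem = std(ddof=1) / sqrt(n):

import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3])
# The standard error of the mean is the sample standard deviation
# divided by the square root of the number of observations.
assert np.isclose(s.sem(), s.std(ddof=1) / np.sqrt(len(s)))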
67,940
pandas.core.series
set_axis
Assign desired index to given axis. Indexes for row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : {0 or 'index'}, default 0 The axis to update. The value 0 identifies the rows. For `Series` this parameter is unused and defaults to 0. copy : bool, default True Whether to make a copy of the underlying data. .. note:: The `copy` keyword will change behavior in pandas 3.0. `Copy-on-Write <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ will be enabled by default, which means that all methods with a `copy` keyword will use a lazy copy mechanism to defer the copy and ignore the `copy` keyword. The `copy` keyword will be removed in a future version of pandas. You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` Returns ------- Series An object of type Series. See Also -------- Series.rename_axis : Alter the name of the index. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64
@Appender( """ Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64 """ ) @Substitution( klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], extended_summary_sub="", axis_description_sub="", see_also_sub="", ) @Appender(NDFrame.set_axis.__doc__) def set_axis( self, labels, *, axis: Axis = 0, copy: bool | None = None, ) -> Series: return super().set_axis(labels, axis=axis, copy=copy)
(self, labels, *, axis: 'Axis' = 0, copy: 'bool | None' = None) -> 'Series'
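``set_axis`` returns a new object rather than mutating the original; a minimal sketch contrasting it with assigning to ``.index`` directly, which modifies in place (illustrative values):

import pandas as pd

s = pd.Series([1, 2, 3])
relabelled = s.set_axis(['a', 'b', 'c'], axis=0)   # new Series, s is untouched
assert list(s.index) == [0, 1, 2]
assert list(relabelled.index) == ['a', 'b', 'c']

s.index = ['x', 'y', 'z']                          # in-place alternative
assert list(s.index) == ['x', 'y', 'z']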
67,942
pandas.core.generic
shift
Shift index by desired number of periods with an optional time `freq`. When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be increased using the periods and the `freq`. `freq` can be inferred when specified as "infer" as long as either freq or inferred_freq attribute is set in the index. Parameters ---------- periods : int or Sequence Number of periods to shift. Can be positive or negative. If an iterable of ints, the data will be shifted once by each int. This is equivalent to shifting by one value at a time and concatenating all resulting frames. The resulting columns will have the shift suffixed to their column names. For multiple periods, axis must not be 1. freq : DateOffset, tseries.offsets, timedelta, or str, optional Offset to use from the tseries module or time rule (e.g. 'EOM'). If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. If `freq` is specified as "infer" then it will be inferred from the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. axis : {0 or 'index', 1 or 'columns', None}, default None Shift direction. For `Series` this parameter is unused and defaults to 0. fill_value : object, optional The scalar value to use for newly introduced missing values. the default depends on the dtype of `self`. For numeric data, ``np.nan`` is used. For datetime, timedelta, or period data, etc. :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. suffix : str, optional If str and periods is an iterable, this is added after the column name and before the shift value for each shifted column name. Returns ------- Series/DataFrame Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. Examples -------- >>> df = pd.DataFrame({"Col1": [10, 20, 15, 30, 45], ... "Col2": [13, 23, 18, 33, 48], ... "Col3": [17, 27, 22, 37, 52]}, ... index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df['Col1'].shift(periods=[0, 1, 2]) Col1_0 Col1_1 Col1_2 2020-01-01 10 NaN NaN 2020-01-02 20 10.0 NaN 2020-01-03 15 20.0 10.0 2020-01-04 30 15.0 20.0 2020-01-05 45 30.0 15.0
@doc(klass=_shared_doc_kwargs["klass"]) def shift( self, periods: int | Sequence[int] = 1, freq=None, axis: Axis = 0, fill_value: Hashable = lib.no_default, suffix: str | None = None, ) -> Self | DataFrame: """ Shift index by desired number of periods with an optional time `freq`. When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be increased using the periods and the `freq`. `freq` can be inferred when specified as "infer" as long as either freq or inferred_freq attribute is set in the index. Parameters ---------- periods : int or Sequence Number of periods to shift. Can be positive or negative. If an iterable of ints, the data will be shifted once by each int. This is equivalent to shifting by one value at a time and concatenating all resulting frames. The resulting columns will have the shift suffixed to their column names. For multiple periods, axis must not be 1. freq : DateOffset, tseries.offsets, timedelta, or str, optional Offset to use from the tseries module or time rule (e.g. 'EOM'). If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. If `freq` is specified as "infer" then it will be inferred from the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. axis : {{0 or 'index', 1 or 'columns', None}}, default None Shift direction. For `Series` this parameter is unused and defaults to 0. fill_value : object, optional The scalar value to use for newly introduced missing values. the default depends on the dtype of `self`. For numeric data, ``np.nan`` is used. For datetime, timedelta, or period data, etc. :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. suffix : str, optional If str and periods is an iterable, this is added after the column name and before the shift value for each shifted column name. Returns ------- {klass} Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. Examples -------- >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45], ... "Col2": [13, 23, 18, 33, 48], ... "Col3": [17, 27, 22, 37, 52]}}, ... 
index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df['Col1'].shift(periods=[0, 1, 2]) Col1_0 Col1_1 Col1_2 2020-01-01 10 NaN NaN 2020-01-02 20 10.0 NaN 2020-01-03 15 20.0 10.0 2020-01-04 30 15.0 20.0 2020-01-05 45 30.0 15.0 """ axis = self._get_axis_number(axis) if freq is not None and fill_value is not lib.no_default: # GH#53832 warnings.warn( "Passing a 'freq' together with a 'fill_value' silently ignores " "the fill_value and is deprecated. This will raise in a future " "version.", FutureWarning, stacklevel=find_stack_level(), ) fill_value = lib.no_default if periods == 0: return self.copy(deep=None) if is_list_like(periods) and isinstance(self, ABCSeries): return self.to_frame().shift( periods=periods, freq=freq, axis=axis, fill_value=fill_value ) periods = cast(int, periods) if freq is None: # when freq is None, data is shifted, index is not axis = self._get_axis_number(axis) assert axis == 0 # axis == 1 cases handled in DataFrame.shift new_data = self._mgr.shift(periods=periods, fill_value=fill_value) return self._constructor_from_mgr( new_data, axes=new_data.axes ).__finalize__(self, method="shift") return self._shift_with_freq(periods, axis, freq)
(self, periods: 'int | Sequence[int]' = 1, freq=None, axis: 'Axis' = 0, fill_value: 'Hashable' = <no_default>, suffix: 'str | None' = None) -> 'Self | DataFrame'
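The difference between shifting the data and shifting the index is easiest to see on a Series; a brief sketch using a daily DatetimeIndex (values are illustrative, not from the entry above):

import pandas as pd

s = pd.Series([10, 20, 30], index=pd.date_range("2020-01-01", periods=3, freq="D"))

# Without freq the data moves and a NaN is introduced at the start.
shifted_data = s.shift(periods=1)              # NaN, 10.0, 20.0 on the original dates

# With freq the index moves instead and no NaN appears.
shifted_index = s.shift(periods=1, freq="D")   # 10, 20, 30 on 2020-01-02..04
assert shifted_data.isna().sum() == 1
assert shifted_index.index[0] == pd.Timestamp("2020-01-02")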
67,943
pandas.core.series
skew
Return unbiased skew over requested axis. Normalized by N-1. Parameters ---------- axis : {index (0)} Axis for the function to be applied on. For `Series` this parameter is unused and defaults to 0. For DataFrames, specifying ``axis=None`` will apply the aggregation across both axes. .. versionadded:: 2.0.0 skipna : bool, default True Exclude NA/null values when computing the result. numeric_only : bool, default False Include only float, int, boolean columns. Not implemented for Series. **kwargs Additional keyword arguments to be passed to the function. Returns ------- scalar or scalar Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.skew() 0.0 With a DataFrame >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4], 'c': [1, 3, 5]}, ... index=['tiger', 'zebra', 'cow']) >>> df a b c tiger 1 2 1 zebra 2 3 3 cow 3 4 5 >>> df.skew() a 0.0 b 0.0 c 0.0 dtype: float64 Using axis=1 >>> df.skew(axis=1) tiger 1.732051 zebra -1.732051 cow 0.000000 dtype: float64 In this case, `numeric_only` should be set to `True` to avoid getting an error. >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': ['T', 'Z', 'X']}, ... index=['tiger', 'zebra', 'cow']) >>> df.skew(numeric_only=True) a 0.0 dtype: float64
@doc(make_doc("skew", ndim=1)) def skew( self, axis: Axis | None = 0, skipna: bool = True, numeric_only: bool = False, **kwargs, ): return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs)
(self, axis: 'Axis | None' = 0, skipna: 'bool' = True, numeric_only: 'bool' = False, **kwargs)
67,944
pandas.core.series
sort_index
Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64
def sort_index( self, *, axis: Axis = 0, level: IndexLabel | None = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc | None = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 
'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, )
(self, *, axis: 'Axis' = 0, level: 'IndexLabel | None' = None, ascending: 'bool | Sequence[bool]' = True, inplace: 'bool' = False, kind: 'SortKind' = 'quicksort', na_position: 'NaPosition' = 'last', sort_remaining: 'bool' = True, ignore_index: 'bool' = False, key: 'IndexKeyFunc | None' = None) -> 'Series | None'
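One point from the Returns section worth spelling out: with ``inplace=True`` the method returns None, so the result must not be re-assigned; a minimal sketch:

import pandas as pd

s = pd.Series(['a', 'b', 'c'], index=[3, 1, 2])
out = s.sort_index(inplace=True)   # mutates s and returns None
assert out is None
assert list(s.index) == [1, 2, 3]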
67,945
pandas.core.series
sort_values
Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. >>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64
def sort_values( self, *, axis: Axis = 0, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", ignore_index: bool = False, key: ValueKeyFunc | None = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. >>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. 
For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[bool], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort if key: values_to_sort = cast(Series, ensure_key_mapped(self, key))._values else: values_to_sort = self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None
(self, *, axis: 'Axis' = 0, ascending: 'bool | Sequence[bool]' = True, inplace: 'bool' = False, kind: 'SortKind' = 'quicksort', na_position: 'NaPosition' = 'last', ignore_index: 'bool' = False, key: 'ValueKeyFunc | None' = None) -> 'Series | None'
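A small sketch of ``ignore_index``, which the examples above do not show: it relabels the result 0..n-1 instead of carrying the original positions along (illustrative values):

import pandas as pd

s = pd.Series([30, 10, 20])
kept = s.sort_values()                           # index stays 1, 2, 0
relabelled = s.sort_values(ignore_index=True)    # index becomes 0, 1, 2
assert list(kept.index) == [1, 2, 0]
assert list(relabelled.index) == [0, 1, 2]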
67,947
pandas.core.series
std
Return sample standard deviation over requested axis. Normalized by N-1 by default. This can be changed using the ddof argument. Parameters ---------- axis : {index (0)} For `Series` this parameter is unused and defaults to 0. .. warning:: The behavior of DataFrame.std with ``axis=None`` is deprecated, in a future version this will reduce over both axes and return a scalar To retain the old behavior, pass axis=0 (or do not pass axis). skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. numeric_only : bool, default False Include only float, int, boolean columns. Not implemented for Series. Returns ------- scalar or Series (if level specified) Notes ----- To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the default `ddof=1`) Examples -------- >>> df = pd.DataFrame({'person_id': [0, 1, 2, 3], ... 'age': [21, 25, 62, 43], ... 'height': [1.61, 1.87, 1.49, 2.01]} ... ).set_index('person_id') >>> df age height person_id 0 21 1.61 1 25 1.87 2 62 1.49 3 43 2.01 The standard deviation of the columns can be found as follows: >>> df.std() age 18.786076 height 0.237417 dtype: float64 Alternatively, `ddof=0` can be set to normalize by N instead of N-1: >>> df.std(ddof=0) age 16.269219 height 0.205609 dtype: float64
@doc(make_doc("std", ndim=1)) def std( self, axis: Axis | None = None, skipna: bool = True, ddof: int = 1, numeric_only: bool = False, **kwargs, ): return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs)
(self, axis: 'Axis | None' = None, skipna: 'bool' = True, ddof: 'int' = 1, numeric_only: 'bool' = False, **kwargs)
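The Notes section mentions parity with NumPy; a minimal check of the ddof convention (illustrative values):

import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0])
# pandas defaults to the sample estimator (ddof=1); numpy defaults to ddof=0.
assert np.isclose(s.std(), np.std(s.to_numpy(), ddof=1))
assert np.isclose(s.std(ddof=0), np.std(s.to_numpy()))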
67,948
pandas.core.series
sub
Return Subtraction of series and other, element-wise (binary operator `sub`). Equivalent to ``series - other``, but with support to substitute a fill_value for missing data in either one of the inputs. Parameters ---------- other : Series or scalar value level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : None or float value, default None (NaN) Fill existing missing (NaN) values, and any new element needed for successful Series alignment, with this value before computation. If data in both corresponding Series locations is missing the result of filling (at that location) will be missing. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series The result of the operation. See Also -------- Series.rsub : Reverse of the Subtraction operator, see `Python documentation <https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types>`_ for more details. Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.subtract(b, fill_value=0) a 0.0 b 1.0 c 1.0 d -1.0 e NaN dtype: float64
@Appender(ops.make_flex_doc("sub", "series")) def sub(self, other, level=None, fill_value=None, axis: Axis = 0) -> Series: return self._flex_method( other, operator.sub, level=level, fill_value=fill_value, axis=axis )
(self, other, level=None, fill_value=None, axis: 'Axis' = 0) -> 'Series'
67,950
pandas.core.series
sum
Return the sum of the values over the requested axis. This is equivalent to the method ``numpy.sum``. Parameters ---------- axis : {index (0)} Axis for the function to be applied on. For `Series` this parameter is unused and defaults to 0. .. warning:: The behavior of DataFrame.sum with ``axis=None`` is deprecated, in a future version this will reduce over both axes and return a scalar To retain the old behavior, pass axis=0 (or do not pass axis). .. versionadded:: 2.0.0 skipna : bool, default True Exclude NA/null values when computing the result. numeric_only : bool, default False Include only float, int, boolean columns. Not implemented for Series. min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. **kwargs Additional keyword arguments to be passed to the function. Returns ------- scalar or scalar See Also -------- Series.sum : Return the sum. Series.min : Return the minimum. Series.max : Return the maximum. Series.idxmin : Return the index of the minimum. Series.idxmax : Return the index of the maximum. DataFrame.sum : Return the sum over the requested axis. DataFrame.min : Return the minimum over the requested axis. DataFrame.max : Return the maximum over the requested axis. DataFrame.idxmin : Return the index of the minimum over the requested axis. DataFrame.idxmax : Return the index of the maximum over the requested axis. Examples -------- >>> idx = pd.MultiIndex.from_arrays([ ... ['warm', 'warm', 'cold', 'cold'], ... ['dog', 'falcon', 'fish', 'spider']], ... names=['blooded', 'animal']) >>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx) >>> s blooded animal warm dog 4 falcon 2 cold fish 0 spider 8 Name: legs, dtype: int64 >>> s.sum() 14 By default, the sum of an empty or all-NA Series is ``0``. >>> pd.Series([], dtype="float64").sum() # min_count=0 is the default 0.0 This can be controlled with the ``min_count`` parameter. For example, if you'd like the sum of an empty series to be NaN, pass ``min_count=1``. >>> pd.Series([], dtype="float64").sum(min_count=1) nan Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and empty series identically. >>> pd.Series([np.nan]).sum() 0.0 >>> pd.Series([np.nan]).sum(min_count=1) nan
@doc(make_doc("sum", ndim=1)) def sum( self, axis: Axis | None = None, skipna: bool = True, numeric_only: bool = False, min_count: int = 0, **kwargs, ): return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs)
(self, axis: 'Axis | None' = None, skipna: 'bool' = True, numeric_only: 'bool' = False, min_count: 'int' = 0, **kwargs)
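The interplay of ``skipna`` and ``min_count`` is the subtle part; a short sketch restating the docstring's all-NA case and adding the ``skipna=False`` case (illustrative values):

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 2.0])
assert s.sum() == 3.0                   # NaN skipped by default
assert np.isnan(s.sum(skipna=False))    # NaN propagates when skipna=False
assert np.isnan(s.sum(min_count=3))     # only 2 valid values, fewer than min_count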
67,952
pandas.core.series
swaplevel
Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. copy : bool, default True Whether to copy underlying data. .. note:: The `copy` keyword will change behavior in pandas 3.0. `Copy-on-Write <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ will be enabled by default, which means that all methods with a `copy` keyword will use a lazy copy mechanism to defer the copy and ignore the `copy` keyword. The `copy` keyword will be removed in a future version of pandas. You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` Returns ------- Series Series with levels swapped in MultiIndex. Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object
@doc( klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data. .. note:: The `copy` keyword will change behavior in pandas 3.0. `Copy-on-Write <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ will be enabled by default, which means that all methods with a `copy` keyword will use a lazy copy mechanism to defer the copy and ignore the `copy` keyword. The `copy` keyword will be removed in a future version of pandas. You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True``""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. {examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result
(self, i: 'Level' = -2, j: 'Level' = -1, copy: 'bool | None' = None) -> 'Series'
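Levels can also be addressed by name rather than position; a minimal sketch with a two-level named MultiIndex (hypothetical names, not from the entry above):

import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [("warm", "dog"), ("cold", "fish")], names=["blooded", "animal"]
)
s = pd.Series([4, 0], index=idx)
swapped = s.swaplevel("blooded", "animal")
assert swapped.index.names == ["animal", "blooded"]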
67,957
pandas.core.series
to_dict
Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.MutableMapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.MutableMapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(into=OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(into=dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
def _coerce_method(converter): """ Install the scalar coercion methods. """ def wrapper(self): if len(self) == 1: warnings.warn( f"Calling {converter.__name__} on a single element Series is " "deprecated and will raise a TypeError in the future. " f"Use {converter.__name__}(ser.iloc[0]) instead", FutureWarning, stacklevel=find_stack_level(), ) return converter(self.iloc[0]) raise TypeError(f"cannot convert the series to {converter}") wrapper.__name__ = f"__{converter.__name__}__" return wrapper
(self, *, into: 'type[MutableMappingT] | MutableMappingT' = <class 'dict'>) -> 'MutableMappingT'
67,959
pandas.core.series
to_frame
Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c
def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim_from_mgr(mgr, axes=mgr.axes) return df.__finalize__(self, method="to_frame")
(self, name: 'Hashable' = <no_default>) -> 'DataFrame'
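When the Series has no name, the ``name`` argument decides the column label; a minimal sketch of both paths taken by the code above:

import pandas as pd

s = pd.Series([1, 2, 3])            # unnamed Series
df_default = s.to_frame()           # column is labelled 0, same as DataFrame(s)
df_named = s.to_frame(name="vals")  # column is labelled "vals"
assert list(df_default.columns) == [0]
assert list(df_named.columns) == ["vals"]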
67,964
pandas.core.series
to_markdown
Print Series in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc. For HTTP(S) URLs the key-value pairs are forwarded to ``urllib.request.Request`` as header options. For other URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more details, and for more examples on storage options refer `here <https://pandas.pydata.org/docs/user_guide/io.html? highlight=storage_options#reading-writing-remote-files>`_. **kwargs These parameters will be passed to `tabulate <https://pypi.org/project/tabulate>`_. Returns ------- str Series in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+
@doc( klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions | None = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. {storage_options} **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode=mode, index=index, storage_options=storage_options, **kwargs )
(self, buf: 'IO[str] | None' = None, mode: 'str' = 'wt', index: 'bool' = True, storage_options: 'StorageOptions | None' = None, **kwargs) -> 'str | None'
67,966
pandas.core.series
to_period
Convert Series from DatetimeIndex to PeriodIndex. Parameters ---------- freq : str, default None Frequency associated with the PeriodIndex. copy : bool, default True Whether or not to return a copy. .. note:: The `copy` keyword will change behavior in pandas 3.0. `Copy-on-Write <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ will be enabled by default, which means that all methods with a `copy` keyword will use a lazy copy mechanism to defer the copy and ignore the `copy` keyword. The `copy` keyword will be removed in a future version of pandas. You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` Returns ------- Series Series with index converted to PeriodIndex. Examples -------- >>> idx = pd.DatetimeIndex(['2023', '2024', '2025']) >>> s = pd.Series([1, 2, 3], index=idx) >>> s = s.to_period() >>> s 2023 1 2024 2 2025 3 Freq: Y-DEC, dtype: int64 Viewing the index >>> s.index PeriodIndex(['2023', '2024', '2025'], dtype='period[Y-DEC]')
def to_period(self, freq: str | None = None, copy: bool | None = None) -> Series: """ Convert Series from DatetimeIndex to PeriodIndex. Parameters ---------- freq : str, default None Frequency associated with the PeriodIndex. copy : bool, default True Whether or not to return a copy. .. note:: The `copy` keyword will change behavior in pandas 3.0. `Copy-on-Write <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ will be enabled by default, which means that all methods with a `copy` keyword will use a lazy copy mechanism to defer the copy and ignore the `copy` keyword. The `copy` keyword will be removed in a future version of pandas. You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` Returns ------- Series Series with index converted to PeriodIndex. Examples -------- >>> idx = pd.DatetimeIndex(['2023', '2024', '2025']) >>> s = pd.Series([1, 2, 3], index=idx) >>> s = s.to_period() >>> s 2023 1 2024 2 2025 3 Freq: Y-DEC, dtype: int64 Viewing the index >>> s.index PeriodIndex(['2023', '2024', '2025'], dtype='period[Y-DEC]') """ if not isinstance(self.index, DatetimeIndex): raise TypeError(f"unsupported Type {type(self.index).__name__}") new_obj = self.copy(deep=copy and not using_copy_on_write()) new_index = self.index.to_period(freq=freq) setattr(new_obj, "index", new_index) return new_obj
(self, freq: Optional[str] = None, copy: Optional[bool] = None) -> pandas.core.series.Series
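A minimal sketch of converting a datetime-indexed Series to monthly periods (names are illustrative):

>>> import pandas as pd
>>> idx = pd.date_range("2023-01-01", periods=3, freq="MS")
>>> s = pd.Series([10, 20, 30], index=idx)
>>> monthly = s.to_period(freq="M")    # index becomes a PeriodIndex with monthly periods
>>> monthly.index.dtype
period[M]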
67,969
pandas.core.series
to_string
Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. Examples -------- >>> ser = pd.Series([1, 2, 3]).to_string() >>> ser '0 1\n1 2\n2 3'
def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. Examples -------- >>> ser = pd.Series([1, 2, 3]).to_string() >>> ser '0 1\\n1 2\\n2 3' """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w", encoding="utf-8") as f: f.write(result) return None
(self, buf: 'FilePath | WriteBuffer[str] | None' = None, na_rep: 'str' = 'NaN', float_format: 'str | None' = None, header: 'bool' = True, index: 'bool' = True, length: 'bool' = False, dtype: 'bool' = False, name: 'bool' = False, max_rows: 'int | None' = None, min_rows: 'int | None' = None) -> 'str | None'
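A short sketch combining a few of the formatting keywords described above (values are illustrative):

>>> import pandas as pd
>>> s = pd.Series([1.5, 2.25, None], name="x")
>>> text = s.to_string(na_rep="-", name=True, dtype=True)    # NaN rendered as "-", footer shows name and dtype
>>> bare = s.to_string(index=False, header=False)            # values only, no row labels or header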
67,970
pandas.core.series
to_timestamp
Cast to DatetimeIndex of Timestamps, at *beginning* of period. Parameters ---------- freq : str, default frequency of PeriodIndex Desired frequency. how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end. copy : bool, default True Whether or not to return a copy. .. note:: The `copy` keyword will change behavior in pandas 3.0. `Copy-on-Write <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ will be enabled by default, which means that all methods with a `copy` keyword will use a lazy copy mechanism to defer the copy and ignore the `copy` keyword. The `copy` keyword will be removed in a future version of pandas. You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` Returns ------- Series with DatetimeIndex Examples -------- >>> idx = pd.PeriodIndex(['2023', '2024', '2025'], freq='Y') >>> s1 = pd.Series([1, 2, 3], index=idx) >>> s1 2023 1 2024 2 2025 3 Freq: Y-DEC, dtype: int64 The resulting frequency of the Timestamps is `YearBegin` >>> s1 = s1.to_timestamp() >>> s1 2023-01-01 1 2024-01-01 2 2025-01-01 3 Freq: YS-JAN, dtype: int64 Using `freq` which is the offset that the Timestamps will have >>> s2 = pd.Series([1, 2, 3], index=idx) >>> s2 = s2.to_timestamp(freq='M') >>> s2 2023-01-31 1 2024-01-31 2 2025-01-31 3 Freq: YE-JAN, dtype: int64
def to_timestamp( self, freq: Frequency | None = None, how: Literal["s", "e", "start", "end"] = "start", copy: bool | None = None, ) -> Series: """ Cast to DatetimeIndex of Timestamps, at *beginning* of period. Parameters ---------- freq : str, default frequency of PeriodIndex Desired frequency. how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end. copy : bool, default True Whether or not to return a copy. .. note:: The `copy` keyword will change behavior in pandas 3.0. `Copy-on-Write <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ will be enabled by default, which means that all methods with a `copy` keyword will use a lazy copy mechanism to defer the copy and ignore the `copy` keyword. The `copy` keyword will be removed in a future version of pandas. You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` Returns ------- Series with DatetimeIndex Examples -------- >>> idx = pd.PeriodIndex(['2023', '2024', '2025'], freq='Y') >>> s1 = pd.Series([1, 2, 3], index=idx) >>> s1 2023 1 2024 2 2025 3 Freq: Y-DEC, dtype: int64 The resulting frequency of the Timestamps is `YearBegin` >>> s1 = s1.to_timestamp() >>> s1 2023-01-01 1 2024-01-01 2 2025-01-01 3 Freq: YS-JAN, dtype: int64 Using `freq` which is the offset that the Timestamps will have >>> s2 = pd.Series([1, 2, 3], index=idx) >>> s2 = s2.to_timestamp(freq='M') >>> s2 2023-01-31 1 2024-01-31 2 2025-01-31 3 Freq: YE-JAN, dtype: int64 """ if not isinstance(self.index, PeriodIndex): raise TypeError(f"unsupported Type {type(self.index).__name__}") new_obj = self.copy(deep=copy and not using_copy_on_write()) new_index = self.index.to_timestamp(freq=freq, how=how) setattr(new_obj, "index", new_index) return new_obj
(self, freq: 'Frequency | None' = None, how: "Literal['s', 'e', 'start', 'end']" = 'start', copy: 'bool | None' = None) -> 'Series'
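A minimal sketch contrasting the `how` convention (names are illustrative):

>>> import pandas as pd
>>> pidx = pd.period_range("2023-01", periods=3, freq="M")
>>> s = pd.Series([1, 2, 3], index=pidx)
>>> start = s.to_timestamp(how="start")   # timestamps at the first instant of each period
>>> end = s.to_timestamp(how="end")       # timestamps at the last instant of each period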
67,973
pandas.core.series
transform
Call ``func`` on self producing a Series with the same axis shape as self. Parameters ---------- func : function, str, list-like or dict-like Function to use for transforming the data. If a function, must either work when passed a Series or when passed to Series.apply. If func is both list-like and dict-like, dict-like behavior takes precedence. Accepted combinations are: - function - string function name - list-like of functions and/or function names, e.g. ``[np.exp, 'sqrt']`` - dict-like of axis labels -> functions, function names or list-like of such. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. *args Positional arguments to pass to `func`. **kwargs Keyword arguments to pass to `func`. Returns ------- Series A Series that must have the same length as self. Raises ------ ValueError : If the returned Series has a different length than self. See Also -------- Series.agg : Only perform aggregating type operations. Series.apply : Invoke function on a Series. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- >>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)}) >>> df A B 0 0 1 1 1 2 2 2 3 >>> df.transform(lambda x: x + 1) A B 0 1 2 1 2 3 2 3 4 Even though the resulting Series must have the same length as the input Series, it is possible to provide several input functions: >>> s = pd.Series(range(3)) >>> s 0 0 1 1 2 2 dtype: int64 >>> s.transform([np.sqrt, np.exp]) sqrt exp 0 0.000000 1.000000 1 1.000000 2.718282 2 1.414214 7.389056 You can call transform on a GroupBy object: >>> df = pd.DataFrame({ ... "Date": [ ... "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05", ... "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05"], ... "Data": [5, 8, 6, 1, 50, 100, 60, 120], ... }) >>> df Date Data 0 2015-05-08 5 1 2015-05-07 8 2 2015-05-06 6 3 2015-05-05 1 4 2015-05-08 50 5 2015-05-07 100 6 2015-05-06 60 7 2015-05-05 120 >>> df.groupby('Date')['Data'].transform('sum') 0 55 1 108 2 66 3 121 4 55 5 108 6 66 7 121 Name: Data, dtype: int64 >>> df = pd.DataFrame({ ... "c": [1, 1, 1, 2, 2, 2, 2], ... "type": ["m", "n", "o", "m", "m", "n", "n"] ... }) >>> df c type 0 1 m 1 1 n 2 1 o 3 2 m 4 2 m 5 2 n 6 2 n >>> df['size'] = df.groupby('c')['type'].transform(len) >>> df c type size 0 1 m 3 1 1 n 3 2 1 o 3 3 2 m 4 4 2 m 4 5 2 n 4 6 2 n 4
@doc( _shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) ser = ( self.copy(deep=False) if using_copy_on_write() or warn_copy_on_write() else self ) result = SeriesApply(ser, func=func, args=args, kwargs=kwargs).transform() return result
(self, func: 'AggFuncType', axis: 'Axis' = 0, *args, **kwargs) -> 'DataFrame | Series'
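A small sketch; note that functions which change the length (aggregations) raise ValueError, per the docstring above:

>>> import numpy as np
>>> import pandas as pd
>>> s = pd.Series([1.0, 4.0, 9.0])
>>> s.transform(np.sqrt)                  # elementwise, keeps the original index and length
0    1.0
1    2.0
2    3.0
dtype: float64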
67,979
pandas.core.series
unique
Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c']
def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique()
(self) -> 'ArrayLike'
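A short sketch emphasizing that uniques come back in order of appearance:

>>> import pandas as pd
>>> pd.Series([3, 1, 3, 2]).unique()      # not sorted; first-seen order is preserved
array([3, 1, 2])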
67,980
pandas.core.series
unstack
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. sort : bool, default True Sort the level(s) in the resulting MultiIndex columns. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4
def unstack( self, level: IndexLabel = -1, fill_value: Hashable | None = None, sort: bool = True, ) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. sort : bool, default True Sort the level(s) in the resulting MultiIndex columns. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value, sort)
(self, level: 'IndexLabel' = -1, fill_value: 'Hashable | None' = None, sort: 'bool' = True) -> 'DataFrame'
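A minimal sketch using `fill_value` for combinations missing from the MultiIndex (names are illustrative):

>>> import pandas as pd
>>> midx = pd.MultiIndex.from_tuples([("one", "a"), ("one", "b"), ("two", "a")])
>>> s = pd.Series([1, 2, 3], index=midx)
>>> wide = s.unstack(fill_value=0)        # ("two", "b") is absent, so that cell becomes 0
>>> list(wide.columns)
['a', 'b']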
67,981
pandas.core.series
update
Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64
def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= REF_COUNT: warnings.warn( _chained_assignment_method_msg, ChainedAssignmentError, stacklevel=2, ) elif not PYPY and not using_copy_on_write() and self._is_view_after_cow_rules(): ctr = sys.getrefcount(self) ref_count = REF_COUNT if _check_cacher(self): # see https://github.com/pandas-dev/pandas/pull/56060#discussion_r1399245221 ref_count += 1 if ctr <= ref_count: warnings.warn( _chained_assignment_warning_method_msg, FutureWarning, stacklevel=2, ) if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher()
(self, other: pandas.core.series.Series | collections.abc.Sequence | collections.abc.Mapping) -> NoneType
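A small in-place sketch; dict-like input is coerced to a Series and aligned on index, as noted above:

>>> import pandas as pd
>>> s = pd.Series([10, 20, 30])
>>> s.update({2: 99})                     # only label 2 changes; the method returns None
>>> s.tolist()
[10, 20, 99]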
67,983
pandas.core.series
var
Return unbiased variance over requested axis. Normalized by N-1 by default. This can be changed using the ddof argument. Parameters ---------- axis : {index (0)} For `Series` this parameter is unused and defaults to 0. .. warning:: The behavior of DataFrame.var with ``axis=None`` is deprecated, in a future version this will reduce over both axes and return a scalar To retain the old behavior, pass axis=0 (or do not pass axis). skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. numeric_only : bool, default False Include only float, int, boolean columns. Not implemented for Series. Returns ------- scalar or Series (if level specified) Examples -------- >>> df = pd.DataFrame({'person_id': [0, 1, 2, 3], ... 'age': [21, 25, 62, 43], ... 'height': [1.61, 1.87, 1.49, 2.01]} ... ).set_index('person_id') >>> df age height person_id 0 21 1.61 1 25 1.87 2 62 1.49 3 43 2.01 >>> df.var() age 352.916667 height 0.056367 dtype: float64 Alternatively, ``ddof=0`` can be set to normalize by N instead of N-1: >>> df.var(ddof=0) age 264.687500 height 0.042275 dtype: float64
@doc(make_doc("var", ndim=1)) def var( self, axis: Axis | None = None, skipna: bool = True, ddof: int = 1, numeric_only: bool = False, **kwargs, ): return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs)
(self, axis: 'Axis | None' = None, skipna: 'bool' = True, ddof: 'int' = 1, numeric_only: 'bool' = False, **kwargs)
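A short numeric check of the `ddof` behaviour described above:

>>> import pandas as pd
>>> s = pd.Series([1.0, 2.0, 3.0, 4.0])
>>> s.var()                               # divides by N - 1
1.6666666666666667
>>> s.var(ddof=0)                         # divides by N
1.25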
67,984
pandas.core.series
view
Create a new view of the Series. .. deprecated:: 2.2.0 ``Series.view`` is deprecated and will be removed in a future version. Use :meth:`Series.astype` as an alternative to change the dtype. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- Use ``astype`` to change the dtype instead.
def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. .. deprecated:: 2.2.0 ``Series.view`` is deprecated and will be removed in a future version. Use :meth:`Series.astype` as an alternative to change the dtype. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- Use ``astype`` to change the dtype instead. """ warnings.warn( "Series.view is deprecated and will be removed in a future version. " "Use ``astype`` as an alternative to change the dtype.", FutureWarning, stacklevel=2, ) # self.array instead of self._values so we piggyback on NumpyExtensionArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) return res_ser.__finalize__(self, method="view")
(self, dtype: 'Dtype | None' = None) -> 'Series'
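Since ``Series.view`` is deprecated, a sketch of the suggested ``astype`` replacement (values chosen so the cast is lossless):

>>> import pandas as pd
>>> s = pd.Series([0, 1, 2], dtype="int8")
>>> s.astype("uint8")                     # preferred over the deprecated s.view("uint8")
0    0
1    1
2    2
dtype: uint8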
67,987
pandas.core.dtypes.dtypes
SparseDtype
Dtype for data stored in :class:`SparseArray`. This dtype implements the pandas ExtensionDtype interface. Parameters ---------- dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64 The dtype of the underlying array storing the non-fill value values. fill_value : scalar, optional The scalar value not stored in the SparseArray. By default, this depends on `dtype`. =========== ========== dtype na_value =========== ========== float ``np.nan`` int ``0`` bool ``False`` datetime64 ``pd.NaT`` timedelta64 ``pd.NaT`` =========== ========== The default value may be overridden by specifying a `fill_value`. Attributes ---------- None Methods ------- None Examples -------- >>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0)) >>> ser 0 1 1 0 2 0 dtype: Sparse[int64, 0] >>> ser.sparse.density 0.3333333333333333
class SparseDtype(ExtensionDtype): """ Dtype for data stored in :class:`SparseArray`. This dtype implements the pandas ExtensionDtype interface. Parameters ---------- dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64 The dtype of the underlying array storing the non-fill value values. fill_value : scalar, optional The scalar value not stored in the SparseArray. By default, this depends on `dtype`. =========== ========== dtype na_value =========== ========== float ``np.nan`` int ``0`` bool ``False`` datetime64 ``pd.NaT`` timedelta64 ``pd.NaT`` =========== ========== The default value may be overridden by specifying a `fill_value`. Attributes ---------- None Methods ------- None Examples -------- >>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0)) >>> ser 0 1 1 0 2 0 dtype: Sparse[int64, 0] >>> ser.sparse.density 0.3333333333333333 """ _is_immutable = True # We include `_is_na_fill_value` in the metadata to avoid hash collisions # between SparseDtype(float, 0.0) and SparseDtype(float, nan). # Without is_na_fill_value in the comparison, those would be equal since # hash(nan) is (sometimes?) 0. _metadata = ("_dtype", "_fill_value", "_is_na_fill_value") def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None: if isinstance(dtype, type(self)): if fill_value is None: fill_value = dtype.fill_value dtype = dtype.subtype from pandas.core.dtypes.common import ( is_string_dtype, pandas_dtype, ) from pandas.core.dtypes.missing import na_value_for_dtype dtype = pandas_dtype(dtype) if is_string_dtype(dtype): dtype = np.dtype("object") if not isinstance(dtype, np.dtype): # GH#53160 raise TypeError("SparseDtype subtype must be a numpy dtype") if fill_value is None: fill_value = na_value_for_dtype(dtype) self._dtype = dtype self._fill_value = fill_value self._check_fill_value() def __hash__(self) -> int: # Python3 doesn't inherit __hash__ when a base class overrides # __eq__, so we explicitly do it here. return super().__hash__() def __eq__(self, other: object) -> bool: # We have to override __eq__ to handle NA values in _metadata. # The base class does simple == checks, which fail for NA. if isinstance(other, str): try: other = self.construct_from_string(other) except TypeError: return False if isinstance(other, type(self)): subtype = self.subtype == other.subtype if self._is_na_fill_value: # this case is complicated by two things: # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan) # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT) # i.e. we want to treat any floating-point NaN as equal, but # not a floating-point NaN and a datetime NaT. fill_value = ( other._is_na_fill_value and isinstance(self.fill_value, type(other.fill_value)) or isinstance(other.fill_value, type(self.fill_value)) ) else: with warnings.catch_warnings(): # Ignore spurious numpy warning warnings.filterwarnings( "ignore", "elementwise comparison failed", category=DeprecationWarning, ) fill_value = self.fill_value == other.fill_value return subtype and fill_value return False @property def fill_value(self): """ The fill value of the array. Converting the SparseArray to a dense ndarray will fill the array with this value. .. warning:: It's possible to end up with a SparseArray that has ``fill_value`` values in ``sp_values``. This can occur, for example, when setting ``SparseArray.fill_value`` directly. """ return self._fill_value def _check_fill_value(self) -> None: if not lib.is_scalar(self._fill_value): raise ValueError( f"fill_value must be a scalar. 
Got {self._fill_value} instead" ) from pandas.core.dtypes.cast import can_hold_element from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, ) from pandas.core.construction import ensure_wrapped_if_datetimelike # GH#23124 require fill_value and subtype to match val = self._fill_value if isna(val): if not is_valid_na_for_dtype(val, self.subtype): warnings.warn( "Allowing arbitrary scalar fill_value in SparseDtype is " "deprecated. In a future version, the fill_value must be " "a valid value for the SparseDtype.subtype.", FutureWarning, stacklevel=find_stack_level(), ) else: dummy = np.empty(0, dtype=self.subtype) dummy = ensure_wrapped_if_datetimelike(dummy) if not can_hold_element(dummy, val): warnings.warn( "Allowing arbitrary scalar fill_value in SparseDtype is " "deprecated. In a future version, the fill_value must be " "a valid value for the SparseDtype.subtype.", FutureWarning, stacklevel=find_stack_level(), ) @property def _is_na_fill_value(self) -> bool: from pandas import isna return isna(self.fill_value) @property def _is_numeric(self) -> bool: return not self.subtype == object @property def _is_boolean(self) -> bool: return self.subtype.kind == "b" @property def kind(self) -> str: """ The sparse kind. Either 'integer', or 'block'. """ return self.subtype.kind @property def type(self): return self.subtype.type @property def subtype(self): return self._dtype @property def name(self) -> str: return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]" def __repr__(self) -> str: return self.name @classmethod def construct_array_type(cls) -> type_t[SparseArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays.sparse.array import SparseArray return SparseArray @classmethod def construct_from_string(cls, string: str) -> SparseDtype: """ Construct a SparseDtype from a string form. Parameters ---------- string : str Can take the following forms. string dtype ================ ============================ 'int' SparseDtype[np.int64, 0] 'Sparse' SparseDtype[np.float64, nan] 'Sparse[int]' SparseDtype[np.int64, 0] 'Sparse[int, 0]' SparseDtype[np.int64, 0] ================ ============================ It is not possible to specify non-default fill values with a string. An argument like ``'Sparse[int, 1]'`` will raise a ``TypeError`` because the default fill value for integers is 0. Returns ------- SparseDtype """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) msg = f"Cannot construct a 'SparseDtype' from '{string}'" if string.startswith("Sparse"): try: sub_type, has_fill_value = cls._parse_subtype(string) except ValueError as err: raise TypeError(msg) from err else: result = SparseDtype(sub_type) msg = ( f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt " "looks like the fill_value in the string is not " "the default for the dtype. Non-default fill_values " "are not supported. Use the 'SparseDtype()' " "constructor instead." ) if has_fill_value and str(result) != string: raise TypeError(msg) return result else: raise TypeError(msg) @staticmethod def _parse_subtype(dtype: str) -> tuple[str, bool]: """ Parse a string to get the subtype Parameters ---------- dtype : str A string like * Sparse[subtype] * Sparse[subtype, fill_value] Returns ------- subtype : str Raises ------ ValueError When the subtype cannot be extracted. 
""" xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$") m = xpr.match(dtype) has_fill_value = False if m: subtype = m.groupdict()["subtype"] has_fill_value = bool(m.groupdict()["fill_value"]) elif dtype == "Sparse": subtype = "float64" else: raise ValueError(f"Cannot parse {dtype}") return subtype, has_fill_value @classmethod def is_dtype(cls, dtype: object) -> bool: dtype = getattr(dtype, "dtype", dtype) if isinstance(dtype, str) and dtype.startswith("Sparse"): sub_type, _ = cls._parse_subtype(dtype) dtype = np.dtype(sub_type) elif isinstance(dtype, cls): return True return isinstance(dtype, np.dtype) or dtype == "Sparse" def update_dtype(self, dtype) -> SparseDtype: """ Convert the SparseDtype to a new dtype. This takes care of converting the ``fill_value``. Parameters ---------- dtype : Union[str, numpy.dtype, SparseDtype] The new dtype to use. * For a SparseDtype, it is simply returned * For a NumPy dtype (or str), the current fill value is converted to the new dtype, and a SparseDtype with `dtype` and the new fill value is returned. Returns ------- SparseDtype A new SparseDtype with the correct `dtype` and fill value for that `dtype`. Raises ------ ValueError When the current fill value cannot be converted to the new `dtype` (e.g. trying to convert ``np.nan`` to an integer dtype). Examples -------- >>> SparseDtype(int, 0).update_dtype(float) Sparse[float64, 0.0] >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan)) Sparse[float64, nan] """ from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.common import pandas_dtype cls = type(self) dtype = pandas_dtype(dtype) if not isinstance(dtype, cls): if not isinstance(dtype, np.dtype): raise TypeError("sparse arrays of extension dtypes not supported") fv_asarray = np.atleast_1d(np.array(self.fill_value)) fvarr = astype_array(fv_asarray, dtype) # NB: not fv_0d.item(), as that casts dt64->int fill_value = fvarr[0] dtype = cls(dtype, fill_value=fill_value) return dtype @property def _subtype_with_str(self): """ Whether the SparseDtype's subtype should be considered ``str``. Typically, pandas will store string data in an object-dtype array. When converting values to a dtype, e.g. in ``.astype``, we need to be more specific, we need the actual underlying type. Returns ------- >>> SparseDtype(int, 1)._subtype_with_str dtype('int64') >>> SparseDtype(object, 1)._subtype_with_str dtype('O') >>> dtype = SparseDtype(str, '') >>> dtype.subtype dtype('O') >>> dtype._subtype_with_str <class 'str'> """ if isinstance(self.fill_value, str): return type(self.fill_value) return self.subtype def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: # TODO for now only handle SparseDtypes and numpy dtypes => extend # with other compatible extension dtypes from pandas.core.dtypes.cast import np_find_common_type if any( isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype) for x in dtypes ): return None fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)] fill_value = fill_values[0] from pandas import isna # np.nan isn't a singleton, so we may end up with multiple # NaNs here, so we ignore the all NA case too. if not (len(set(fill_values)) == 1 or isna(fill_values).all()): warnings.warn( "Concatenating sparse arrays with multiple fill " f"values: '{fill_values}'. 
Picking the first and " "converting the rest.", PerformanceWarning, stacklevel=find_stack_level(), ) np_dtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes) return SparseDtype(np_find_common_type(*np_dtypes), fill_value=fill_value)
(dtype: 'Dtype' = <class 'numpy.float64'>, fill_value: 'Any' = None) -> 'None'
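A small sketch of how the subtype and default fill value pair up, per the table in the docstring above:

>>> import pandas as pd
>>> dt = pd.SparseDtype("int64")          # integer subtype, so fill_value defaults to 0
>>> dt.fill_value
0
>>> s = pd.Series([0, 0, 1, 0], dtype=dt)
>>> s.sparse.density                      # only the single non-fill value is stored
0.25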
67,988
pandas.core.dtypes.dtypes
__eq__
null
def __eq__(self, other: object) -> bool: # We have to override __eq__ to handle NA values in _metadata. # The base class does simple == checks, which fail for NA. if isinstance(other, str): try: other = self.construct_from_string(other) except TypeError: return False if isinstance(other, type(self)): subtype = self.subtype == other.subtype if self._is_na_fill_value: # this case is complicated by two things: # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan) # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT) # i.e. we want to treat any floating-point NaN as equal, but # not a floating-point NaN and a datetime NaT. fill_value = ( other._is_na_fill_value and isinstance(self.fill_value, type(other.fill_value)) or isinstance(other.fill_value, type(self.fill_value)) ) else: with warnings.catch_warnings(): # Ignore spurious numpy warning warnings.filterwarnings( "ignore", "elementwise comparison failed", category=DeprecationWarning, ) fill_value = self.fill_value == other.fill_value return subtype and fill_value return False
(self, other: object) -> bool
67,989
pandas.core.dtypes.dtypes
__hash__
null
def __hash__(self) -> int: # Python3 doesn't inherit __hash__ when a base class overrides # __eq__, so we explicitly do it here. return super().__hash__()
(self) -> int
67,990
pandas.core.dtypes.dtypes
__init__
null
def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None: if isinstance(dtype, type(self)): if fill_value is None: fill_value = dtype.fill_value dtype = dtype.subtype from pandas.core.dtypes.common import ( is_string_dtype, pandas_dtype, ) from pandas.core.dtypes.missing import na_value_for_dtype dtype = pandas_dtype(dtype) if is_string_dtype(dtype): dtype = np.dtype("object") if not isinstance(dtype, np.dtype): # GH#53160 raise TypeError("SparseDtype subtype must be a numpy dtype") if fill_value is None: fill_value = na_value_for_dtype(dtype) self._dtype = dtype self._fill_value = fill_value self._check_fill_value()
(self, dtype: 'Dtype' = <class 'numpy.float64'>, fill_value: 'Any' = None) -> 'None'
67,994
pandas.core.dtypes.dtypes
_check_fill_value
null
def _check_fill_value(self) -> None: if not lib.is_scalar(self._fill_value): raise ValueError( f"fill_value must be a scalar. Got {self._fill_value} instead" ) from pandas.core.dtypes.cast import can_hold_element from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, ) from pandas.core.construction import ensure_wrapped_if_datetimelike # GH#23124 require fill_value and subtype to match val = self._fill_value if isna(val): if not is_valid_na_for_dtype(val, self.subtype): warnings.warn( "Allowing arbitrary scalar fill_value in SparseDtype is " "deprecated. In a future version, the fill_value must be " "a valid value for the SparseDtype.subtype.", FutureWarning, stacklevel=find_stack_level(), ) else: dummy = np.empty(0, dtype=self.subtype) dummy = ensure_wrapped_if_datetimelike(dummy) if not can_hold_element(dummy, val): warnings.warn( "Allowing arbitrary scalar fill_value in SparseDtype is " "deprecated. In a future version, the fill_value must be " "a valid value for the SparseDtype.subtype.", FutureWarning, stacklevel=find_stack_level(), )
(self) -> NoneType
67,995
pandas.core.dtypes.dtypes
_get_common_dtype
null
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: # TODO for now only handle SparseDtypes and numpy dtypes => extend # with other compatible extension dtypes from pandas.core.dtypes.cast import np_find_common_type if any( isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype) for x in dtypes ): return None fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)] fill_value = fill_values[0] from pandas import isna # np.nan isn't a singleton, so we may end up with multiple # NaNs here, so we ignore the all NA case too. if not (len(set(fill_values)) == 1 or isna(fill_values).all()): warnings.warn( "Concatenating sparse arrays with multiple fill " f"values: '{fill_values}'. Picking the first and " "converting the rest.", PerformanceWarning, stacklevel=find_stack_level(), ) np_dtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes) return SparseDtype(np_find_common_type(*np_dtypes), fill_value=fill_value)
(self, dtypes: 'list[DtypeObj]') -> 'DtypeObj | None'
67,996
pandas.core.dtypes.dtypes
_parse_subtype
Parse a string to get the subtype Parameters ---------- dtype : str A string like * Sparse[subtype] * Sparse[subtype, fill_value] Returns ------- subtype : str Raises ------ ValueError When the subtype cannot be extracted.
@staticmethod def _parse_subtype(dtype: str) -> tuple[str, bool]: """ Parse a string to get the subtype Parameters ---------- dtype : str A string like * Sparse[subtype] * Sparse[subtype, fill_value] Returns ------- subtype : str Raises ------ ValueError When the subtype cannot be extracted. """ xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$") m = xpr.match(dtype) has_fill_value = False if m: subtype = m.groupdict()["subtype"] has_fill_value = bool(m.groupdict()["fill_value"]) elif dtype == "Sparse": subtype = "float64" else: raise ValueError(f"Cannot parse {dtype}") return subtype, has_fill_value
(dtype: str) -> tuple[str, bool]
67,998
pandas.core.dtypes.dtypes
update_dtype
Convert the SparseDtype to a new dtype. This takes care of converting the ``fill_value``. Parameters ---------- dtype : Union[str, numpy.dtype, SparseDtype] The new dtype to use. * For a SparseDtype, it is simply returned * For a NumPy dtype (or str), the current fill value is converted to the new dtype, and a SparseDtype with `dtype` and the new fill value is returned. Returns ------- SparseDtype A new SparseDtype with the correct `dtype` and fill value for that `dtype`. Raises ------ ValueError When the current fill value cannot be converted to the new `dtype` (e.g. trying to convert ``np.nan`` to an integer dtype). Examples -------- >>> SparseDtype(int, 0).update_dtype(float) Sparse[float64, 0.0] >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan)) Sparse[float64, nan]
def update_dtype(self, dtype) -> SparseDtype: """ Convert the SparseDtype to a new dtype. This takes care of converting the ``fill_value``. Parameters ---------- dtype : Union[str, numpy.dtype, SparseDtype] The new dtype to use. * For a SparseDtype, it is simply returned * For a NumPy dtype (or str), the current fill value is converted to the new dtype, and a SparseDtype with `dtype` and the new fill value is returned. Returns ------- SparseDtype A new SparseDtype with the correct `dtype` and fill value for that `dtype`. Raises ------ ValueError When the current fill value cannot be converted to the new `dtype` (e.g. trying to convert ``np.nan`` to an integer dtype). Examples -------- >>> SparseDtype(int, 0).update_dtype(float) Sparse[float64, 0.0] >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan)) Sparse[float64, nan] """ from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.common import pandas_dtype cls = type(self) dtype = pandas_dtype(dtype) if not isinstance(dtype, cls): if not isinstance(dtype, np.dtype): raise TypeError("sparse arrays of extension dtypes not supported") fv_asarray = np.atleast_1d(np.array(self.fill_value)) fvarr = astype_array(fv_asarray, dtype) # NB: not fv_0d.item(), as that casts dt64->int fill_value = fvarr[0] dtype = cls(dtype, fill_value=fill_value) return dtype
(self, dtype) -> pandas.core.dtypes.dtypes.SparseDtype
67,999
pandas.core.arrays.string_
StringDtype
Extension dtype for string data. .. warning:: StringDtype is considered experimental. The implementation and parts of the API may change without warning. Parameters ---------- storage : {"python", "pyarrow", "pyarrow_numpy"}, optional If not given, the value of ``pd.options.mode.string_storage``. Attributes ---------- None Methods ------- None Examples -------- >>> pd.StringDtype() string[python] >>> pd.StringDtype(storage="pyarrow") string[pyarrow]
class StringDtype(StorageExtensionDtype): """ Extension dtype for string data. .. warning:: StringDtype is considered experimental. The implementation and parts of the API may change without warning. Parameters ---------- storage : {"python", "pyarrow", "pyarrow_numpy"}, optional If not given, the value of ``pd.options.mode.string_storage``. Attributes ---------- None Methods ------- None Examples -------- >>> pd.StringDtype() string[python] >>> pd.StringDtype(storage="pyarrow") string[pyarrow] """ # error: Cannot override instance variable (previously declared on # base class "StorageExtensionDtype") with class variable name: ClassVar[str] = "string" # type: ignore[misc] #: StringDtype().na_value uses pandas.NA except the implementation that # follows NumPy semantics, which uses nan. @property def na_value(self) -> libmissing.NAType | float: # type: ignore[override] if self.storage == "pyarrow_numpy": return np.nan else: return libmissing.NA _metadata = ("storage",) def __init__(self, storage=None) -> None: if storage is None: infer_string = get_option("future.infer_string") if infer_string: storage = "pyarrow_numpy" else: storage = get_option("mode.string_storage") if storage not in {"python", "pyarrow", "pyarrow_numpy"}: raise ValueError( f"Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'. " f"Got {storage} instead." ) if storage in ("pyarrow", "pyarrow_numpy") and pa_version_under10p1: raise ImportError( "pyarrow>=10.0.1 is required for PyArrow backed StringArray." ) self.storage = storage @property def type(self) -> type[str]: return str @classmethod def construct_from_string(cls, string) -> Self: """ Construct a StringDtype from a string. Parameters ---------- string : str The type of the name. The storage type will be taking from `string`. Valid options and their storage types are ========================== ============================================== string result storage ========================== ============================================== ``'string'`` pd.options.mode.string_storage, default python ``'string[python]'`` python ``'string[pyarrow]'`` pyarrow ========================== ============================================== Returns ------- StringDtype Raise ----- TypeError If the string is not a valid option. """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) if string == "string": return cls() elif string == "string[python]": return cls(storage="python") elif string == "string[pyarrow]": return cls(storage="pyarrow") elif string == "string[pyarrow_numpy]": return cls(storage="pyarrow_numpy") else: raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") # https://github.com/pandas-dev/pandas/issues/36126 # error: Signature of "construct_array_type" incompatible with supertype # "ExtensionDtype" def construct_array_type( # type: ignore[override] self, ) -> type_t[BaseStringArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays.string_arrow import ( ArrowStringArray, ArrowStringArrayNumpySemantics, ) if self.storage == "python": return StringArray elif self.storage == "pyarrow": return ArrowStringArray else: return ArrowStringArrayNumpySemantics def __from_arrow__( self, array: pyarrow.Array | pyarrow.ChunkedArray ) -> BaseStringArray: """ Construct StringArray from pyarrow Array/ChunkedArray. 
""" if self.storage == "pyarrow": from pandas.core.arrays.string_arrow import ArrowStringArray return ArrowStringArray(array) elif self.storage == "pyarrow_numpy": from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics return ArrowStringArrayNumpySemantics(array) else: import pyarrow if isinstance(array, pyarrow.Array): chunks = [array] else: # pyarrow.ChunkedArray chunks = array.chunks results = [] for arr in chunks: # convert chunk by chunk to numpy and concatenate then, to avoid # overflow for large string data when concatenating the pyarrow arrays arr = arr.to_numpy(zero_copy_only=False) arr = ensure_string_array(arr, na_value=libmissing.NA) results.append(arr) if len(chunks) == 0: arr = np.array([], dtype=object) else: arr = np.concatenate(results) # Bypass validation inside StringArray constructor, see GH#47781 new_string_array = StringArray.__new__(StringArray) NDArrayBacked.__init__( new_string_array, arr, StringDtype(storage="python"), ) return new_string_array
(storage=None) -> 'None'
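A minimal sketch using the NumPy-object-backed "python" storage, which needs no optional dependency:

>>> import pandas as pd
>>> s = pd.Series(["a", None, "c"], dtype=pd.StringDtype("python"))
>>> s.dtype
string[python]
>>> s.isna().tolist()                     # missing entries are pd.NA for this storage
[False, True, False]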
68,000
pandas.core.dtypes.base
__eq__
null
def __eq__(self, other: object) -> bool: if isinstance(other, str) and other == self.name: return True return super().__eq__(other)
(self, other: object) -> bool
68,001
pandas.core.arrays.string_
__from_arrow__
Construct StringArray from pyarrow Array/ChunkedArray.
def __from_arrow__( self, array: pyarrow.Array | pyarrow.ChunkedArray ) -> BaseStringArray: """ Construct StringArray from pyarrow Array/ChunkedArray. """ if self.storage == "pyarrow": from pandas.core.arrays.string_arrow import ArrowStringArray return ArrowStringArray(array) elif self.storage == "pyarrow_numpy": from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics return ArrowStringArrayNumpySemantics(array) else: import pyarrow if isinstance(array, pyarrow.Array): chunks = [array] else: # pyarrow.ChunkedArray chunks = array.chunks results = [] for arr in chunks: # convert chunk by chunk to numpy and concatenate then, to avoid # overflow for large string data when concatenating the pyarrow arrays arr = arr.to_numpy(zero_copy_only=False) arr = ensure_string_array(arr, na_value=libmissing.NA) results.append(arr) if len(chunks) == 0: arr = np.array([], dtype=object) else: arr = np.concatenate(results) # Bypass validation inside StringArray constructor, see GH#47781 new_string_array = StringArray.__new__(StringArray) NDArrayBacked.__init__( new_string_array, arr, StringDtype(storage="python"), ) return new_string_array
(self, array: 'pyarrow.Array | pyarrow.ChunkedArray') -> 'BaseStringArray'
68,002
pandas.core.dtypes.base
__hash__
null
def __hash__(self) -> int: # custom __eq__ so have to override __hash__ return super().__hash__()
(self) -> int
68,003
pandas.core.arrays.string_
__init__
null
def __init__(self, storage=None) -> None: if storage is None: infer_string = get_option("future.infer_string") if infer_string: storage = "pyarrow_numpy" else: storage = get_option("mode.string_storage") if storage not in {"python", "pyarrow", "pyarrow_numpy"}: raise ValueError( f"Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'. " f"Got {storage} instead." ) if storage in ("pyarrow", "pyarrow_numpy") and pa_version_under10p1: raise ImportError( "pyarrow>=10.0.1 is required for PyArrow backed StringArray." ) self.storage = storage
(self, storage=None) -> NoneType
68,005
pandas.core.dtypes.base
__repr__
null
def __repr__(self) -> str: return f"{self.name}[{self.storage}]"
(self) -> str
68,008
pandas.core.arrays.string_
construct_array_type
Return the array type associated with this dtype. Returns ------- type
def construct_array_type( # type: ignore[override] self, ) -> type_t[BaseStringArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays.string_arrow import ( ArrowStringArray, ArrowStringArrayNumpySemantics, ) if self.storage == "python": return StringArray elif self.storage == "pyarrow": return ArrowStringArray else: return ArrowStringArrayNumpySemantics
(self) -> 'type_t[BaseStringArray]'
68,010
pandas._libs.tslibs.timedeltas
Timedelta
Represents a duration, the difference between two dates or times. Timedelta is the pandas equivalent of python's ``datetime.timedelta`` and is interchangeable with it in most cases. Parameters ---------- value : Timedelta, timedelta, np.timedelta64, str, or int unit : str, default 'ns' Denote the unit of the input, if input is an integer. Possible values: * 'W', or 'D' * 'days', or 'day' * 'hours', 'hour', 'hr', or 'h' * 'minutes', 'minute', 'min', or 'm' * 'seconds', 'second', 'sec', or 's' * 'milliseconds', 'millisecond', 'millis', 'milli', or 'ms' * 'microseconds', 'microsecond', 'micros', 'micro', or 'us' * 'nanoseconds', 'nanosecond', 'nanos', 'nano', or 'ns'. .. deprecated:: 2.2.0 Values `H`, `T`, `S`, `L`, `U`, and `N` are deprecated in favour of the values `h`, `min`, `s`, `ms`, `us`, and `ns`. **kwargs Available kwargs: {days, seconds, microseconds, milliseconds, minutes, hours, weeks}. Values for construction in compat with datetime.timedelta. Numpy ints and floats will be coerced to python ints and floats. Notes ----- The constructor may take in either both values of value and unit or kwargs as above. Either one of them must be used during initialization The ``.value`` attribute is always in ns. If the precision is higher than nanoseconds, the precision of the duration is truncated to nanoseconds. Examples -------- Here we initialize Timedelta object with both value and unit >>> td = pd.Timedelta(1, "d") >>> td Timedelta('1 days 00:00:00') Here we initialize the Timedelta object with kwargs >>> td2 = pd.Timedelta(days=1) >>> td2 Timedelta('1 days 00:00:00') We see that either way we get the same result
from pandas._libs.tslibs.timedeltas import Timedelta
(value=<object object>, unit=None, **kwargs)
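A short sketch showing the two equivalent construction styles named in the docstring:

>>> import pandas as pd
>>> td = pd.Timedelta(90, unit="min")     # value + unit form
>>> td == pd.Timedelta(minutes=90)        # kwargs form gives the same duration
True
>>> td.total_seconds()
5400.0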
68,011
pandas.core.indexes.timedeltas
TimedeltaIndex
Immutable Index of timedelta64 data. Represented internally as int64, and scalars returned Timedelta objects. Parameters ---------- data : array-like (1-dimensional), optional Optional timedelta-like data to construct index with. unit : {'D', 'h', 'm', 's', 'ms', 'us', 'ns'}, optional The unit of ``data``. .. deprecated:: 2.2.0 Use ``pd.to_timedelta`` instead. freq : str or pandas offset object, optional One of pandas date offset strings or corresponding objects. The string ``'infer'`` can be passed in order to set the frequency of the index as the inferred frequency upon creation. dtype : numpy.dtype or str, default None Valid ``numpy`` dtypes are ``timedelta64[ns]``, ``timedelta64[us]``, ``timedelta64[ms]``, and ``timedelta64[s]``. copy : bool Make a copy of input array. name : object Name to be stored in the index. Attributes ---------- days seconds microseconds nanoseconds components inferred_freq Methods ------- to_pytimedelta to_series round floor ceil to_frame mean See Also -------- Index : The base pandas Index type. Timedelta : Represents a duration between two dates or times. DatetimeIndex : Index of datetime64 data. PeriodIndex : Index of Period data. timedelta_range : Create a fixed-frequency TimedeltaIndex. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- >>> pd.TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days']) TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None) We can also let pandas infer the frequency when possible. >>> pd.TimedeltaIndex(np.arange(5) * 24 * 3600 * 1e9, freq='infer') TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D')
class TimedeltaIndex(DatetimeTimedeltaMixin): """ Immutable Index of timedelta64 data. Represented internally as int64, and scalars returned Timedelta objects. Parameters ---------- data : array-like (1-dimensional), optional Optional timedelta-like data to construct index with. unit : {'D', 'h', 'm', 's', 'ms', 'us', 'ns'}, optional The unit of ``data``. .. deprecated:: 2.2.0 Use ``pd.to_timedelta`` instead. freq : str or pandas offset object, optional One of pandas date offset strings or corresponding objects. The string ``'infer'`` can be passed in order to set the frequency of the index as the inferred frequency upon creation. dtype : numpy.dtype or str, default None Valid ``numpy`` dtypes are ``timedelta64[ns]``, ``timedelta64[us]``, ``timedelta64[ms]``, and ``timedelta64[s]``. copy : bool Make a copy of input array. name : object Name to be stored in the index. Attributes ---------- days seconds microseconds nanoseconds components inferred_freq Methods ------- to_pytimedelta to_series round floor ceil to_frame mean See Also -------- Index : The base pandas Index type. Timedelta : Represents a duration between two dates or times. DatetimeIndex : Index of datetime64 data. PeriodIndex : Index of Period data. timedelta_range : Create a fixed-frequency TimedeltaIndex. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- >>> pd.TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days']) TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None) We can also let pandas infer the frequency when possible. >>> pd.TimedeltaIndex(np.arange(5) * 24 * 3600 * 1e9, freq='infer') TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') """ _typ = "timedeltaindex" _data_cls = TimedeltaArray @property def _engine_type(self) -> type[libindex.TimedeltaEngine]: return libindex.TimedeltaEngine _data: TimedeltaArray # Use base class method instead of DatetimeTimedeltaMixin._get_string_slice _get_string_slice = Index._get_string_slice # error: Signature of "_resolution_obj" incompatible with supertype # "DatetimeIndexOpsMixin" @property def _resolution_obj(self) -> Resolution | None: # type: ignore[override] return self._data._resolution_obj # ------------------------------------------------------------------- # Constructors def __new__( cls, data=None, unit=lib.no_default, freq=lib.no_default, closed=lib.no_default, dtype=None, copy: bool = False, name=None, ): if closed is not lib.no_default: # GH#52628 warnings.warn( f"The 'closed' keyword in {cls.__name__} construction is " "deprecated and will be removed in a future version.", FutureWarning, stacklevel=find_stack_level(), ) if unit is not lib.no_default: # GH#55499 warnings.warn( f"The 'unit' keyword in {cls.__name__} construction is " "deprecated and will be removed in a future version. 
" "Use pd.to_timedelta instead.", FutureWarning, stacklevel=find_stack_level(), ) else: unit = None name = maybe_extract_name(name, data, cls) if is_scalar(data): cls._raise_scalar_data_error(data) disallow_ambiguous_unit(unit) if dtype is not None: dtype = pandas_dtype(dtype) if ( isinstance(data, TimedeltaArray) and freq is lib.no_default and (dtype is None or dtype == data.dtype) ): if copy: data = data.copy() return cls._simple_new(data, name=name) if ( isinstance(data, TimedeltaIndex) and freq is lib.no_default and name is None and (dtype is None or dtype == data.dtype) ): if copy: return data.copy() else: return data._view() # - Cases checked above all return/raise before reaching here - # tdarr = TimedeltaArray._from_sequence_not_strict( data, freq=freq, unit=unit, dtype=dtype, copy=copy ) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references return cls._simple_new(tdarr, name=name, refs=refs) # ------------------------------------------------------------------- def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ return lib.is_np_dtype(dtype, "m") # aka self._data._is_recognized_dtype # ------------------------------------------------------------------- # Indexing Methods def get_loc(self, key): """ Get integer location for requested label Returns ------- loc : int, slice, or ndarray[int] """ self._check_indexing_error(key) try: key = self._data._validate_scalar(key, unbox=False) except TypeError as err: raise KeyError(key) from err return Index.get_loc(self, key) def _parse_with_reso(self, label: str): # the "with_reso" is a no-op for TimedeltaIndex parsed = Timedelta(label) return parsed, None def _parsed_string_to_bounds(self, reso, parsed: Timedelta): # reso is unused, included to match signature of DTI/PI lbound = parsed.round(parsed.resolution_string) rbound = lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns") return lbound, rbound # ------------------------------------------------------------------- @property def inferred_type(self) -> str: return "timedelta64"
(data=None, unit=<no_default>, freq=<no_default>, closed=<no_default>, dtype=None, copy: 'bool' = False, name=None)
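A minimal usage sketch for the TimedeltaIndex record above, assuming a recent pandas 2.x: pd.to_timedelta and pd.timedelta_range are the recommended public constructors, and Index.equals compares values while ignoring any freq attribute.

import pandas as pd

# equivalent ways to build a TimedeltaIndex; pd.to_timedelta is the
# recommended entry point now that the unit= constructor keyword is deprecated
idx = pd.to_timedelta(["1 days", "2 days", "3 days"])
rng = pd.timedelta_range(start="1 days", periods=3, freq="D")
print(idx.equals(rng))  # True -- equals() compares values, not freq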
68,039
pandas.core.indexes.timedeltas
__new__
null
def __new__( cls, data=None, unit=lib.no_default, freq=lib.no_default, closed=lib.no_default, dtype=None, copy: bool = False, name=None, ): if closed is not lib.no_default: # GH#52628 warnings.warn( f"The 'closed' keyword in {cls.__name__} construction is " "deprecated and will be removed in a future version.", FutureWarning, stacklevel=find_stack_level(), ) if unit is not lib.no_default: # GH#55499 warnings.warn( f"The 'unit' keyword in {cls.__name__} construction is " "deprecated and will be removed in a future version. " "Use pd.to_timedelta instead.", FutureWarning, stacklevel=find_stack_level(), ) else: unit = None name = maybe_extract_name(name, data, cls) if is_scalar(data): cls._raise_scalar_data_error(data) disallow_ambiguous_unit(unit) if dtype is not None: dtype = pandas_dtype(dtype) if ( isinstance(data, TimedeltaArray) and freq is lib.no_default and (dtype is None or dtype == data.dtype) ): if copy: data = data.copy() return cls._simple_new(data, name=name) if ( isinstance(data, TimedeltaIndex) and freq is lib.no_default and name is None and (dtype is None or dtype == data.dtype) ): if copy: return data.copy() else: return data._view() # - Cases checked above all return/raise before reaching here - # tdarr = TimedeltaArray._from_sequence_not_strict( data, freq=freq, unit=unit, dtype=dtype, copy=copy ) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references return cls._simple_new(tdarr, name=name, refs=refs)
(cls, data=None, unit=<no_default>, freq=<no_default>, closed=<no_default>, dtype=None, copy: bool = False, name=None)
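Hedged sketch of the deprecations handled in __new__ above: on pandas >= 2.2 both the closed and unit constructor keywords emit a FutureWarning, and pd.to_timedelta is the suggested replacement for unit.

import pandas as pd

# deprecated (emits FutureWarning per GH#55499):
#   pd.TimedeltaIndex([1, 2, 3], unit="D")
# preferred replacement:
idx = pd.to_timedelta([1, 2, 3], unit="D")
print(idx)  # TimedeltaIndex(['1 days', '2 days', '3 days'], dtype='timedelta64[ns]', freq=None)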
68,068
pandas.core.indexes.datetimelike
_can_range_setop
null
def _can_range_setop(self, other) -> bool: return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)
(self, other) -> bool
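A small illustration of the check above, assuming pandas 2.x where Day, Hour, etc. are Tick subclasses: when both indexes carry a Tick frequency, set operations can take the fast range-based path and preserve the frequency.

import pandas as pd
from pandas.tseries.offsets import Tick

left = pd.timedelta_range("0 hours", periods=5, freq="h")
right = pd.timedelta_range("2 hours", periods=5, freq="h")
print(isinstance(left.freq, Tick) and isinstance(right.freq, Tick))  # True
print(left.union(right))  # contiguous result keeps freq='h'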
68,119
pandas.core.indexes.timedeltas
_is_comparable_dtype
Can we compare values of the given dtype to our own?
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ return lib.is_np_dtype(dtype, "m") # aka self._data._is_recognized_dtype
(self, dtype: 'DtypeObj') -> 'bool'
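For reference, the lib.is_np_dtype(dtype, "m") test above corresponds to NumPy's dtype kind codes; a rough public-API equivalent:

import numpy as np

print(np.dtype("timedelta64[ns]").kind)  # 'm' -> comparable with a TimedeltaIndex
print(np.dtype("datetime64[ns]").kind)   # 'M' -> not comparable here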
68,133
pandas.core.indexes.datetimelike
_maybe_cast_slice_bound
If label is a string, cast it to scalar type according to resolution. Parameters ---------- label : object side : {'left', 'right'} Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller.
def _maybe_cast_slice_bound(self, label, side: str): """ If label is a string, cast it to scalar type according to resolution. Parameters ---------- label : object side : {'left', 'right'} Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller. """ if isinstance(label, str): try: parsed, reso = self._parse_with_reso(label) except ValueError as err: # DTI -> parsing.DateParseError # TDI -> 'unit abbreviation w/o a number' # PI -> string cannot be parsed as datetime-like self._raise_invalid_indexer("slice", label, err) lower, upper = self._parsed_string_to_bounds(reso, parsed) return lower if side == "left" else upper elif not isinstance(label, self._data._recognized_scalars): self._raise_invalid_indexer("slice", label) return label
(self, label, side: str)
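The method above is what makes string slice bounds work on timedelta-indexed objects; a minimal sketch of the user-facing behaviour, assuming a timedelta64[ns] index:

import pandas as pd

ser = pd.Series(range(4), index=pd.to_timedelta(["0 days", "1 days", "2 days", "3 days"]))
# the string bounds are parsed to Timedelta scalars and widened per their resolution
print(ser["1 days":"2 days"])  # rows for 1 days and 2 days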
68,142
pandas.core.indexes.timedeltas
_parse_with_reso
null
def _parse_with_reso(self, label: str): # the "with_reso" is a no-op for TimedeltaIndex parsed = Timedelta(label) return parsed, None
(self, label: str)
68,143
pandas.core.indexes.timedeltas
_parsed_string_to_bounds
null
def _parsed_string_to_bounds(self, reso, parsed: Timedelta): # reso is unused, included to match signature of DTI/PI lbound = parsed.round(parsed.resolution_string) rbound = lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns") return lbound, rbound
(self, reso, parsed: pandas._libs.tslibs.timedeltas.Timedelta)
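A hedged re-derivation of the bounds computed above for the label '1 days', using only public objects (Timedelta, to_offset); the right bound is the last representable nanosecond inside the label's resolution.

import pandas as pd
from pandas.tseries.frequencies import to_offset

parsed = pd.Timedelta("1 days")                 # resolution_string == 'D'
lbound = parsed.round(parsed.resolution_string)
rbound = lbound + to_offset(parsed.resolution_string) - pd.Timedelta(1, "ns")
print(lbound, rbound)  # 1 days 00:00:00  1 days 23:59:59.999999999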
68,184
pandas.core.indexes.datetimelike
as_unit
Convert to a dtype with the given unit resolution. Parameters ---------- unit : {'s', 'ms', 'us', 'ns'} Returns ------- same type as self Examples -------- For :class:`pandas.DatetimeIndex`: >>> idx = pd.DatetimeIndex(['2020-01-02 01:02:03.004005006']) >>> idx DatetimeIndex(['2020-01-02 01:02:03.004005006'], dtype='datetime64[ns]', freq=None) >>> idx.as_unit('s') DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None) For :class:`pandas.TimedeltaIndex`: >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns']) >>> tdelta_idx TimedeltaIndex(['1 days 00:03:00.000002042'], dtype='timedelta64[ns]', freq=None) >>> tdelta_idx.as_unit('s') TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)
def as_unit(self, unit: str) -> Self: """ Convert to a dtype with the given unit resolution. Parameters ---------- unit : {'s', 'ms', 'us', 'ns'} Returns ------- same type as self Examples -------- For :class:`pandas.DatetimeIndex`: >>> idx = pd.DatetimeIndex(['2020-01-02 01:02:03.004005006']) >>> idx DatetimeIndex(['2020-01-02 01:02:03.004005006'], dtype='datetime64[ns]', freq=None) >>> idx.as_unit('s') DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None) For :class:`pandas.TimedeltaIndex`: >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns']) >>> tdelta_idx TimedeltaIndex(['1 days 00:03:00.000002042'], dtype='timedelta64[ns]', freq=None) >>> tdelta_idx.as_unit('s') TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None) """ arr = self._data.as_unit(unit) return type(self)._simple_new(arr, name=self.name)
(self, unit: 'str') -> 'Self'
68,207
pandas.core.indexes.timedeltas
get_loc
Get integer location for requested label. Returns ------- loc : int, slice, or ndarray[int]
def get_loc(self, key): """ Get integer location for requested label Returns ------- loc : int, slice, or ndarray[int] """ self._check_indexing_error(key) try: key = self._data._validate_scalar(key, unbox=False) except TypeError as err: raise KeyError(key) from err return Index.get_loc(self, key)
(self, key)
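Usage sketch for get_loc above; both strings and Timedelta scalars are accepted because the key is first validated through the array's scalar validation.

import pandas as pd

idx = pd.to_timedelta(["0 days", "1 days", "2 days"])
print(idx.get_loc("1 days"))                 # 1
print(idx.get_loc(pd.Timedelta("2 days")))   # 2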
68,259
pandas.core.indexes.extension
to_pytimedelta
Return an ndarray of datetime.timedelta objects. Returns ------- numpy.ndarray Examples -------- >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D') >>> tdelta_idx TimedeltaIndex(['1 days', '2 days', '3 days'], dtype='timedelta64[ns]', freq=None) >>> tdelta_idx.to_pytimedelta() array([datetime.timedelta(days=1), datetime.timedelta(days=2), datetime.timedelta(days=3)], dtype=object)
def _inherit_from_data( name: str, delegate: type, cache: bool = False, wrap: bool = False ): """ Make an alias for a method of the underlying ExtensionArray. Parameters ---------- name : str Name of an attribute the class should inherit from its EA parent. delegate : class cache : bool, default False Whether to convert wrapped properties into cache_readonly wrap : bool, default False Whether to wrap the inherited result in an Index. Returns ------- attribute, method, property, or cache_readonly """ attr = getattr(delegate, name) if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor": # getset_descriptor i.e. property defined in cython class if cache: def cached(self): return getattr(self._data, name) cached.__name__ = name cached.__doc__ = attr.__doc__ method = cache_readonly(cached) else: def fget(self): result = getattr(self._data, name) if wrap: if isinstance(result, type(self._data)): return type(self)._simple_new(result, name=self.name) elif isinstance(result, ABCDataFrame): return result.set_index(self) return Index(result, name=self.name) return result def fset(self, value) -> None: setattr(self._data, name, value) fget.__name__ = name fget.__doc__ = attr.__doc__ method = property(fget, fset) elif not callable(attr): # just a normal attribute, no wrapping method = attr else: # error: Incompatible redefinition (redefinition with type "Callable[[Any, # VarArg(Any), KwArg(Any)], Any]", original type "property") def method(self, *args, **kwargs): # type: ignore[misc] if "inplace" in kwargs: raise ValueError(f"cannot use inplace with {type(self).__name__}") result = attr(self._data, *args, **kwargs) if wrap: if isinstance(result, type(self._data)): return type(self)._simple_new(result, name=self.name) elif isinstance(result, ABCDataFrame): return result.set_index(self) return Index(result, name=self.name) return result # error: "property" has no attribute "__name__" method.__name__ = name # type: ignore[attr-defined] method.__doc__ = attr.__doc__ return method
(self, *args, **kwargs)
68,262
pandas.core.indexes.extension
total_seconds
Return total duration of each element expressed in seconds. This method is available directly on TimedeltaArray, TimedeltaIndex and on Series containing timedelta values under the ``.dt`` namespace. Returns ------- ndarray, Index or Series When the calling object is a TimedeltaArray, the return type is ndarray. When the calling object is a TimedeltaIndex, the return type is an Index with a float64 dtype. When the calling object is a Series, the return type is Series of type `float64` whose index is the same as the original. See Also -------- datetime.timedelta.total_seconds : Standard library version of this method. TimedeltaIndex.components : Return a DataFrame with components of each Timedelta. Examples -------- **Series** >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d')) >>> s 0 0 days 1 1 days 2 2 days 3 3 days 4 4 days dtype: timedelta64[ns] >>> s.dt.total_seconds() 0 0.0 1 86400.0 2 172800.0 3 259200.0 4 345600.0 dtype: float64 **TimedeltaIndex** >>> idx = pd.to_timedelta(np.arange(5), unit='d') >>> idx TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None) >>> idx.total_seconds() Index([0.0, 86400.0, 172800.0, 259200.0, 345600.0], dtype='float64')
def _inherit_from_data( name: str, delegate: type, cache: bool = False, wrap: bool = False ): """ Make an alias for a method of the underlying ExtensionArray. Parameters ---------- name : str Name of an attribute the class should inherit from its EA parent. delegate : class cache : bool, default False Whether to convert wrapped properties into cache_readonly wrap : bool, default False Whether to wrap the inherited result in an Index. Returns ------- attribute, method, property, or cache_readonly """ attr = getattr(delegate, name) if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor": # getset_descriptor i.e. property defined in cython class if cache: def cached(self): return getattr(self._data, name) cached.__name__ = name cached.__doc__ = attr.__doc__ method = cache_readonly(cached) else: def fget(self): result = getattr(self._data, name) if wrap: if isinstance(result, type(self._data)): return type(self)._simple_new(result, name=self.name) elif isinstance(result, ABCDataFrame): return result.set_index(self) return Index(result, name=self.name) return result def fset(self, value) -> None: setattr(self._data, name, value) fget.__name__ = name fget.__doc__ = attr.__doc__ method = property(fget, fset) elif not callable(attr): # just a normal attribute, no wrapping method = attr else: # error: Incompatible redefinition (redefinition with type "Callable[[Any, # VarArg(Any), KwArg(Any)], Any]", original type "property") def method(self, *args, **kwargs): # type: ignore[misc] if "inplace" in kwargs: raise ValueError(f"cannot use inplace with {type(self).__name__}") result = attr(self._data, *args, **kwargs) if wrap: if isinstance(result, type(self._data)): return type(self)._simple_new(result, name=self.name) elif isinstance(result, ABCDataFrame): return result.set_index(self) return Index(result, name=self.name) return result # error: "property" has no attribute "__name__" method.__name__ = name # type: ignore[attr-defined] method.__doc__ = attr.__doc__ return method
(self, *args, **kwargs)
68,269
pandas._libs.tslibs.timestamps
Timestamp
Pandas replacement for python datetime.datetime object. Timestamp is the pandas equivalent of python's Datetime and is interchangeable with it in most cases. It's the type used for the entries that make up a DatetimeIndex, and other timeseries oriented data structures in pandas. Parameters ---------- ts_input : datetime-like, str, int, float Value to be converted to Timestamp. year, month, day : int hour, minute, second, microsecond : int, optional, default 0 tzinfo : datetime.tzinfo, optional, default None nanosecond : int, optional, default 0 tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will have. unit : str Unit used for conversion if ts_input is of type int or float. The valid values are 'D', 'h', 'm', 's', 'ms', 'us', and 'ns'. For example, 's' means seconds and 'ms' means milliseconds. For float inputs, the result will be stored in nanoseconds, and the unit attribute will be set as ``'ns'``. fold : {0, 1}, default None, keyword-only Due to daylight saving time, one wall clock time can occur twice when shifting from summer to winter time; fold describes whether the datetime-like corresponds to the first (0) or the second time (1) the wall clock hits the ambiguous time. Notes ----- There are essentially three calling conventions for the constructor. The primary form accepts four parameters. They can be passed by position or keyword. The other two forms mimic the parameters from ``datetime.datetime``. They can be passed by either position or keyword, but not both mixed together. Examples -------- Using the primary calling convention: This converts a datetime-like string >>> pd.Timestamp('2017-01-01T12') Timestamp('2017-01-01 12:00:00') This converts a float representing a Unix epoch in units of seconds >>> pd.Timestamp(1513393355.5, unit='s') Timestamp('2017-12-16 03:02:35.500000') This converts an int representing a Unix-epoch in units of seconds and for a particular timezone >>> pd.Timestamp(1513393355, unit='s', tz='US/Pacific') Timestamp('2017-12-15 19:02:35-0800', tz='US/Pacific') Using the other two forms that mimic the API for ``datetime.datetime``: >>> pd.Timestamp(2017, 1, 1, 12) Timestamp('2017-01-01 12:00:00') >>> pd.Timestamp(year=2017, month=1, day=1, hour=12) Timestamp('2017-01-01 12:00:00')
from pandas._libs.tslibs.timestamps import Timestamp
(ts_input=<object object at 0x7f48aa5573e0>, year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, tzinfo=None, *, nanosecond=None, tz=None, unit=None, fold=None)
68,270
pandas.core.arrays.integer
UInt16Dtype
An ExtensionDtype for uint16 integer data. Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`. Attributes ---------- None Methods ------- None Examples -------- For Int8Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype()) >>> ser.dtype Int8Dtype() For Int16Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype()) >>> ser.dtype Int16Dtype() For Int32Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype()) >>> ser.dtype Int32Dtype() For Int64Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype()) >>> ser.dtype Int64Dtype() For UInt8Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype()) >>> ser.dtype UInt8Dtype() For UInt16Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype()) >>> ser.dtype UInt16Dtype() For UInt32Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype()) >>> ser.dtype UInt32Dtype() For UInt64Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype()) >>> ser.dtype UInt64Dtype()
class UInt16Dtype(IntegerDtype): type = np.uint16 name: ClassVar[str] = "UInt16" __doc__ = _dtype_docstring.format(dtype="uint16")
()
68,280
pandas.core.arrays.integer
UInt32Dtype
An ExtensionDtype for uint32 integer data. Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`. Attributes ---------- None Methods ------- None Examples -------- For Int8Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype()) >>> ser.dtype Int8Dtype() For Int16Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype()) >>> ser.dtype Int16Dtype() For Int32Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype()) >>> ser.dtype Int32Dtype() For Int64Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype()) >>> ser.dtype Int64Dtype() For UInt8Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype()) >>> ser.dtype UInt8Dtype() For UInt16Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype()) >>> ser.dtype UInt16Dtype() For UInt32Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype()) >>> ser.dtype UInt32Dtype() For UInt64Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype()) >>> ser.dtype UInt64Dtype()
class UInt32Dtype(IntegerDtype): type = np.uint32 name: ClassVar[str] = "UInt32" __doc__ = _dtype_docstring.format(dtype="uint32")
()
68,290
pandas.core.arrays.integer
UInt64Dtype
An ExtensionDtype for uint64 integer data. Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`. Attributes ---------- None Methods ------- None Examples -------- For Int8Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype()) >>> ser.dtype Int8Dtype() For Int16Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype()) >>> ser.dtype Int16Dtype() For Int32Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype()) >>> ser.dtype Int32Dtype() For Int64Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype()) >>> ser.dtype Int64Dtype() For UInt8Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype()) >>> ser.dtype UInt8Dtype() For UInt16Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype()) >>> ser.dtype UInt16Dtype() For UInt32Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype()) >>> ser.dtype UInt32Dtype() For UInt64Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype()) >>> ser.dtype UInt64Dtype()
class UInt64Dtype(IntegerDtype): type = np.uint64 name: ClassVar[str] = "UInt64" __doc__ = _dtype_docstring.format(dtype="uint64")
()
68,300
pandas.core.arrays.integer
UInt8Dtype
An ExtensionDtype for uint8 integer data. Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`. Attributes ---------- None Methods ------- None Examples -------- For Int8Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype()) >>> ser.dtype Int8Dtype() For Int16Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype()) >>> ser.dtype Int16Dtype() For Int32Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype()) >>> ser.dtype Int32Dtype() For Int64Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype()) >>> ser.dtype Int64Dtype() For UInt8Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype()) >>> ser.dtype UInt8Dtype() For UInt16Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype()) >>> ser.dtype UInt16Dtype() For UInt32Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype()) >>> ser.dtype UInt32Dtype() For UInt64Dtype: >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype()) >>> ser.dtype UInt64Dtype()
class UInt8Dtype(IntegerDtype): type = np.uint8 name: ClassVar[str] = "UInt8" __doc__ = _dtype_docstring.format(dtype="uint8")
()
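A small example covering the unsigned nullable dtypes listed above (shown for UInt8; the wider widths behave the same), assuming pandas' string dtype aliases: missing values are pd.NA and propagate through arithmetic instead of becoming NaN.

import pandas as pd

ser = pd.Series([250, 5, None], dtype="UInt8")
print(ser.dtype)   # UInt8
print(ser + 1)     # 251, 6, <NA>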
68,316
pandas.core.construction
array
Create an array. Parameters ---------- data : Sequence of objects The scalars inside `data` should be instances of the scalar type for `dtype`. It's expected that `data` represents a 1-dimensional array of data. When `data` is an Index or Series, the underlying array will be extracted from `data`. dtype : str, np.dtype, or ExtensionDtype, optional The dtype to use for the array. This may be a NumPy dtype or an extension type registered with pandas using :meth:`pandas.api.extensions.register_extension_dtype`. If not specified, there are two possibilities: 1. When `data` is a :class:`Series`, :class:`Index`, or :class:`ExtensionArray`, the `dtype` will be taken from the data. 2. Otherwise, pandas will attempt to infer the `dtype` from the data. Note that when `data` is a NumPy array, ``data.dtype`` is *not* used for inferring the array type. This is because NumPy cannot represent all the types of data that can be held in extension arrays. Currently, pandas will infer an extension dtype for sequences of ============================== ======================================= Scalar Type Array Type ============================== ======================================= :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray` :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` :class:`int` :class:`pandas.arrays.IntegerArray` :class:`float` :class:`pandas.arrays.FloatingArray` :class:`str` :class:`pandas.arrays.StringArray` or :class:`pandas.arrays.ArrowStringArray` :class:`bool` :class:`pandas.arrays.BooleanArray` ============================== ======================================= The ExtensionArray created when the scalar type is :class:`str` is determined by ``pd.options.mode.string_storage`` if the dtype is not explicitly given. For all other cases, NumPy's usual inference rules will be used. copy : bool, default True Whether to copy the data, even if not necessary. Depending on the type of `data`, creating the new array may require copying data, even if ``copy=False``. Returns ------- ExtensionArray The newly created array. Raises ------ ValueError When `data` is not 1-dimensional. See Also -------- numpy.array : Construct a NumPy array. Series : Construct a pandas Series. Index : Construct a pandas Index. arrays.NumpyExtensionArray : ExtensionArray wrapping a NumPy array. Series.array : Extract the array stored within a Series. Notes ----- Omitting the `dtype` argument means pandas will attempt to infer the best array type from the values in the data. As new array types are added by pandas and 3rd party libraries, the "best" array type may change. We recommend specifying `dtype` to ensure that 1. the correct array type for the data is returned 2. the returned array type doesn't change as new extension types are added by pandas and third-party libraries Additionally, if the underlying memory representation of the returned array matters, we recommend specifying the `dtype` as a concrete object rather than a string alias or allowing it to be inferred. For example, a future version of pandas or a 3rd-party library may include a dedicated ExtensionArray for string data. In this event, the following would no longer return a :class:`arrays.NumpyExtensionArray` backed by a NumPy array. >>> pd.array(['a', 'b'], dtype=str) <NumpyExtensionArray> ['a', 'b'] Length: 2, dtype: str32 This would instead return the new ExtensionArray dedicated for string data. 
If you really need the new array to be backed by a NumPy array, specify that in the dtype. >>> pd.array(['a', 'b'], dtype=np.dtype("<U1")) <NumpyExtensionArray> ['a', 'b'] Length: 2, dtype: str32 Finally, Pandas has arrays that mostly overlap with NumPy * :class:`arrays.DatetimeArray` * :class:`arrays.TimedeltaArray` When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray`` rather than a ``NumpyExtensionArray``. This is for symmetry with the case of timezone-aware data, which NumPy does not natively support. >>> pd.array(['2015', '2016'], dtype='datetime64[ns]') <DatetimeArray> ['2015-01-01 00:00:00', '2016-01-01 00:00:00'] Length: 2, dtype: datetime64[ns] >>> pd.array(["1h", "2h"], dtype='timedelta64[ns]') <TimedeltaArray> ['0 days 01:00:00', '0 days 02:00:00'] Length: 2, dtype: timedelta64[ns] Examples -------- If a dtype is not specified, pandas will infer the best dtype from the values. See the description of `dtype` for the types pandas infers for. >>> pd.array([1, 2]) <IntegerArray> [1, 2] Length: 2, dtype: Int64 >>> pd.array([1, 2, np.nan]) <IntegerArray> [1, 2, <NA>] Length: 3, dtype: Int64 >>> pd.array([1.1, 2.2]) <FloatingArray> [1.1, 2.2] Length: 2, dtype: Float64 >>> pd.array(["a", None, "c"]) <StringArray> ['a', <NA>, 'c'] Length: 3, dtype: string >>> with pd.option_context("string_storage", "pyarrow"): ... arr = pd.array(["a", None, "c"]) ... >>> arr <ArrowStringArray> ['a', <NA>, 'c'] Length: 3, dtype: string >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")]) <PeriodArray> ['2000-01-01', '2000-01-01'] Length: 2, dtype: period[D] You can use the string alias for `dtype` >>> pd.array(['a', 'b', 'a'], dtype='category') ['a', 'b', 'a'] Categories (2, object): ['a', 'b'] Or specify the actual dtype >>> pd.array(['a', 'b', 'a'], ... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True)) ['a', 'b', 'a'] Categories (3, object): ['a' < 'b' < 'c'] If pandas does not infer a dedicated extension type a :class:`arrays.NumpyExtensionArray` is returned. >>> pd.array([1 + 1j, 3 + 2j]) <NumpyExtensionArray> [(1+1j), (3+2j)] Length: 2, dtype: complex128 As mentioned in the "Notes" section, new extension types may be added in the future (by pandas or 3rd party libraries), causing the return value to no longer be a :class:`arrays.NumpyExtensionArray`. Specify the `dtype` as a NumPy dtype if you need to ensure there's no future change in behavior. >>> pd.array([1, 2], dtype=np.dtype("int32")) <NumpyExtensionArray> [1, 2] Length: 2, dtype: int32 `data` must be 1-dimensional. A ValueError is raised when the input has the wrong dimensionality. >>> pd.array(1) Traceback (most recent call last): ... ValueError: Cannot pass scalar '1' to 'pandas.array'.
def array( data: Sequence[object] | AnyArrayLike, dtype: Dtype | None = None, copy: bool = True, ) -> ExtensionArray: """ Create an array. Parameters ---------- data : Sequence of objects The scalars inside `data` should be instances of the scalar type for `dtype`. It's expected that `data` represents a 1-dimensional array of data. When `data` is an Index or Series, the underlying array will be extracted from `data`. dtype : str, np.dtype, or ExtensionDtype, optional The dtype to use for the array. This may be a NumPy dtype or an extension type registered with pandas using :meth:`pandas.api.extensions.register_extension_dtype`. If not specified, there are two possibilities: 1. When `data` is a :class:`Series`, :class:`Index`, or :class:`ExtensionArray`, the `dtype` will be taken from the data. 2. Otherwise, pandas will attempt to infer the `dtype` from the data. Note that when `data` is a NumPy array, ``data.dtype`` is *not* used for inferring the array type. This is because NumPy cannot represent all the types of data that can be held in extension arrays. Currently, pandas will infer an extension dtype for sequences of ============================== ======================================= Scalar Type Array Type ============================== ======================================= :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray` :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` :class:`int` :class:`pandas.arrays.IntegerArray` :class:`float` :class:`pandas.arrays.FloatingArray` :class:`str` :class:`pandas.arrays.StringArray` or :class:`pandas.arrays.ArrowStringArray` :class:`bool` :class:`pandas.arrays.BooleanArray` ============================== ======================================= The ExtensionArray created when the scalar type is :class:`str` is determined by ``pd.options.mode.string_storage`` if the dtype is not explicitly given. For all other cases, NumPy's usual inference rules will be used. copy : bool, default True Whether to copy the data, even if not necessary. Depending on the type of `data`, creating the new array may require copying data, even if ``copy=False``. Returns ------- ExtensionArray The newly created array. Raises ------ ValueError When `data` is not 1-dimensional. See Also -------- numpy.array : Construct a NumPy array. Series : Construct a pandas Series. Index : Construct a pandas Index. arrays.NumpyExtensionArray : ExtensionArray wrapping a NumPy array. Series.array : Extract the array stored within a Series. Notes ----- Omitting the `dtype` argument means pandas will attempt to infer the best array type from the values in the data. As new array types are added by pandas and 3rd party libraries, the "best" array type may change. We recommend specifying `dtype` to ensure that 1. the correct array type for the data is returned 2. the returned array type doesn't change as new extension types are added by pandas and third-party libraries Additionally, if the underlying memory representation of the returned array matters, we recommend specifying the `dtype` as a concrete object rather than a string alias or allowing it to be inferred. For example, a future version of pandas or a 3rd-party library may include a dedicated ExtensionArray for string data. In this event, the following would no longer return a :class:`arrays.NumpyExtensionArray` backed by a NumPy array. 
>>> pd.array(['a', 'b'], dtype=str) <NumpyExtensionArray> ['a', 'b'] Length: 2, dtype: str32 This would instead return the new ExtensionArray dedicated for string data. If you really need the new array to be backed by a NumPy array, specify that in the dtype. >>> pd.array(['a', 'b'], dtype=np.dtype("<U1")) <NumpyExtensionArray> ['a', 'b'] Length: 2, dtype: str32 Finally, Pandas has arrays that mostly overlap with NumPy * :class:`arrays.DatetimeArray` * :class:`arrays.TimedeltaArray` When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray`` rather than a ``NumpyExtensionArray``. This is for symmetry with the case of timezone-aware data, which NumPy does not natively support. >>> pd.array(['2015', '2016'], dtype='datetime64[ns]') <DatetimeArray> ['2015-01-01 00:00:00', '2016-01-01 00:00:00'] Length: 2, dtype: datetime64[ns] >>> pd.array(["1h", "2h"], dtype='timedelta64[ns]') <TimedeltaArray> ['0 days 01:00:00', '0 days 02:00:00'] Length: 2, dtype: timedelta64[ns] Examples -------- If a dtype is not specified, pandas will infer the best dtype from the values. See the description of `dtype` for the types pandas infers for. >>> pd.array([1, 2]) <IntegerArray> [1, 2] Length: 2, dtype: Int64 >>> pd.array([1, 2, np.nan]) <IntegerArray> [1, 2, <NA>] Length: 3, dtype: Int64 >>> pd.array([1.1, 2.2]) <FloatingArray> [1.1, 2.2] Length: 2, dtype: Float64 >>> pd.array(["a", None, "c"]) <StringArray> ['a', <NA>, 'c'] Length: 3, dtype: string >>> with pd.option_context("string_storage", "pyarrow"): ... arr = pd.array(["a", None, "c"]) ... >>> arr <ArrowStringArray> ['a', <NA>, 'c'] Length: 3, dtype: string >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")]) <PeriodArray> ['2000-01-01', '2000-01-01'] Length: 2, dtype: period[D] You can use the string alias for `dtype` >>> pd.array(['a', 'b', 'a'], dtype='category') ['a', 'b', 'a'] Categories (2, object): ['a', 'b'] Or specify the actual dtype >>> pd.array(['a', 'b', 'a'], ... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True)) ['a', 'b', 'a'] Categories (3, object): ['a' < 'b' < 'c'] If pandas does not infer a dedicated extension type a :class:`arrays.NumpyExtensionArray` is returned. >>> pd.array([1 + 1j, 3 + 2j]) <NumpyExtensionArray> [(1+1j), (3+2j)] Length: 2, dtype: complex128 As mentioned in the "Notes" section, new extension types may be added in the future (by pandas or 3rd party libraries), causing the return value to no longer be a :class:`arrays.NumpyExtensionArray`. Specify the `dtype` as a NumPy dtype if you need to ensure there's no future change in behavior. >>> pd.array([1, 2], dtype=np.dtype("int32")) <NumpyExtensionArray> [1, 2] Length: 2, dtype: int32 `data` must be 1-dimensional. A ValueError is raised when the input has the wrong dimensionality. >>> pd.array(1) Traceback (most recent call last): ... ValueError: Cannot pass scalar '1' to 'pandas.array'. """ from pandas.core.arrays import ( BooleanArray, DatetimeArray, ExtensionArray, FloatingArray, IntegerArray, IntervalArray, NumpyExtensionArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.string_ import StringDtype if lib.is_scalar(data): msg = f"Cannot pass scalar '{data}' to 'pandas.array'." 
raise ValueError(msg) elif isinstance(data, ABCDataFrame): raise TypeError("Cannot pass DataFrame to 'pandas.array'") if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)): # Note: we exclude np.ndarray here, will do type inference on it dtype = data.dtype data = extract_array(data, extract_numpy=True) # this returns None for not-found dtypes. if dtype is not None: dtype = pandas_dtype(dtype) if isinstance(data, ExtensionArray) and (dtype is None or data.dtype == dtype): # e.g. TimedeltaArray[s], avoid casting to NumpyExtensionArray if copy: return data.copy() return data if isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() return cls._from_sequence(data, dtype=dtype, copy=copy) if dtype is None: inferred_dtype = lib.infer_dtype(data, skipna=True) if inferred_dtype == "period": period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data) return PeriodArray._from_sequence(period_data, copy=copy) elif inferred_dtype == "interval": return IntervalArray(data, copy=copy) elif inferred_dtype.startswith("datetime"): # datetime, datetime64 try: return DatetimeArray._from_sequence(data, copy=copy) except ValueError: # Mixture of timezones, fall back to NumpyExtensionArray pass elif inferred_dtype.startswith("timedelta"): # timedelta, timedelta64 return TimedeltaArray._from_sequence(data, copy=copy) elif inferred_dtype == "string": # StringArray/ArrowStringArray depending on pd.options.mode.string_storage dtype = StringDtype() cls = dtype.construct_array_type() return cls._from_sequence(data, dtype=dtype, copy=copy) elif inferred_dtype == "integer": return IntegerArray._from_sequence(data, copy=copy) elif inferred_dtype == "empty" and not hasattr(data, "dtype") and not len(data): return FloatingArray._from_sequence(data, copy=copy) elif ( inferred_dtype in ("floating", "mixed-integer-float") and getattr(data, "dtype", None) != np.float16 ): # GH#44715 Exclude np.float16 bc FloatingArray does not support it; # we will fall back to NumpyExtensionArray. return FloatingArray._from_sequence(data, copy=copy) elif inferred_dtype == "boolean": return BooleanArray._from_sequence(data, dtype="boolean", copy=copy) # Pandas overrides NumPy for # 1. datetime64[ns,us,ms,s] # 2. timedelta64[ns,us,ms,s] # so that a DatetimeArray is returned. if lib.is_np_dtype(dtype, "M") and is_supported_dtype(dtype): return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy) if lib.is_np_dtype(dtype, "m") and is_supported_dtype(dtype): return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy) elif lib.is_np_dtype(dtype, "mM"): warnings.warn( r"datetime64 and timedelta64 dtype resolutions other than " r"'s', 'ms', 'us', and 'ns' are deprecated. " r"In future releases passing unsupported resolutions will " r"raise an exception.", FutureWarning, stacklevel=find_stack_level(), ) return NumpyExtensionArray._from_sequence(data, dtype=dtype, copy=copy)
(data: 'Sequence[object] | AnyArrayLike', dtype: 'Dtype | None' = None, copy: 'bool' = True) -> 'ExtensionArray'
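One behaviour of pd.array that the docstring above does not show but the GH#44715 branch in the code does: float16 input is excluded from FloatingArray and falls back to a NumpyExtensionArray. A hedged check, assuming a recent pandas:

import numpy as np
import pandas as pd

print(type(pd.array(np.array([1.0, 2.0]))).__name__)                    # FloatingArray
print(type(pd.array(np.array([1.0, 2.0], dtype=np.float16))).__name__)  # NumpyExtensionArray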
68,318
pandas.core.indexes.datetimes
bdate_range
Return a fixed frequency DatetimeIndex with business day as the default. Parameters ---------- start : str or datetime-like, default None Left bound for generating dates. end : str or datetime-like, default None Right bound for generating dates. periods : int, default None Number of periods to generate. freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'B' Frequency strings can have multiples, e.g. '5h'. The default is business daily ('B'). tz : str or None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing. normalize : bool, default False Normalize start/end dates to midnight before generating date range. name : str, default None Name of the resulting DatetimeIndex. weekmask : str or None, default None Weekmask of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. The default value None is equivalent to 'Mon Tue Wed Thu Fri'. holidays : list-like or None, default None Dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; Whether to set each bound as closed or open. .. versionadded:: 1.4.0 **kwargs For compatibility. Has no effect on the result. Returns ------- DatetimeIndex Notes ----- Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. Specifying ``freq`` is a requirement for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not desired. To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Note how the two weekend days are skipped in the result. >>> pd.bdate_range(start='1/1/2018', end='1/08/2018') DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-08'], dtype='datetime64[ns]', freq='B')
def bdate_range( start=None, end=None, periods: int | None = None, freq: Frequency | dt.timedelta = "B", tz=None, normalize: bool = True, name: Hashable | None = None, weekmask=None, holidays=None, inclusive: IntervalClosedType = "both", **kwargs, ) -> DatetimeIndex: """ Return a fixed frequency DatetimeIndex with business day as the default. Parameters ---------- start : str or datetime-like, default None Left bound for generating dates. end : str or datetime-like, default None Right bound for generating dates. periods : int, default None Number of periods to generate. freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'B' Frequency strings can have multiples, e.g. '5h'. The default is business daily ('B'). tz : str or None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing. normalize : bool, default False Normalize start/end dates to midnight before generating date range. name : str, default None Name of the resulting DatetimeIndex. weekmask : str or None, default None Weekmask of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. The default value None is equivalent to 'Mon Tue Wed Thu Fri'. holidays : list-like or None, default None Dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; Whether to set each bound as closed or open. .. versionadded:: 1.4.0 **kwargs For compatibility. Has no effect on the result. Returns ------- DatetimeIndex Notes ----- Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. Specifying ``freq`` is a requirement for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not desired. To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Note how the two weekend days are skipped in the result. >>> pd.bdate_range(start='1/1/2018', end='1/08/2018') DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-08'], dtype='datetime64[ns]', freq='B') """ if freq is None: msg = "freq must be specified for bdate_range; use date_range instead" raise TypeError(msg) if isinstance(freq, str) and freq.startswith("C"): try: weekmask = weekmask or "Mon Tue Wed Thu Fri" freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask) except (KeyError, TypeError) as err: msg = f"invalid custom frequency string: {freq}" raise ValueError(msg) from err elif holidays or weekmask: msg = ( "a custom frequency string is required when holidays or " f"weekmask are passed, got frequency {freq}" ) raise ValueError(msg) return date_range( start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name, inclusive=inclusive, **kwargs, )
(start=None, end=None, periods: 'int | None' = None, freq: 'Frequency | dt.timedelta' = 'B', tz=None, normalize: 'bool' = True, name: 'Hashable | None' = None, weekmask=None, holidays=None, inclusive: 'IntervalClosedType' = 'both', **kwargs) -> 'DatetimeIndex'
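The weekmask and holidays arguments only take effect with a custom-business-day frequency string; a sketch assuming freq="C". Note also that the signature shown defaults normalize=True, so pass normalize=False explicitly if midnight normalization is not wanted.

import pandas as pd

idx = pd.bdate_range(
    start="2018-01-01",
    end="2018-01-08",
    freq="C",                       # custom business day
    weekmask="Mon Tue Wed Thu Fri",
    holidays=["2018-01-02"],
)
print(idx)  # 2018-01-01, 2018-01-03, 2018-01-04, 2018-01-05, 2018-01-08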
68,320
pandas.core.reshape.concat
concat
Concatenate pandas objects along a particular axis. Allows optional set logic along the other axes. Can also add a layer of hierarchical indexing on the concatenation axis, which may be useful if the labels are the same (or overlapping) on the passed axis number. Parameters ---------- objs : a sequence or mapping of Series or DataFrame objects If a mapping is passed, the sorted keys will be used as the `keys` argument, unless it is passed, in which case the values will be selected (see below). Any None objects will be dropped silently unless they are all None in which case a ValueError will be raised. axis : {0/'index', 1/'columns'}, default 0 The axis to concatenate along. join : {'inner', 'outer'}, default 'outer' How to handle indexes on other axis (or axes). ignore_index : bool, default False If True, do not use the index values along the concatenation axis. The resulting axis will be labeled 0, ..., n - 1. This is useful if you are concatenating objects where the concatenation axis does not have meaningful indexing information. Note the index values on the other axes are still respected in the join. keys : sequence, default None If multiple levels passed, should contain tuples. Construct hierarchical index using the passed keys as the outermost level. levels : list of sequences, default None Specific levels (unique values) to use for constructing a MultiIndex. Otherwise they will be inferred from the keys. names : list, default None Names for the levels in the resulting hierarchical index. verify_integrity : bool, default False Check whether the new concatenated axis contains duplicates. This can be very expensive relative to the actual data concatenation. sort : bool, default False Sort non-concatenation axis if it is not already aligned. One exception to this is when the non-concatentation axis is a DatetimeIndex and join='outer' and the axis is not already aligned. In that case, the non-concatenation axis is always sorted lexicographically. copy : bool, default True If False, do not copy data unnecessarily. Returns ------- object, type of objs When concatenating all ``Series`` along the index (axis=0), a ``Series`` is returned. When ``objs`` contains at least one ``DataFrame``, a ``DataFrame`` is returned. When concatenating along the columns (axis=1), a ``DataFrame`` is returned. See Also -------- DataFrame.join : Join DataFrames using indexes. DataFrame.merge : Merge DataFrames by indexes or columns. Notes ----- The keys, levels, and names arguments are all optional. A walkthrough of how this method fits in with other tools for combining pandas objects can be found `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html>`__. It is not recommended to build DataFrames by adding single rows in a for loop. Build a list of rows and make a DataFrame in a single concat. Examples -------- Combine two ``Series``. >>> s1 = pd.Series(['a', 'b']) >>> s2 = pd.Series(['c', 'd']) >>> pd.concat([s1, s2]) 0 a 1 b 0 c 1 d dtype: object Clear the existing index and reset it in the result by setting the ``ignore_index`` option to ``True``. >>> pd.concat([s1, s2], ignore_index=True) 0 a 1 b 2 c 3 d dtype: object Add a hierarchical index at the outermost level of the data with the ``keys`` option. >>> pd.concat([s1, s2], keys=['s1', 's2']) s1 0 a 1 b s2 0 c 1 d dtype: object Label the index keys you create with the ``names`` option. >>> pd.concat([s1, s2], keys=['s1', 's2'], ... 
names=['Series name', 'Row ID']) Series name Row ID s1 0 a 1 b s2 0 c 1 d dtype: object Combine two ``DataFrame`` objects with identical columns. >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], ... columns=['letter', 'number']) >>> df1 letter number 0 a 1 1 b 2 >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], ... columns=['letter', 'number']) >>> df2 letter number 0 c 3 1 d 4 >>> pd.concat([df1, df2]) letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects with overlapping columns and return everything. Columns outside the intersection will be filled with ``NaN`` values. >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], ... columns=['letter', 'number', 'animal']) >>> df3 letter number animal 0 c 3 cat 1 d 4 dog >>> pd.concat([df1, df3], sort=False) letter number animal 0 a 1 NaN 1 b 2 NaN 0 c 3 cat 1 d 4 dog Combine ``DataFrame`` objects with overlapping columns and return only those that are shared by passing ``inner`` to the ``join`` keyword argument. >>> pd.concat([df1, df3], join="inner") letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects horizontally along the x axis by passing in ``axis=1``. >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], ... columns=['animal', 'name']) >>> pd.concat([df1, df4], axis=1) letter number animal name 0 a 1 bird polly 1 b 2 monkey george Prevent the result from including duplicate index values with the ``verify_integrity`` option. >>> df5 = pd.DataFrame([1], index=['a']) >>> df5 0 a 1 >>> df6 = pd.DataFrame([2], index=['a']) >>> df6 0 a 2 >>> pd.concat([df5, df6], verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: ['a'] Append a single row to the end of a ``DataFrame`` object. >>> df7 = pd.DataFrame({'a': 1, 'b': 2}, index=[0]) >>> df7 a b 0 1 2 >>> new_row = pd.Series({'a': 3, 'b': 4}) >>> new_row a 3 b 4 dtype: int64 >>> pd.concat([df7, new_row.to_frame().T], ignore_index=True) a b 0 1 2 1 3 4
def concat( objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], *, axis: Axis = 0, join: str = "outer", ignore_index: bool = False, keys: Iterable[Hashable] | None = None, levels=None, names: list[HashableT] | None = None, verify_integrity: bool = False, sort: bool = False, copy: bool | None = None, ) -> DataFrame | Series: """ Concatenate pandas objects along a particular axis. Allows optional set logic along the other axes. Can also add a layer of hierarchical indexing on the concatenation axis, which may be useful if the labels are the same (or overlapping) on the passed axis number. Parameters ---------- objs : a sequence or mapping of Series or DataFrame objects If a mapping is passed, the sorted keys will be used as the `keys` argument, unless it is passed, in which case the values will be selected (see below). Any None objects will be dropped silently unless they are all None in which case a ValueError will be raised. axis : {0/'index', 1/'columns'}, default 0 The axis to concatenate along. join : {'inner', 'outer'}, default 'outer' How to handle indexes on other axis (or axes). ignore_index : bool, default False If True, do not use the index values along the concatenation axis. The resulting axis will be labeled 0, ..., n - 1. This is useful if you are concatenating objects where the concatenation axis does not have meaningful indexing information. Note the index values on the other axes are still respected in the join. keys : sequence, default None If multiple levels passed, should contain tuples. Construct hierarchical index using the passed keys as the outermost level. levels : list of sequences, default None Specific levels (unique values) to use for constructing a MultiIndex. Otherwise they will be inferred from the keys. names : list, default None Names for the levels in the resulting hierarchical index. verify_integrity : bool, default False Check whether the new concatenated axis contains duplicates. This can be very expensive relative to the actual data concatenation. sort : bool, default False Sort non-concatenation axis if it is not already aligned. One exception to this is when the non-concatentation axis is a DatetimeIndex and join='outer' and the axis is not already aligned. In that case, the non-concatenation axis is always sorted lexicographically. copy : bool, default True If False, do not copy data unnecessarily. Returns ------- object, type of objs When concatenating all ``Series`` along the index (axis=0), a ``Series`` is returned. When ``objs`` contains at least one ``DataFrame``, a ``DataFrame`` is returned. When concatenating along the columns (axis=1), a ``DataFrame`` is returned. See Also -------- DataFrame.join : Join DataFrames using indexes. DataFrame.merge : Merge DataFrames by indexes or columns. Notes ----- The keys, levels, and names arguments are all optional. A walkthrough of how this method fits in with other tools for combining pandas objects can be found `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html>`__. It is not recommended to build DataFrames by adding single rows in a for loop. Build a list of rows and make a DataFrame in a single concat. Examples -------- Combine two ``Series``. >>> s1 = pd.Series(['a', 'b']) >>> s2 = pd.Series(['c', 'd']) >>> pd.concat([s1, s2]) 0 a 1 b 0 c 1 d dtype: object Clear the existing index and reset it in the result by setting the ``ignore_index`` option to ``True``. 
>>> pd.concat([s1, s2], ignore_index=True) 0 a 1 b 2 c 3 d dtype: object Add a hierarchical index at the outermost level of the data with the ``keys`` option. >>> pd.concat([s1, s2], keys=['s1', 's2']) s1 0 a 1 b s2 0 c 1 d dtype: object Label the index keys you create with the ``names`` option. >>> pd.concat([s1, s2], keys=['s1', 's2'], ... names=['Series name', 'Row ID']) Series name Row ID s1 0 a 1 b s2 0 c 1 d dtype: object Combine two ``DataFrame`` objects with identical columns. >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], ... columns=['letter', 'number']) >>> df1 letter number 0 a 1 1 b 2 >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], ... columns=['letter', 'number']) >>> df2 letter number 0 c 3 1 d 4 >>> pd.concat([df1, df2]) letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects with overlapping columns and return everything. Columns outside the intersection will be filled with ``NaN`` values. >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], ... columns=['letter', 'number', 'animal']) >>> df3 letter number animal 0 c 3 cat 1 d 4 dog >>> pd.concat([df1, df3], sort=False) letter number animal 0 a 1 NaN 1 b 2 NaN 0 c 3 cat 1 d 4 dog Combine ``DataFrame`` objects with overlapping columns and return only those that are shared by passing ``inner`` to the ``join`` keyword argument. >>> pd.concat([df1, df3], join="inner") letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects horizontally along the x axis by passing in ``axis=1``. >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], ... columns=['animal', 'name']) >>> pd.concat([df1, df4], axis=1) letter number animal name 0 a 1 bird polly 1 b 2 monkey george Prevent the result from including duplicate index values with the ``verify_integrity`` option. >>> df5 = pd.DataFrame([1], index=['a']) >>> df5 0 a 1 >>> df6 = pd.DataFrame([2], index=['a']) >>> df6 0 a 2 >>> pd.concat([df5, df6], verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: ['a'] Append a single row to the end of a ``DataFrame`` object. >>> df7 = pd.DataFrame({'a': 1, 'b': 2}, index=[0]) >>> df7 a b 0 1 2 >>> new_row = pd.Series({'a': 3, 'b': 4}) >>> new_row a 3 b 4 dtype: int64 >>> pd.concat([df7, new_row.to_frame().T], ignore_index=True) a b 0 1 2 1 3 4 """ if copy is None: if using_copy_on_write(): copy = False else: copy = True elif copy and using_copy_on_write(): copy = False op = _Concatenator( objs, axis=axis, ignore_index=ignore_index, join=join, keys=keys, levels=levels, names=names, verify_integrity=verify_integrity, copy=copy, sort=sort, ) return op.get_result()
(objs: 'Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame]', *, axis: 'Axis' = 0, join: 'str' = 'outer', ignore_index: 'bool' = False, keys: 'Iterable[Hashable] | None' = None, levels=None, names: 'list[HashableT] | None' = None, verify_integrity: 'bool' = False, sort: 'bool' = False, copy: 'bool | None' = None) -> 'DataFrame | Series'
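The concat docstring above notes that a mapping's sorted keys become the keys argument but gives no example; a short sketch:

import pandas as pd

df1 = pd.DataFrame({"a": [1, 2]})
df2 = pd.DataFrame({"a": [3, 4]})
# dict input: keys are sorted, then used as the outer level of a MultiIndex,
# so the 'first' block appears before 'second' regardless of insertion order
print(pd.concat({"second": df2, "first": df1}))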
68,322
pandas.core.reshape.pivot
crosstab
Compute a simple cross tabulation of two (or more) factors. By default, computes a frequency table of the factors unless an array of values and an aggregation function are passed. Parameters ---------- index : array-like, Series, or list of arrays/Series Values to group by in the rows. columns : array-like, Series, or list of arrays/Series Values to group by in the columns. values : array-like, optional Array of values to aggregate according to the factors. Requires `aggfunc` be specified. rownames : sequence, default None If passed, must match number of row arrays passed. colnames : sequence, default None If passed, must match number of column arrays passed. aggfunc : function, optional If specified, requires `values` be specified as well. margins : bool, default False Add row/column margins (subtotals). margins_name : str, default 'All' Name of the row/column that will contain the totals when margins is True. dropna : bool, default True Do not include columns whose entries are all NaN. normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False Normalize by dividing all values by the sum of values. - If passed 'all' or `True`, will normalize over all values. - If passed 'index' will normalize over each row. - If passed 'columns' will normalize over each column. - If margins is `True`, will also normalize margin values. Returns ------- DataFrame Cross tabulation of the data. See Also -------- DataFrame.pivot : Reshape data based on column values. pivot_table : Create a pivot table as a DataFrame. Notes ----- Any Series passed will have their name attributes used unless row or column names for the cross-tabulation are specified. Any input passed containing Categorical data will have **all** of its categories included in the cross-tabulation, even if the actual data does not contain any instances of a particular category. In the event that there aren't overlapping indexes an empty DataFrame will be returned. Reference :ref:`the user guide <reshaping.crosstabulations>` for more examples. Examples -------- >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar", ... "bar", "bar", "foo", "foo", "foo"], dtype=object) >>> b = np.array(["one", "one", "one", "two", "one", "one", ... "one", "two", "two", "two", "one"], dtype=object) >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", ... "shiny", "dull", "shiny", "shiny", "shiny"], ... dtype=object) >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) b one two c dull shiny dull shiny a bar 1 2 1 0 foo 2 2 1 2 Here 'c' and 'f' are not represented in the data and will not be shown in the output because dropna is True by default. Set dropna=False to preserve categories with no data. >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) >>> pd.crosstab(foo, bar) col_0 d e row_0 a 1 0 b 0 1 >>> pd.crosstab(foo, bar, dropna=False) col_0 d e f row_0 a 1 0 0 b 0 1 0 c 0 0 0
def crosstab( index, columns, values=None, rownames=None, colnames=None, aggfunc=None, margins: bool = False, margins_name: Hashable = "All", dropna: bool = True, normalize: bool | Literal[0, 1, "all", "index", "columns"] = False, ) -> DataFrame: """ Compute a simple cross tabulation of two (or more) factors. By default, computes a frequency table of the factors unless an array of values and an aggregation function are passed. Parameters ---------- index : array-like, Series, or list of arrays/Series Values to group by in the rows. columns : array-like, Series, or list of arrays/Series Values to group by in the columns. values : array-like, optional Array of values to aggregate according to the factors. Requires `aggfunc` be specified. rownames : sequence, default None If passed, must match number of row arrays passed. colnames : sequence, default None If passed, must match number of column arrays passed. aggfunc : function, optional If specified, requires `values` be specified as well. margins : bool, default False Add row/column margins (subtotals). margins_name : str, default 'All' Name of the row/column that will contain the totals when margins is True. dropna : bool, default True Do not include columns whose entries are all NaN. normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False Normalize by dividing all values by the sum of values. - If passed 'all' or `True`, will normalize over all values. - If passed 'index' will normalize over each row. - If passed 'columns' will normalize over each column. - If margins is `True`, will also normalize margin values. Returns ------- DataFrame Cross tabulation of the data. See Also -------- DataFrame.pivot : Reshape data based on column values. pivot_table : Create a pivot table as a DataFrame. Notes ----- Any Series passed will have their name attributes used unless row or column names for the cross-tabulation are specified. Any input passed containing Categorical data will have **all** of its categories included in the cross-tabulation, even if the actual data does not contain any instances of a particular category. In the event that there aren't overlapping indexes an empty DataFrame will be returned. Reference :ref:`the user guide <reshaping.crosstabulations>` for more examples. Examples -------- >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar", ... "bar", "bar", "foo", "foo", "foo"], dtype=object) >>> b = np.array(["one", "one", "one", "two", "one", "one", ... "one", "two", "two", "two", "one"], dtype=object) >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", ... "shiny", "dull", "shiny", "shiny", "shiny"], ... dtype=object) >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) b one two c dull shiny dull shiny a bar 1 2 1 0 foo 2 2 1 2 Here 'c' and 'f' are not represented in the data and will not be shown in the output because dropna is True by default. Set dropna=False to preserve categories with no data. 
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) >>> pd.crosstab(foo, bar) col_0 d e row_0 a 1 0 b 0 1 >>> pd.crosstab(foo, bar, dropna=False) col_0 d e f row_0 a 1 0 0 b 0 1 0 c 0 0 0 """ if values is None and aggfunc is not None: raise ValueError("aggfunc cannot be used without values.") if values is not None and aggfunc is None: raise ValueError("values cannot be used without an aggfunc.") if not is_nested_list_like(index): index = [index] if not is_nested_list_like(columns): columns = [columns] common_idx = None pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))] if pass_objs: common_idx = get_objs_combined_axis(pass_objs, intersect=True, sort=False) rownames = _get_names(index, rownames, prefix="row") colnames = _get_names(columns, colnames, prefix="col") # duplicate names mapped to unique names for pivot op ( rownames_mapper, unique_rownames, colnames_mapper, unique_colnames, ) = _build_names_mapper(rownames, colnames) from pandas import DataFrame data = { **dict(zip(unique_rownames, index)), **dict(zip(unique_colnames, columns)), } df = DataFrame(data, index=common_idx) if values is None: df["__dummy__"] = 0 kwargs = {"aggfunc": len, "fill_value": 0} else: df["__dummy__"] = values kwargs = {"aggfunc": aggfunc} # error: Argument 7 to "pivot_table" of "DataFrame" has incompatible type # "**Dict[str, object]"; expected "Union[...]" table = df.pivot_table( "__dummy__", index=unique_rownames, columns=unique_colnames, margins=margins, margins_name=margins_name, dropna=dropna, observed=False, **kwargs, # type: ignore[arg-type] ) # Post-process if normalize is not False: table = _normalize( table, normalize=normalize, margins=margins, margins_name=margins_name ) table = table.rename_axis(index=rownames_mapper, axis=0) table = table.rename_axis(columns=colnames_mapper, axis=1) return table
(index, columns, values=None, rownames=None, colnames=None, aggfunc=None, margins: 'bool' = False, margins_name: 'Hashable' = 'All', dropna: 'bool' = True, normalize: "bool | Literal[0, 1, 'all', 'index', 'columns']" = False) -> 'DataFrame'
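A short usage sketch for `pd.crosstab`, exercising the `margins` and `normalize` options described in the docstring above; the input arrays are illustrative, not taken from the source.

import numpy as np
import pandas as pd

a = np.array(["foo", "foo", "bar", "bar", "foo"], dtype=object)
b = np.array(["one", "two", "one", "two", "one"], dtype=object)

# Plain frequency table with row/column subtotals.
counts = pd.crosstab(a, b, rownames=["a"], colnames=["b"], margins=True)

# Row-wise proportions: each row sums to 1.
props = pd.crosstab(a, b, normalize="index")

print(counts, props, sep="\n\n")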
68,323
pandas.core.reshape.tile
cut
Bin values into discrete intervals. Use `cut` when you need to segment and sort data values into bins. This function is also useful for going from a continuous variable to a categorical variable. For example, `cut` could convert ages to groups of age ranges. Supports binning into an equal number of bins, or a pre-specified array of bins. Parameters ---------- x : array-like The input array to be binned. Must be 1-dimensional. bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. * int : Defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the minimum and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. * IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or False, default None Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. This affects the type of the output container (see below). This argument is ignored when `bins` is an IntervalIndex. If True, raises an error. When `ordered=False`, labels must be provided. retbins : bool, default False Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default 3 The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. ordered : bool, default True Whether the labels are ordered or not. Applies to returned types Categorical and Series (with Categorical dtype). If True, the resulting categorical will be ordered. If False, the resulting categorical will be unordered (labels must be provided). Returns ------- out : Categorical, Series, or ndarray An array-like object representing the respective bin for each value of `x`. The type depends on the value of `labels`. * None (default) : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are Interval dtype. * sequence of scalars : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are whatever the type in the sequence is. * False : returns an ndarray of integers. bins : numpy.ndarray or IntervalIndex. The computed or specified bins. Only returned when `retbins=True`. For scalar or sequence `bins`, this is an ndarray with the computed bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For an IntervalIndex `bins`, this is equal to `bins`. See Also -------- qcut : Discretize variable into equal-sized buckets based on rank or based on sample quantiles. Categorical : Array type for storing data that come from a fixed set of values. Series : One-dimensional array with axis labels (including time series). IntervalIndex : Immutable Index implementing an ordered, sliceable set. Notes ----- Any NA values will be NA in the result. Out of bounds values will be NA in the resulting Series or Categorical object. Reference :ref:`the user guide <reshaping.tile.cut>` for more examples. 
Examples -------- Discretize into three equal-sized bins. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3) ... # doctest: +ELLIPSIS [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ... >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True) ... # doctest: +ELLIPSIS ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ... array([0.994, 3. , 5. , 7. ])) Discovers the same bins, but assign them specific labels. Notice that the returned Categorical's categories are `labels` and is ordered. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), ... 3, labels=["bad", "medium", "good"]) ['bad', 'good', 'medium', 'medium', 'good', 'bad'] Categories (3, object): ['bad' < 'medium' < 'good'] ``ordered=False`` will result in unordered categories when labels are passed. This parameter can be used to allow non-unique labels: >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, ... labels=["B", "A", "B"], ordered=False) ['B', 'B', 'A', 'A', 'B', 'B'] Categories (2, object): ['A', 'B'] ``labels=False`` implies you just want the bins back. >>> pd.cut([0, 1, 1, 2], bins=4, labels=False) array([0, 1, 1, 3]) Passing a Series as an input returns a Series with categorical dtype: >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, 3) ... # doctest: +ELLIPSIS a (1.992, 4.667] b (1.992, 4.667] c (4.667, 7.333] d (7.333, 10.0] e (7.333, 10.0] dtype: category Categories (3, interval[float64, right]): [(1.992, 4.667] < (4.667, ... Passing a Series as an input returns a Series with mapping value. It is used to map numerically to intervals based on bins. >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False) ... # doctest: +ELLIPSIS (a 1.0 b 2.0 c 3.0 d 4.0 e NaN dtype: float64, array([ 0, 2, 4, 6, 8, 10])) Use `drop` optional when bins is not unique >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, ... right=False, duplicates='drop') ... # doctest: +ELLIPSIS (a 1.0 b 2.0 c 3.0 d 3.0 e NaN dtype: float64, array([ 0, 2, 4, 6, 10])) Passing an IntervalIndex for `bins` results in those categories exactly. Notice that values not covered by the IntervalIndex are set to NaN. 0 is to the left of the first bin (which is closed on the right), and 1.5 falls between two bins. >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]] Categories (3, interval[int64, right]): [(0, 1] < (2, 3] < (4, 5]]
def cut( x, bins, right: bool = True, labels=None, retbins: bool = False, precision: int = 3, include_lowest: bool = False, duplicates: str = "raise", ordered: bool = True, ): """ Bin values into discrete intervals. Use `cut` when you need to segment and sort data values into bins. This function is also useful for going from a continuous variable to a categorical variable. For example, `cut` could convert ages to groups of age ranges. Supports binning into an equal number of bins, or a pre-specified array of bins. Parameters ---------- x : array-like The input array to be binned. Must be 1-dimensional. bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. * int : Defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the minimum and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. * IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or False, default None Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. This affects the type of the output container (see below). This argument is ignored when `bins` is an IntervalIndex. If True, raises an error. When `ordered=False`, labels must be provided. retbins : bool, default False Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default 3 The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. ordered : bool, default True Whether the labels are ordered or not. Applies to returned types Categorical and Series (with Categorical dtype). If True, the resulting categorical will be ordered. If False, the resulting categorical will be unordered (labels must be provided). Returns ------- out : Categorical, Series, or ndarray An array-like object representing the respective bin for each value of `x`. The type depends on the value of `labels`. * None (default) : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are Interval dtype. * sequence of scalars : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are whatever the type in the sequence is. * False : returns an ndarray of integers. bins : numpy.ndarray or IntervalIndex. The computed or specified bins. Only returned when `retbins=True`. For scalar or sequence `bins`, this is an ndarray with the computed bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For an IntervalIndex `bins`, this is equal to `bins`. See Also -------- qcut : Discretize variable into equal-sized buckets based on rank or based on sample quantiles. Categorical : Array type for storing data that come from a fixed set of values. Series : One-dimensional array with axis labels (including time series). IntervalIndex : Immutable Index implementing an ordered, sliceable set. 
Notes ----- Any NA values will be NA in the result. Out of bounds values will be NA in the resulting Series or Categorical object. Reference :ref:`the user guide <reshaping.tile.cut>` for more examples. Examples -------- Discretize into three equal-sized bins. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3) ... # doctest: +ELLIPSIS [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ... >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True) ... # doctest: +ELLIPSIS ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ... array([0.994, 3. , 5. , 7. ])) Discovers the same bins, but assign them specific labels. Notice that the returned Categorical's categories are `labels` and is ordered. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), ... 3, labels=["bad", "medium", "good"]) ['bad', 'good', 'medium', 'medium', 'good', 'bad'] Categories (3, object): ['bad' < 'medium' < 'good'] ``ordered=False`` will result in unordered categories when labels are passed. This parameter can be used to allow non-unique labels: >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, ... labels=["B", "A", "B"], ordered=False) ['B', 'B', 'A', 'A', 'B', 'B'] Categories (2, object): ['A', 'B'] ``labels=False`` implies you just want the bins back. >>> pd.cut([0, 1, 1, 2], bins=4, labels=False) array([0, 1, 1, 3]) Passing a Series as an input returns a Series with categorical dtype: >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, 3) ... # doctest: +ELLIPSIS a (1.992, 4.667] b (1.992, 4.667] c (4.667, 7.333] d (7.333, 10.0] e (7.333, 10.0] dtype: category Categories (3, interval[float64, right]): [(1.992, 4.667] < (4.667, ... Passing a Series as an input returns a Series with mapping value. It is used to map numerically to intervals based on bins. >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False) ... # doctest: +ELLIPSIS (a 1.0 b 2.0 c 3.0 d 4.0 e NaN dtype: float64, array([ 0, 2, 4, 6, 8, 10])) Use `drop` optional when bins is not unique >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, ... right=False, duplicates='drop') ... # doctest: +ELLIPSIS (a 1.0 b 2.0 c 3.0 d 3.0 e NaN dtype: float64, array([ 0, 2, 4, 6, 10])) Passing an IntervalIndex for `bins` results in those categories exactly. Notice that values not covered by the IntervalIndex are set to NaN. 0 is to the left of the first bin (which is closed on the right), and 1.5 falls between two bins. 
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]] Categories (3, interval[int64, right]): [(0, 1] < (2, 3] < (4, 5]] """ # NOTE: this binning code is changed a bit from histogram for var(x) == 0 original = x x_idx = _preprocess_for_cut(x) x_idx, _ = _coerce_to_type(x_idx) if not np.iterable(bins): bins = _nbins_to_bins(x_idx, bins, right) elif isinstance(bins, IntervalIndex): if bins.is_overlapping: raise ValueError("Overlapping IntervalIndex is not accepted.") else: bins = Index(bins) if not bins.is_monotonic_increasing: raise ValueError("bins must increase monotonically.") fac, bins = _bins_to_cuts( x_idx, bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, duplicates=duplicates, ordered=ordered, ) return _postprocess_for_cut(fac, bins, retbins, original)
(x, bins, right: bool = True, labels=None, retbins: bool = False, precision: int = 3, include_lowest: bool = False, duplicates: str = 'raise', ordered: bool = True)
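An illustrative sketch of `pd.cut`, contrasting equal-width bins with explicit edges plus labels as covered above; the age values and label names are assumptions made for the example.

import pandas as pd

ages = pd.Series([5, 17, 25, 46, 63, 80])

# Three equal-width bins over the observed range.
equal_width = pd.cut(ages, bins=3)

# Explicit edges with readable labels; include_lowest=True makes the first
# interval left-inclusive so the value 0 would not fall out of range.
labeled = pd.cut(
    ages,
    bins=[0, 18, 65, 120],
    labels=["minor", "adult", "senior"],
    include_lowest=True,
)

print(equal_width, labeled, sep="\n\n")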
68,324
pandas.core.indexes.datetimes
date_range
Return a fixed frequency DatetimeIndex. Returns the range of equally spaced time points (where the difference between any two adjacent points is specified by the given frequency) such that they all satisfy `start <[=] x <[=] end`, where the first one and the last one are, resp., the first and last time points in that range that fall on the boundary of ``freq`` (if given as a frequency string) or that are valid for ``freq`` (if given as a :class:`pandas.tseries.offsets.DateOffset`). (If exactly one of ``start``, ``end``, or ``freq`` is *not* specified, this missing parameter can be computed given ``periods``, the number of timesteps in the range. See the note below.) Parameters ---------- start : str or datetime-like, optional Left bound for generating dates. end : str or datetime-like, optional Right bound for generating dates. periods : int, optional Number of periods to generate. freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D' Frequency strings can have multiples, e.g. '5h'. See :ref:`here <timeseries.offset_aliases>` for a list of frequency aliases. tz : str or tzinfo, optional Time zone name for returning localized DatetimeIndex, for example 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is timezone-naive unless timezone-aware datetime-likes are passed. normalize : bool, default False Normalize start/end dates to midnight before generating date range. name : str, default None Name of the resulting DatetimeIndex. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; Whether to set each bound as closed or open. .. versionadded:: 1.4.0 unit : str, default None Specify the desired resolution of the result. .. versionadded:: 2.0.0 **kwargs For compatibility. Has no effect on the result. Returns ------- DatetimeIndex See Also -------- DatetimeIndex : An immutable container for datetimes. timedelta_range : Return a fixed frequency TimedeltaIndex. period_range : Return a fixed frequency PeriodIndex. interval_range : Return a fixed frequency IntervalIndex. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``DatetimeIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end`` (closed on both sides). To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- **Specifying the values** The next four examples generate the same `DatetimeIndex`, but vary the combination of `start`, `end` and `periods`. Specify `start` and `end`, with the default daily frequency. >>> pd.date_range(start='1/1/2018', end='1/08/2018') DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], dtype='datetime64[ns]', freq='D') Specify timezone-aware `start` and `end`, with the default daily frequency. >>> pd.date_range( ... start=pd.to_datetime("1/1/2018").tz_localize("Europe/Berlin"), ... end=pd.to_datetime("1/08/2018").tz_localize("Europe/Berlin"), ... ) DatetimeIndex(['2018-01-01 00:00:00+01:00', '2018-01-02 00:00:00+01:00', '2018-01-03 00:00:00+01:00', '2018-01-04 00:00:00+01:00', '2018-01-05 00:00:00+01:00', '2018-01-06 00:00:00+01:00', '2018-01-07 00:00:00+01:00', '2018-01-08 00:00:00+01:00'], dtype='datetime64[ns, Europe/Berlin]', freq='D') Specify `start` and `periods`, the number of periods (days). 
>>> pd.date_range(start='1/1/2018', periods=8) DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], dtype='datetime64[ns]', freq='D') Specify `end` and `periods`, the number of periods (days). >>> pd.date_range(end='1/1/2018', periods=8) DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28', '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'], dtype='datetime64[ns]', freq='D') Specify `start`, `end`, and `periods`; the frequency is generated automatically (linearly spaced). >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3) DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00', '2018-04-27 00:00:00'], dtype='datetime64[ns]', freq=None) **Other Parameters** Changed the `freq` (frequency) to ``'ME'`` (month end frequency). >>> pd.date_range(start='1/1/2018', periods=5, freq='ME') DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30', '2018-05-31'], dtype='datetime64[ns]', freq='ME') Multiples are allowed >>> pd.date_range(start='1/1/2018', periods=5, freq='3ME') DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', '2019-01-31'], dtype='datetime64[ns]', freq='3ME') `freq` can also be specified as an Offset object. >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3)) DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', '2019-01-31'], dtype='datetime64[ns]', freq='3ME') Specify `tz` to set the timezone. >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo') DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00', '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00', '2018-01-05 00:00:00+09:00'], dtype='datetime64[ns, Asia/Tokyo]', freq='D') `inclusive` controls whether to include `start` and `end` that are on the boundary. The default, "both", includes boundary points on either end. >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive="both") DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq='D') Use ``inclusive='left'`` to exclude `end` if it falls on the boundary. >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='left') DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'], dtype='datetime64[ns]', freq='D') Use ``inclusive='right'`` to exclude `start` if it falls on the boundary, and similarly ``inclusive='neither'`` will exclude both `start` and `end`. >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='right') DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq='D') **Specify a unit** >>> pd.date_range(start="2017-01-01", periods=10, freq="100YS", unit="s") DatetimeIndex(['2017-01-01', '2117-01-01', '2217-01-01', '2317-01-01', '2417-01-01', '2517-01-01', '2617-01-01', '2717-01-01', '2817-01-01', '2917-01-01'], dtype='datetime64[s]', freq='100YS-JAN')
def date_range( start=None, end=None, periods=None, freq=None, tz=None, normalize: bool = False, name: Hashable | None = None, inclusive: IntervalClosedType = "both", *, unit: str | None = None, **kwargs, ) -> DatetimeIndex: """ Return a fixed frequency DatetimeIndex. Returns the range of equally spaced time points (where the difference between any two adjacent points is specified by the given frequency) such that they all satisfy `start <[=] x <[=] end`, where the first one and the last one are, resp., the first and last time points in that range that fall on the boundary of ``freq`` (if given as a frequency string) or that are valid for ``freq`` (if given as a :class:`pandas.tseries.offsets.DateOffset`). (If exactly one of ``start``, ``end``, or ``freq`` is *not* specified, this missing parameter can be computed given ``periods``, the number of timesteps in the range. See the note below.) Parameters ---------- start : str or datetime-like, optional Left bound for generating dates. end : str or datetime-like, optional Right bound for generating dates. periods : int, optional Number of periods to generate. freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D' Frequency strings can have multiples, e.g. '5h'. See :ref:`here <timeseries.offset_aliases>` for a list of frequency aliases. tz : str or tzinfo, optional Time zone name for returning localized DatetimeIndex, for example 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is timezone-naive unless timezone-aware datetime-likes are passed. normalize : bool, default False Normalize start/end dates to midnight before generating date range. name : str, default None Name of the resulting DatetimeIndex. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; Whether to set each bound as closed or open. .. versionadded:: 1.4.0 unit : str, default None Specify the desired resolution of the result. .. versionadded:: 2.0.0 **kwargs For compatibility. Has no effect on the result. Returns ------- DatetimeIndex See Also -------- DatetimeIndex : An immutable container for datetimes. timedelta_range : Return a fixed frequency TimedeltaIndex. period_range : Return a fixed frequency PeriodIndex. interval_range : Return a fixed frequency IntervalIndex. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``DatetimeIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end`` (closed on both sides). To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- **Specifying the values** The next four examples generate the same `DatetimeIndex`, but vary the combination of `start`, `end` and `periods`. Specify `start` and `end`, with the default daily frequency. >>> pd.date_range(start='1/1/2018', end='1/08/2018') DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], dtype='datetime64[ns]', freq='D') Specify timezone-aware `start` and `end`, with the default daily frequency. >>> pd.date_range( ... start=pd.to_datetime("1/1/2018").tz_localize("Europe/Berlin"), ... end=pd.to_datetime("1/08/2018").tz_localize("Europe/Berlin"), ... 
) DatetimeIndex(['2018-01-01 00:00:00+01:00', '2018-01-02 00:00:00+01:00', '2018-01-03 00:00:00+01:00', '2018-01-04 00:00:00+01:00', '2018-01-05 00:00:00+01:00', '2018-01-06 00:00:00+01:00', '2018-01-07 00:00:00+01:00', '2018-01-08 00:00:00+01:00'], dtype='datetime64[ns, Europe/Berlin]', freq='D') Specify `start` and `periods`, the number of periods (days). >>> pd.date_range(start='1/1/2018', periods=8) DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], dtype='datetime64[ns]', freq='D') Specify `end` and `periods`, the number of periods (days). >>> pd.date_range(end='1/1/2018', periods=8) DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28', '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'], dtype='datetime64[ns]', freq='D') Specify `start`, `end`, and `periods`; the frequency is generated automatically (linearly spaced). >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3) DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00', '2018-04-27 00:00:00'], dtype='datetime64[ns]', freq=None) **Other Parameters** Changed the `freq` (frequency) to ``'ME'`` (month end frequency). >>> pd.date_range(start='1/1/2018', periods=5, freq='ME') DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30', '2018-05-31'], dtype='datetime64[ns]', freq='ME') Multiples are allowed >>> pd.date_range(start='1/1/2018', periods=5, freq='3ME') DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', '2019-01-31'], dtype='datetime64[ns]', freq='3ME') `freq` can also be specified as an Offset object. >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3)) DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', '2019-01-31'], dtype='datetime64[ns]', freq='3ME') Specify `tz` to set the timezone. >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo') DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00', '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00', '2018-01-05 00:00:00+09:00'], dtype='datetime64[ns, Asia/Tokyo]', freq='D') `inclusive` controls whether to include `start` and `end` that are on the boundary. The default, "both", includes boundary points on either end. >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive="both") DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq='D') Use ``inclusive='left'`` to exclude `end` if it falls on the boundary. >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='left') DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'], dtype='datetime64[ns]', freq='D') Use ``inclusive='right'`` to exclude `start` if it falls on the boundary, and similarly ``inclusive='neither'`` will exclude both `start` and `end`. 
>>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='right') DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq='D') **Specify a unit** >>> pd.date_range(start="2017-01-01", periods=10, freq="100YS", unit="s") DatetimeIndex(['2017-01-01', '2117-01-01', '2217-01-01', '2317-01-01', '2417-01-01', '2517-01-01', '2617-01-01', '2717-01-01', '2817-01-01', '2917-01-01'], dtype='datetime64[s]', freq='100YS-JAN') """ if freq is None and com.any_none(periods, start, end): freq = "D" dtarr = DatetimeArray._generate_range( start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, inclusive=inclusive, unit=unit, **kwargs, ) return DatetimeIndex._simple_new(dtarr, name=name)
(start=None, end=None, periods=None, freq=None, tz=None, normalize: 'bool' = False, name: 'Hashable | None' = None, inclusive: 'IntervalClosedType' = 'both', *, unit: 'str | None' = None, **kwargs) -> 'DatetimeIndex'
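A minimal sketch of `pd.date_range` showing the three-of-four parameter rule and the `inclusive` bound control from the docstring above. The frequency aliases ('ME', 'h') follow the newer naming used in the examples above and assume a recent pandas release.

import pandas as pd

# start + periods + freq: the end point is inferred.
monthly = pd.date_range(start="2024-01-31", periods=4, freq="ME")

# start + end with the right bound excluded.
half_open = pd.date_range(start="2024-01-01", end="2024-01-05", inclusive="left")

# Timezone-aware hourly stamps.
hourly = pd.date_range("2024-01-01", periods=3, freq="h", tz="UTC")

print(monthly, half_open, hourly, sep="\n\n")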
68,326
pandas.core.computation.eval
eval
Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : {'pandas', 'python'}, default 'pandas' The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : {'python', 'numexpr'}, default 'numexpr' The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'`` : This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'`` : Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series, or None The completion value of evaluating the given code or None if ``inplace=True``. Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. 
- `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. Examples -------- >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]}) >>> df animal age 0 dog 10 1 pig 20 We can add a new column using ``pd.eval``: >>> pd.eval("double_age = df.age * 2", target=df) animal age double_age 0 dog 10 20 1 pig 20 40
def eval( expr: str | BinOp, # we leave BinOp out of the docstr bc it isn't for users parser: str = "pandas", engine: str | None = None, local_dict=None, global_dict=None, resolvers=(), level: int = 0, target=None, inplace: bool = False, ): """ Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : {'pandas', 'python'}, default 'pandas' The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : {'python', 'numexpr'}, default 'numexpr' The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'`` : This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'`` : Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series, or None The completion value of evaluating the given code or None if ``inplace=True``. Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. 
- The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. Examples -------- >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]}) >>> df animal age 0 dog 10 1 pig 20 We can add a new column using ``pd.eval``: >>> pd.eval("double_age = df.age * 2", target=df) animal age double_age 0 dog 10 20 1 pig 20 40 """ inplace = validate_bool_kwarg(inplace, "inplace") exprs: list[str | BinOp] if isinstance(expr, str): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != ""] else: # ops.BinOp; for internal compat, not intended to be passed by users exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError( "multi-line expressions are only valid in the " "context of data, use DataFrame.eval" ) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = ensure_scope( level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target, ) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env) if engine == "numexpr" and ( is_extension_array_dtype(parsed_expr.terms.return_type) or getattr(parsed_expr.terms, "operand_types", None) is not None and any( is_extension_array_dtype(elem) for elem in parsed_expr.terms.operand_types ) ): warnings.warn( "Engine has switched to 'python' because numexpr does not support " "extension array dtypes. Please set your engine to python manually.", RuntimeWarning, stacklevel=find_stack_level(), ) engine = "python" # construct the engine and evaluate the parsed expression eng = ENGINES[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError( "Multi-line expressions are only valid " "if all expressions contain an assignment" ) if inplace: raise ValueError("Cannot operate inplace if there is no assignment") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target if isinstance(target, NDFrame): target = target.copy(deep=None) else: target = target.copy() except AttributeError as err: raise ValueError("Cannot return a copy of the target") from err else: target = env.target # TypeError is most commonly raised (e.g. int, list), but you # get IndexError if you try to do this assignment on np.ndarray. # we will ignore numpy warnings here; e.g. 
if trying # to use a non-numeric indexer try: if inplace and isinstance(target, NDFrame): target.loc[:, assigner] = ret else: target[assigner] = ret # pyright: ignore[reportGeneralTypeIssues] except (TypeError, IndexError) as err: raise ValueError("Cannot assign expression output to target") from err if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. if inplace is False: return target if target_modified else ret
(expr: 'str | BinOp', parser: 'str' = 'pandas', engine: 'str | None' = None, local_dict=None, global_dict=None, resolvers=(), level: 'int' = 0, target=None, inplace: 'bool' = False)
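A hedged sketch of `pd.eval`: a plain arithmetic expression resolved against a local frame, and an assignment expression evaluated against a target, mirroring the docstring example above. The column names here are illustrative.

import pandas as pd

df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]})

# Expression over a variable from the calling scope.
doubled = pd.eval("df.age * 2")

# Assignment expressions require a target; a modified copy is returned
# because inplace defaults to False.
with_double = pd.eval("double_age = df.age * 2", target=df)

print(doubled, with_double, sep="\n\n")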
68,327
pandas.core.algorithms
factorize
Encode the object as an enumerated type or categorical variable. This method is useful for obtaining a numeric representation of an array when all that matters is identifying distinct values. `factorize` is available as both a top-level function :func:`pandas.factorize`, and as a method :meth:`Series.factorize` and :meth:`Index.factorize`. Parameters ---------- values : sequence A 1-D sequence. Sequences that aren't pandas objects are coerced to ndarrays before factorization. sort : bool, default False Sort `uniques` and shuffle `codes` to maintain the relationship. use_na_sentinel : bool, default True If True, the sentinel -1 will be used for NaN values. If False, NaN values will be encoded as non-negative integers and will not drop the NaN from the uniques of the values. .. versionadded:: 1.5.0 size_hint : int, optional Hint to the hashtable sizer. Returns ------- codes : ndarray An integer ndarray that's an indexer into `uniques`. ``uniques.take(codes)`` will have the same values as `values`. uniques : ndarray, Index, or Categorical The unique valid values. When `values` is Categorical, `uniques` is a Categorical. When `values` is some other pandas object, an `Index` is returned. Otherwise, a 1-D ndarray is returned. .. note:: Even if there's a missing value in `values`, `uniques` will *not* contain an entry for it. See Also -------- cut : Discretize continuous-valued array. unique : Find the unique value in an array. Notes ----- Reference :ref:`the user guide <reshaping.factorize>` for more examples. Examples -------- These examples all show factorize as a top-level method like ``pd.factorize(values)``. The results are identical for methods like :meth:`Series.factorize`. >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O")) >>> codes array([0, 0, 1, 2, 0]) >>> uniques array(['b', 'a', 'c'], dtype=object) With ``sort=True``, the `uniques` will be sorted, and `codes` will be shuffled so that the relationship is the maintained. >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O"), ... sort=True) >>> codes array([1, 1, 0, 2, 1]) >>> uniques array(['a', 'b', 'c'], dtype=object) When ``use_na_sentinel=True`` (the default), missing values are indicated in the `codes` with the sentinel value ``-1`` and missing values are not included in `uniques`. >>> codes, uniques = pd.factorize(np.array(['b', None, 'a', 'c', 'b'], dtype="O")) >>> codes array([ 0, -1, 1, 2, 0]) >>> uniques array(['b', 'a', 'c'], dtype=object) Thus far, we've only factorized lists (which are internally coerced to NumPy arrays). When factorizing pandas objects, the type of `uniques` will differ. For Categoricals, a `Categorical` is returned. >>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c']) >>> codes, uniques = pd.factorize(cat) >>> codes array([0, 0, 1]) >>> uniques ['a', 'c'] Categories (3, object): ['a', 'b', 'c'] Notice that ``'b'`` is in ``uniques.categories``, despite not being present in ``cat.values``. For all other pandas objects, an Index of the appropriate type is returned. >>> cat = pd.Series(['a', 'a', 'c']) >>> codes, uniques = pd.factorize(cat) >>> codes array([0, 0, 1]) >>> uniques Index(['a', 'c'], dtype='object') If NaN is in the values, and we want to include NaN in the uniques of the values, it can be achieved by setting ``use_na_sentinel=False``. 
>>> values = np.array([1, 2, 1, np.nan]) >>> codes, uniques = pd.factorize(values) # default: use_na_sentinel=True >>> codes array([ 0, 1, 0, -1]) >>> uniques array([1., 2.]) >>> codes, uniques = pd.factorize(values, use_na_sentinel=False) >>> codes array([0, 1, 0, 2]) >>> uniques array([ 1., 2., nan])
@doc( values=dedent( """\ values : sequence A 1-D sequence. Sequences that aren't pandas objects are coerced to ndarrays before factorization. """ ), sort=dedent( """\ sort : bool, default False Sort `uniques` and shuffle `codes` to maintain the relationship. """ ), size_hint=dedent( """\ size_hint : int, optional Hint to the hashtable sizer. """ ), ) def factorize( values, sort: bool = False, use_na_sentinel: bool = True, size_hint: int | None = None, ) -> tuple[np.ndarray, np.ndarray | Index]: """ Encode the object as an enumerated type or categorical variable. This method is useful for obtaining a numeric representation of an array when all that matters is identifying distinct values. `factorize` is available as both a top-level function :func:`pandas.factorize`, and as a method :meth:`Series.factorize` and :meth:`Index.factorize`. Parameters ---------- {values}{sort} use_na_sentinel : bool, default True If True, the sentinel -1 will be used for NaN values. If False, NaN values will be encoded as non-negative integers and will not drop the NaN from the uniques of the values. .. versionadded:: 1.5.0 {size_hint}\ Returns ------- codes : ndarray An integer ndarray that's an indexer into `uniques`. ``uniques.take(codes)`` will have the same values as `values`. uniques : ndarray, Index, or Categorical The unique valid values. When `values` is Categorical, `uniques` is a Categorical. When `values` is some other pandas object, an `Index` is returned. Otherwise, a 1-D ndarray is returned. .. note:: Even if there's a missing value in `values`, `uniques` will *not* contain an entry for it. See Also -------- cut : Discretize continuous-valued array. unique : Find the unique value in an array. Notes ----- Reference :ref:`the user guide <reshaping.factorize>` for more examples. Examples -------- These examples all show factorize as a top-level method like ``pd.factorize(values)``. The results are identical for methods like :meth:`Series.factorize`. >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O")) >>> codes array([0, 0, 1, 2, 0]) >>> uniques array(['b', 'a', 'c'], dtype=object) With ``sort=True``, the `uniques` will be sorted, and `codes` will be shuffled so that the relationship is the maintained. >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O"), ... sort=True) >>> codes array([1, 1, 0, 2, 1]) >>> uniques array(['a', 'b', 'c'], dtype=object) When ``use_na_sentinel=True`` (the default), missing values are indicated in the `codes` with the sentinel value ``-1`` and missing values are not included in `uniques`. >>> codes, uniques = pd.factorize(np.array(['b', None, 'a', 'c', 'b'], dtype="O")) >>> codes array([ 0, -1, 1, 2, 0]) >>> uniques array(['b', 'a', 'c'], dtype=object) Thus far, we've only factorized lists (which are internally coerced to NumPy arrays). When factorizing pandas objects, the type of `uniques` will differ. For Categoricals, a `Categorical` is returned. >>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c']) >>> codes, uniques = pd.factorize(cat) >>> codes array([0, 0, 1]) >>> uniques ['a', 'c'] Categories (3, object): ['a', 'b', 'c'] Notice that ``'b'`` is in ``uniques.categories``, despite not being present in ``cat.values``. For all other pandas objects, an Index of the appropriate type is returned. 
>>> cat = pd.Series(['a', 'a', 'c']) >>> codes, uniques = pd.factorize(cat) >>> codes array([0, 0, 1]) >>> uniques Index(['a', 'c'], dtype='object') If NaN is in the values, and we want to include NaN in the uniques of the values, it can be achieved by setting ``use_na_sentinel=False``. >>> values = np.array([1, 2, 1, np.nan]) >>> codes, uniques = pd.factorize(values) # default: use_na_sentinel=True >>> codes array([ 0, 1, 0, -1]) >>> uniques array([1., 2.]) >>> codes, uniques = pd.factorize(values, use_na_sentinel=False) >>> codes array([0, 1, 0, 2]) >>> uniques array([ 1., 2., nan]) """ # Implementation notes: This method is responsible for 3 things # 1.) coercing data to array-like (ndarray, Index, extension array) # 2.) factorizing codes and uniques # 3.) Maybe boxing the uniques in an Index # # Step 2 is dispatched to extension types (like Categorical). They are # responsible only for factorization. All data coercion, sorting and boxing # should happen here. if isinstance(values, (ABCIndex, ABCSeries)): return values.factorize(sort=sort, use_na_sentinel=use_na_sentinel) values = _ensure_arraylike(values, func_name="factorize") original = values if ( isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray)) and values.freq is not None ): # The presence of 'freq' means we can fast-path sorting and know there # aren't NAs codes, uniques = values.factorize(sort=sort) return codes, uniques elif not isinstance(values, np.ndarray): # i.e. ExtensionArray codes, uniques = values.factorize(use_na_sentinel=use_na_sentinel) else: values = np.asarray(values) # convert DTA/TDA/MultiIndex if not use_na_sentinel and values.dtype == object: # factorize can now handle differentiating various types of null values. # These can only occur when the array has object dtype. # However, for backwards compatibility we only use the null for the # provided dtype. This may be revisited in the future, see GH#48476. null_mask = isna(values) if null_mask.any(): na_value = na_value_for_dtype(values.dtype, compat=False) # Don't modify (potentially user-provided) array values = np.where(null_mask, na_value, values) codes, uniques = factorize_array( values, use_na_sentinel=use_na_sentinel, size_hint=size_hint, ) if sort and len(uniques) > 0: uniques, codes = safe_sort( uniques, codes, use_na_sentinel=use_na_sentinel, assume_unique=True, verify=False, ) uniques = _reconstruct_data(uniques, original.dtype, original) return codes, uniques
(values, sort: 'bool' = False, use_na_sentinel: 'bool' = True, size_hint: 'int | None' = None) -> 'tuple[np.ndarray, np.ndarray | Index]'
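A small sketch of `pd.factorize`: codes index into `uniques`, missing values get the -1 sentinel by default, and `sort=True` reorders both consistently, per the docstring above. The input array is made up for the example.

import numpy as np
import pandas as pd

values = np.array(["b", "b", "a", "c", None], dtype=object)

codes, uniques = pd.factorize(values)
print(codes)    # [ 0  0  1  2 -1]  -> -1 marks the missing entry
print(uniques)  # ['b' 'a' 'c']

codes_sorted, uniques_sorted = pd.factorize(values, sort=True)
print(codes_sorted)    # codes shuffled to stay consistent with sorted uniques
print(uniques_sorted)  # ['a' 'b' 'c']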
68,328
pandas.core.reshape.encoding
from_dummies
Create a categorical ``DataFrame`` from a ``DataFrame`` of dummy variables. Inverts the operation performed by :func:`~pandas.get_dummies`. .. versionadded:: 1.5.0 Parameters ---------- data : DataFrame Data which contains dummy-coded variables in form of integer columns of 1's and 0's. sep : str, default None Separator used in the column names of the dummy categories they are character indicating the separation of the categorical names from the prefixes. For example, if your column names are 'prefix_A' and 'prefix_B', you can strip the underscore by specifying sep='_'. default_category : None, Hashable or dict of Hashables, default None The default category is the implied category when a value has none of the listed categories specified with a one, i.e. if all dummies in a row are zero. Can be a single value for all variables or a dict directly mapping the default categories to a prefix of a variable. Returns ------- DataFrame Categorical data decoded from the dummy input-data. Raises ------ ValueError * When the input ``DataFrame`` ``data`` contains NA values. * When the input ``DataFrame`` ``data`` contains column names with separators that do not match the separator specified with ``sep``. * When a ``dict`` passed to ``default_category`` does not include an implied category for each prefix. * When a value in ``data`` has more than one category assigned to it. * When ``default_category=None`` and a value in ``data`` has no category assigned to it. TypeError * When the input ``data`` is not of type ``DataFrame``. * When the input ``DataFrame`` ``data`` contains non-dummy data. * When the passed ``sep`` is of a wrong data type. * When the passed ``default_category`` is of a wrong data type. See Also -------- :func:`~pandas.get_dummies` : Convert ``Series`` or ``DataFrame`` to dummy codes. :class:`~pandas.Categorical` : Represent a categorical variable in classic. Notes ----- The columns of the passed dummy data should only include 1's and 0's, or boolean values. Examples -------- >>> df = pd.DataFrame({"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], ... "c": [0, 0, 1, 0]}) >>> df a b c 0 1 0 0 1 0 1 0 2 0 0 1 3 1 0 0 >>> pd.from_dummies(df) 0 a 1 b 2 c 3 a >>> df = pd.DataFrame({"col1_a": [1, 0, 1], "col1_b": [0, 1, 0], ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], ... "col2_c": [0, 0, 1]}) >>> df col1_a col1_b col2_a col2_b col2_c 0 1 0 0 1 0 1 0 1 1 0 0 2 1 0 0 0 1 >>> pd.from_dummies(df, sep="_") col1 col2 0 a b 1 b a 2 a c >>> df = pd.DataFrame({"col1_a": [1, 0, 0], "col1_b": [0, 1, 0], ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], ... "col2_c": [0, 0, 0]}) >>> df col1_a col1_b col2_a col2_b col2_c 0 1 0 0 1 0 1 0 1 1 0 0 2 0 0 0 0 0 >>> pd.from_dummies(df, sep="_", default_category={"col1": "d", "col2": "e"}) col1 col2 0 a b 1 b a 2 d e
def from_dummies( data: DataFrame, sep: None | str = None, default_category: None | Hashable | dict[str, Hashable] = None, ) -> DataFrame: """ Create a categorical ``DataFrame`` from a ``DataFrame`` of dummy variables. Inverts the operation performed by :func:`~pandas.get_dummies`. .. versionadded:: 1.5.0 Parameters ---------- data : DataFrame Data which contains dummy-coded variables in form of integer columns of 1's and 0's. sep : str, default None Separator used in the column names of the dummy categories they are character indicating the separation of the categorical names from the prefixes. For example, if your column names are 'prefix_A' and 'prefix_B', you can strip the underscore by specifying sep='_'. default_category : None, Hashable or dict of Hashables, default None The default category is the implied category when a value has none of the listed categories specified with a one, i.e. if all dummies in a row are zero. Can be a single value for all variables or a dict directly mapping the default categories to a prefix of a variable. Returns ------- DataFrame Categorical data decoded from the dummy input-data. Raises ------ ValueError * When the input ``DataFrame`` ``data`` contains NA values. * When the input ``DataFrame`` ``data`` contains column names with separators that do not match the separator specified with ``sep``. * When a ``dict`` passed to ``default_category`` does not include an implied category for each prefix. * When a value in ``data`` has more than one category assigned to it. * When ``default_category=None`` and a value in ``data`` has no category assigned to it. TypeError * When the input ``data`` is not of type ``DataFrame``. * When the input ``DataFrame`` ``data`` contains non-dummy data. * When the passed ``sep`` is of a wrong data type. * When the passed ``default_category`` is of a wrong data type. See Also -------- :func:`~pandas.get_dummies` : Convert ``Series`` or ``DataFrame`` to dummy codes. :class:`~pandas.Categorical` : Represent a categorical variable in classic. Notes ----- The columns of the passed dummy data should only include 1's and 0's, or boolean values. Examples -------- >>> df = pd.DataFrame({"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], ... "c": [0, 0, 1, 0]}) >>> df a b c 0 1 0 0 1 0 1 0 2 0 0 1 3 1 0 0 >>> pd.from_dummies(df) 0 a 1 b 2 c 3 a >>> df = pd.DataFrame({"col1_a": [1, 0, 1], "col1_b": [0, 1, 0], ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], ... "col2_c": [0, 0, 1]}) >>> df col1_a col1_b col2_a col2_b col2_c 0 1 0 0 1 0 1 0 1 1 0 0 2 1 0 0 0 1 >>> pd.from_dummies(df, sep="_") col1 col2 0 a b 1 b a 2 a c >>> df = pd.DataFrame({"col1_a": [1, 0, 0], "col1_b": [0, 1, 0], ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], ... 
"col2_c": [0, 0, 0]}) >>> df col1_a col1_b col2_a col2_b col2_c 0 1 0 0 1 0 1 0 1 1 0 0 2 0 0 0 0 0 >>> pd.from_dummies(df, sep="_", default_category={"col1": "d", "col2": "e"}) col1 col2 0 a b 1 b a 2 d e """ from pandas.core.reshape.concat import concat if not isinstance(data, DataFrame): raise TypeError( "Expected 'data' to be a 'DataFrame'; " f"Received 'data' of type: {type(data).__name__}" ) col_isna_mask = cast(Series, data.isna().any()) if col_isna_mask.any(): raise ValueError( "Dummy DataFrame contains NA value in column: " f"'{col_isna_mask.idxmax()}'" ) # index data with a list of all columns that are dummies try: data_to_decode = data.astype("boolean", copy=False) except TypeError: raise TypeError("Passed DataFrame contains non-dummy data") # collect prefixes and get lists to slice data for each prefix variables_slice = defaultdict(list) if sep is None: variables_slice[""] = list(data.columns) elif isinstance(sep, str): for col in data_to_decode.columns: prefix = col.split(sep)[0] if len(prefix) == len(col): raise ValueError(f"Separator not specified for column: {col}") variables_slice[prefix].append(col) else: raise TypeError( "Expected 'sep' to be of type 'str' or 'None'; " f"Received 'sep' of type: {type(sep).__name__}" ) if default_category is not None: if isinstance(default_category, dict): if not len(default_category) == len(variables_slice): len_msg = ( f"Length of 'default_category' ({len(default_category)}) " f"did not match the length of the columns being encoded " f"({len(variables_slice)})" ) raise ValueError(len_msg) elif isinstance(default_category, Hashable): default_category = dict( zip(variables_slice, [default_category] * len(variables_slice)) ) else: raise TypeError( "Expected 'default_category' to be of type " "'None', 'Hashable', or 'dict'; " "Received 'default_category' of type: " f"{type(default_category).__name__}" ) cat_data = {} for prefix, prefix_slice in variables_slice.items(): if sep is None: cats = prefix_slice.copy() else: cats = [col[len(prefix + sep) :] for col in prefix_slice] assigned = data_to_decode.loc[:, prefix_slice].sum(axis=1) if any(assigned > 1): raise ValueError( "Dummy DataFrame contains multi-assignment(s); " f"First instance in row: {assigned.idxmax()}" ) if any(assigned == 0): if isinstance(default_category, dict): cats.append(default_category[prefix]) else: raise ValueError( "Dummy DataFrame contains unassigned value(s); " f"First instance in row: {assigned.idxmin()}" ) data_slice = concat( (data_to_decode.loc[:, prefix_slice], assigned == 0), axis=1 ) else: data_slice = data_to_decode.loc[:, prefix_slice] cats_array = data._constructor_sliced(cats, dtype=data.columns.dtype) # get indices of True entries along axis=1 true_values = data_slice.idxmax(axis=1) indexer = data_slice.columns.get_indexer_for(true_values) cat_data[prefix] = cats_array.take(indexer).set_axis(data.index) result = DataFrame(cat_data) if sep is not None: result.columns = result.columns.astype(data.columns.dtype) return result
(data: pandas.core.frame.DataFrame, sep: Optional[str] = None, default_category: Union[NoneType, collections.abc.Hashable, dict[str, collections.abc.Hashable]] = None) -> pandas.core.frame.DataFrame
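A short round-trip sketch for the record above, pairing from_dummies with get_dummies (column names and data are illustrative):

import pandas as pd

# One-hot encode a column, then invert the encoding.
df = pd.DataFrame({"animal": ["cat", "dog", "cat", "bird"]})
dummies = pd.get_dummies(df["animal"], prefix="animal", prefix_sep="_")

# from_dummies needs the same separator that was used to build the dummy columns.
decoded = pd.from_dummies(dummies, sep="_")
print(decoded)
#   animal
# 0    cat
# 1    dog
# 2    cat
# 3   bird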
68,329
pandas.core.reshape.encoding
get_dummies
Convert categorical variable into dummy/indicator variables. Each variable is converted in as many 0/1 variables as there are different values. Columns in the output are each named after a value; if the input is a DataFrame, the name of the original variable is prepended to the value. Parameters ---------- data : array-like, Series, or DataFrame Data of which to get dummy indicators. prefix : str, list of str, or dict of str, default None String to append DataFrame column names. Pass a list with length equal to the number of columns when calling get_dummies on a DataFrame. Alternatively, `prefix` can be a dictionary mapping column names to prefixes. prefix_sep : str, default '_' If appending prefix, separator/delimiter to use. Or pass a list or dictionary as with `prefix`. dummy_na : bool, default False Add a column to indicate NaNs, if False NaNs are ignored. columns : list-like, default None Column names in the DataFrame to be encoded. If `columns` is None then all the columns with `object`, `string`, or `category` dtype will be converted. sparse : bool, default False Whether the dummy-encoded columns should be backed by a :class:`SparseArray` (True) or a regular NumPy array (False). drop_first : bool, default False Whether to get k-1 dummies out of k categorical levels by removing the first level. dtype : dtype, default bool Data type for new columns. Only a single dtype is allowed. Returns ------- DataFrame Dummy-coded data. If `data` contains other columns than the dummy-coded one(s), these will be prepended, unaltered, to the result. See Also -------- Series.str.get_dummies : Convert Series of strings to dummy codes. :func:`~pandas.from_dummies` : Convert dummy codes to categorical ``DataFrame``. Notes ----- Reference :ref:`the user guide <reshaping.dummies>` for more examples. Examples -------- >>> s = pd.Series(list('abca')) >>> pd.get_dummies(s) a b c 0 True False False 1 False True False 2 False False True 3 True False False >>> s1 = ['a', 'b', np.nan] >>> pd.get_dummies(s1) a b 0 True False 1 False True 2 False False >>> pd.get_dummies(s1, dummy_na=True) a b NaN 0 True False False 1 False True False 2 False False True >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], ... 'C': [1, 2, 3]}) >>> pd.get_dummies(df, prefix=['col1', 'col2']) C col1_a col1_b col2_a col2_b col2_c 0 1 True False False True False 1 2 False True True False False 2 3 True False False False True >>> pd.get_dummies(pd.Series(list('abcaa'))) a b c 0 True False False 1 False True False 2 False False True 3 True False False 4 True False False >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True) b c 0 False False 1 True False 2 False True 3 False False 4 False False >>> pd.get_dummies(pd.Series(list('abc')), dtype=float) a b c 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0
def get_dummies( data, prefix=None, prefix_sep: str | Iterable[str] | dict[str, str] = "_", dummy_na: bool = False, columns=None, sparse: bool = False, drop_first: bool = False, dtype: NpDtype | None = None, ) -> DataFrame: """ Convert categorical variable into dummy/indicator variables. Each variable is converted in as many 0/1 variables as there are different values. Columns in the output are each named after a value; if the input is a DataFrame, the name of the original variable is prepended to the value. Parameters ---------- data : array-like, Series, or DataFrame Data of which to get dummy indicators. prefix : str, list of str, or dict of str, default None String to append DataFrame column names. Pass a list with length equal to the number of columns when calling get_dummies on a DataFrame. Alternatively, `prefix` can be a dictionary mapping column names to prefixes. prefix_sep : str, default '_' If appending prefix, separator/delimiter to use. Or pass a list or dictionary as with `prefix`. dummy_na : bool, default False Add a column to indicate NaNs, if False NaNs are ignored. columns : list-like, default None Column names in the DataFrame to be encoded. If `columns` is None then all the columns with `object`, `string`, or `category` dtype will be converted. sparse : bool, default False Whether the dummy-encoded columns should be backed by a :class:`SparseArray` (True) or a regular NumPy array (False). drop_first : bool, default False Whether to get k-1 dummies out of k categorical levels by removing the first level. dtype : dtype, default bool Data type for new columns. Only a single dtype is allowed. Returns ------- DataFrame Dummy-coded data. If `data` contains other columns than the dummy-coded one(s), these will be prepended, unaltered, to the result. See Also -------- Series.str.get_dummies : Convert Series of strings to dummy codes. :func:`~pandas.from_dummies` : Convert dummy codes to categorical ``DataFrame``. Notes ----- Reference :ref:`the user guide <reshaping.dummies>` for more examples. Examples -------- >>> s = pd.Series(list('abca')) >>> pd.get_dummies(s) a b c 0 True False False 1 False True False 2 False False True 3 True False False >>> s1 = ['a', 'b', np.nan] >>> pd.get_dummies(s1) a b 0 True False 1 False True 2 False False >>> pd.get_dummies(s1, dummy_na=True) a b NaN 0 True False False 1 False True False 2 False False True >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], ... 
'C': [1, 2, 3]}) >>> pd.get_dummies(df, prefix=['col1', 'col2']) C col1_a col1_b col2_a col2_b col2_c 0 1 True False False True False 1 2 False True True False False 2 3 True False False False True >>> pd.get_dummies(pd.Series(list('abcaa'))) a b c 0 True False False 1 False True False 2 False False True 3 True False False 4 True False False >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True) b c 0 False False 1 True False 2 False True 3 False False 4 False False >>> pd.get_dummies(pd.Series(list('abc')), dtype=float) a b c 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0 """ from pandas.core.reshape.concat import concat dtypes_to_encode = ["object", "string", "category"] if isinstance(data, DataFrame): # determine columns being encoded if columns is None: data_to_encode = data.select_dtypes(include=dtypes_to_encode) elif not is_list_like(columns): raise TypeError("Input must be a list-like for parameter `columns`") else: data_to_encode = data[columns] # validate prefixes and separator to avoid silently dropping cols def check_len(item, name: str): if is_list_like(item): if not len(item) == data_to_encode.shape[1]: len_msg = ( f"Length of '{name}' ({len(item)}) did not match the " "length of the columns being encoded " f"({data_to_encode.shape[1]})." ) raise ValueError(len_msg) check_len(prefix, "prefix") check_len(prefix_sep, "prefix_sep") if isinstance(prefix, str): prefix = itertools.cycle([prefix]) if isinstance(prefix, dict): prefix = [prefix[col] for col in data_to_encode.columns] if prefix is None: prefix = data_to_encode.columns # validate separators if isinstance(prefix_sep, str): prefix_sep = itertools.cycle([prefix_sep]) elif isinstance(prefix_sep, dict): prefix_sep = [prefix_sep[col] for col in data_to_encode.columns] with_dummies: list[DataFrame] if data_to_encode.shape == data.shape: # Encoding the entire df, do not prepend any dropped columns with_dummies = [] elif columns is not None: # Encoding only cols specified in columns. Get all cols not in # columns to prepend to result. with_dummies = [data.drop(columns, axis=1)] else: # Encoding only object and category dtype columns. Get remaining # columns to prepend to result. with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)] for col, pre, sep in zip(data_to_encode.items(), prefix, prefix_sep): # col is (column_name, column), use just column data here dummy = _get_dummies_1d( col[1], prefix=pre, prefix_sep=sep, dummy_na=dummy_na, sparse=sparse, drop_first=drop_first, dtype=dtype, ) with_dummies.append(dummy) result = concat(with_dummies, axis=1) else: result = _get_dummies_1d( data, prefix, prefix_sep, dummy_na, sparse=sparse, drop_first=drop_first, dtype=dtype, ) return result
(data, prefix=None, prefix_sep: 'str | Iterable[str] | dict[str, str]' = '_', dummy_na: 'bool' = False, columns=None, sparse: 'bool' = False, drop_first: 'bool' = False, dtype: 'NpDtype | None' = None) -> 'DataFrame'
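A minimal usage sketch for the record above showing dtype control and drop_first on a DataFrame (data is illustrative):

import pandas as pd

df = pd.DataFrame({"color": ["red", "green", "red"], "size": [1, 2, 3]})

# Only the listed column is encoded; the numeric column passes through unchanged.
# drop_first=True drops the first category level ("green"), leaving k-1 dummies.
encoded = pd.get_dummies(df, columns=["color"], dtype=int, drop_first=True)
print(encoded)
#    size  color_red
# 0     1          1
# 1     2          0
# 2     3          1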
68,330
pandas.tseries.frequencies
infer_freq
Infer the most likely frequency given the input index. Parameters ---------- index : DatetimeIndex, TimedeltaIndex, Series or array-like If passed a Series will use the values of the series (NOT THE INDEX). Returns ------- str or None None if no discernible frequency. Raises ------ TypeError If the index is not datetime-like. ValueError If there are fewer than three values. Examples -------- >>> idx = pd.date_range(start='2020/12/01', end='2020/12/30', periods=30) >>> pd.infer_freq(idx) 'D'
def infer_freq( index: DatetimeIndex | TimedeltaIndex | Series | DatetimeLikeArrayMixin, ) -> str | None: """ Infer the most likely frequency given the input index. Parameters ---------- index : DatetimeIndex, TimedeltaIndex, Series or array-like If passed a Series will use the values of the series (NOT THE INDEX). Returns ------- str or None None if no discernible frequency. Raises ------ TypeError If the index is not datetime-like. ValueError If there are fewer than three values. Examples -------- >>> idx = pd.date_range(start='2020/12/01', end='2020/12/30', periods=30) >>> pd.infer_freq(idx) 'D' """ from pandas.core.api import DatetimeIndex if isinstance(index, ABCSeries): values = index._values if not ( lib.is_np_dtype(values.dtype, "mM") or isinstance(values.dtype, DatetimeTZDtype) or values.dtype == object ): raise TypeError( "cannot infer freq from a non-convertible dtype " f"on a Series of {index.dtype}" ) index = values inferer: _FrequencyInferer if not hasattr(index, "dtype"): pass elif isinstance(index.dtype, PeriodDtype): raise TypeError( "PeriodIndex given. Check the `freq` attribute " "instead of using infer_freq." ) elif lib.is_np_dtype(index.dtype, "m"): # Allow TimedeltaIndex and TimedeltaArray inferer = _TimedeltaFrequencyInferer(index) return inferer.get_freq() elif is_numeric_dtype(index.dtype): raise TypeError( f"cannot infer freq from a non-convertible index of dtype {index.dtype}" ) if not isinstance(index, DatetimeIndex): index = DatetimeIndex(index) inferer = _FrequencyInferer(index) return inferer.get_freq()
(index: 'DatetimeIndex | TimedeltaIndex | Series | DatetimeLikeArrayMixin') -> 'str | None'
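A small sketch for the record above covering the three common outcomes: a recognised frequency, a Series input, and no discernible frequency (dates are illustrative):

import pandas as pd

# Business-day index: the frequency string is recovered.
idx = pd.date_range("2024-01-01", periods=10, freq="B")
print(pd.infer_freq(idx))        # 'B'

# Works on a Series of datetimes as well (the *values* are used, not the index).
s = pd.Series(pd.date_range("2024-01-01", periods=6, freq="D"))
print(pd.infer_freq(s))          # 'D'

# Returns None when no single frequency fits the observations.
irregular = pd.DatetimeIndex(["2024-01-01", "2024-01-02", "2024-01-05"])
print(pd.infer_freq(irregular))  # None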
68,331
pandas.core.indexes.interval
interval_range
Return a fixed frequency IntervalIndex. Parameters ---------- start : numeric or datetime-like, default None Left bound for generating intervals. end : numeric or datetime-like, default None Right bound for generating intervals. periods : int, default None Number of periods to generate. freq : numeric, str, Timedelta, datetime.timedelta, or DateOffset, default None The length of each interval. Must be consistent with the type of start and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1 for numeric and 'D' for datetime-like. name : str, default None Name of the resulting IntervalIndex. closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- IntervalIndex See Also -------- IntervalIndex : An Index of intervals that are all closed on the same side. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``IntervalIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end``, inclusively. To learn more about datetime-like frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Numeric ``start`` and ``end`` is supported. >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], dtype='interval[int64, right]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01 00:00:00, 2017-01-02 00:00:00], (2017-01-02 00:00:00, 2017-01-03 00:00:00], (2017-01-03 00:00:00, 2017-01-04 00:00:00]], dtype='interval[datetime64[ns], right]') The ``freq`` parameter specifies the frequency between the left and right. endpoints of the individual intervals within the ``IntervalIndex``. For numeric ``start`` and ``end``, the frequency must also be numeric. >>> pd.interval_range(start=0, periods=4, freq=1.5) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') Similarly, for datetime-like ``start`` and ``end``, the frequency must be convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... periods=3, freq='MS') IntervalIndex([(2017-01-01 00:00:00, 2017-02-01 00:00:00], (2017-02-01 00:00:00, 2017-03-01 00:00:00], (2017-03-01 00:00:00, 2017-04-01 00:00:00]], dtype='interval[datetime64[ns], right]') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.interval_range(start=0, end=6, periods=4) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. >>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], dtype='interval[int64, both]')
def interval_range( start=None, end=None, periods=None, freq=None, name: Hashable | None = None, closed: IntervalClosedType = "right", ) -> IntervalIndex: """ Return a fixed frequency IntervalIndex. Parameters ---------- start : numeric or datetime-like, default None Left bound for generating intervals. end : numeric or datetime-like, default None Right bound for generating intervals. periods : int, default None Number of periods to generate. freq : numeric, str, Timedelta, datetime.timedelta, or DateOffset, default None The length of each interval. Must be consistent with the type of start and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1 for numeric and 'D' for datetime-like. name : str, default None Name of the resulting IntervalIndex. closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- IntervalIndex See Also -------- IntervalIndex : An Index of intervals that are all closed on the same side. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``IntervalIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end``, inclusively. To learn more about datetime-like frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Numeric ``start`` and ``end`` is supported. >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], dtype='interval[int64, right]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01 00:00:00, 2017-01-02 00:00:00], (2017-01-02 00:00:00, 2017-01-03 00:00:00], (2017-01-03 00:00:00, 2017-01-04 00:00:00]], dtype='interval[datetime64[ns], right]') The ``freq`` parameter specifies the frequency between the left and right. endpoints of the individual intervals within the ``IntervalIndex``. For numeric ``start`` and ``end``, the frequency must also be numeric. >>> pd.interval_range(start=0, periods=4, freq=1.5) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') Similarly, for datetime-like ``start`` and ``end``, the frequency must be convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... periods=3, freq='MS') IntervalIndex([(2017-01-01 00:00:00, 2017-02-01 00:00:00], (2017-02-01 00:00:00, 2017-03-01 00:00:00], (2017-03-01 00:00:00, 2017-04-01 00:00:00]], dtype='interval[datetime64[ns], right]') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.interval_range(start=0, end=6, periods=4) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. 
>>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], dtype='interval[int64, both]') """ start = maybe_box_datetimelike(start) end = maybe_box_datetimelike(end) endpoint = start if start is not None else end if freq is None and com.any_none(periods, start, end): freq = 1 if is_number(endpoint) else "D" if com.count_not_none(start, end, periods, freq) != 3: raise ValueError( "Of the four parameters: start, end, periods, and " "freq, exactly three must be specified" ) if not _is_valid_endpoint(start): raise ValueError(f"start must be numeric or datetime-like, got {start}") if not _is_valid_endpoint(end): raise ValueError(f"end must be numeric or datetime-like, got {end}") periods = validate_periods(periods) if freq is not None and not is_number(freq): try: freq = to_offset(freq) except ValueError as err: raise ValueError( f"freq must be numeric or convertible to DateOffset, got {freq}" ) from err # verify type compatibility if not all( [ _is_type_compatible(start, end), _is_type_compatible(start, freq), _is_type_compatible(end, freq), ] ): raise TypeError("start, end, freq need to be type compatible") # +1 to convert interval count to breaks count (n breaks = n-1 intervals) if periods is not None: periods += 1 breaks: np.ndarray | TimedeltaIndex | DatetimeIndex if is_number(endpoint): if com.all_not_none(start, end, freq): # 0.1 ensures we capture end breaks = np.arange(start, end + (freq * 0.1), freq) else: # compute the period/start/end if unspecified (at most one) if periods is None: periods = int((end - start) // freq) + 1 elif start is None: start = end - (periods - 1) * freq elif end is None: end = start + (periods - 1) * freq breaks = np.linspace(start, end, periods) if all(is_integer(x) for x in com.not_none(start, end, freq)): # np.linspace always produces float output # error: Argument 1 to "maybe_downcast_numeric" has incompatible type # "Union[ndarray[Any, Any], TimedeltaIndex, DatetimeIndex]"; # expected "ndarray[Any, Any]" [ breaks = maybe_downcast_numeric( breaks, # type: ignore[arg-type] np.dtype("int64"), ) else: # delegate to the appropriate range function if isinstance(endpoint, Timestamp): breaks = date_range(start=start, end=end, periods=periods, freq=freq) else: breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq) return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
(start=None, end=None, periods=None, freq=None, name: 'Hashable | None' = None, closed: 'IntervalClosedType' = 'right') -> 'IntervalIndex'
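A brief sketch for the record above: building integer-valued bins and feeding them to pd.cut (values are illustrative):

import pandas as pd

# start, end and freq are given; periods is derived from them.
bins = pd.interval_range(start=0, end=10, freq=2)
print(bins)
# IntervalIndex([(0, 2], (2, 4], (4, 6], (6, 8], (8, 10]], dtype='interval[int64, right]')

# A common follow-up: bin a numeric column with the resulting IntervalIndex.
s = pd.Series([1, 3, 7, 9])
print(pd.cut(s, bins=bins))  # each value is assigned to the interval containing it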
68,333
pandas.core.dtypes.missing
isna
Detect missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : scalar or array-like Object to check for null or missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is missing. See Also -------- notna : Boolean inverse of pandas.isna. Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.isna('dog') False >>> pd.isna(pd.NA) True >>> pd.isna(np.nan) True ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.isna(array) array([[False, True, False], [False, False, True]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.isna(index) array([False, False, True, False]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.isna(df) 0 1 2 0 False False False 1 False True False >>> pd.isna(df[1]) 0 False 1 True Name: 1, dtype: bool
def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : scalar or array-like Object to check for null or missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is missing. See Also -------- notna : Boolean inverse of pandas.isna. Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.isna('dog') False >>> pd.isna(pd.NA) True >>> pd.isna(np.nan) True ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.isna(array) array([[False, True, False], [False, False, True]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.isna(index) array([False, False, True, False]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.isna(df) 0 1 2 0 False False False 1 False True False >>> pd.isna(df[1]) 0 False 1 True Name: 1, dtype: bool """ return _isna(obj)
(obj: 'object') -> 'bool | npt.NDArray[np.bool_] | NDFrame'
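A compact sketch for the record above, emphasising that isna treats all of the null markers uniformly (data is illustrative):

import numpy as np
import pandas as pd

# np.nan, None, pd.NaT and pd.NA are all reported as missing.
s = pd.Series([1.0, np.nan, None], dtype="float64")
print(pd.isna(s))          # element-wise boolean Series: [False, True, True]

mixed = pd.Series(["x", None, pd.NaT, pd.NA], dtype=object)
print(pd.isna(mixed))      # True for every null-like entry except "x"

# Scalar input gives back a plain bool.
print(pd.isna(pd.NaT))     # True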
68,335
pandas.io.json._normalize
json_normalize
Normalize semi-structured JSON data into a flat table. Parameters ---------- data : dict or list of dicts Unserialized JSON objects. record_path : str or list of str, default None Path in each object to list of records. If not passed, data will be assumed to be an array of records. meta : list of paths (str or list of str), default None Fields to use as metadata for each record in resulting table. meta_prefix : str, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if meta is ['foo', 'bar']. record_prefix : str, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar']. errors : {'raise', 'ignore'}, default 'raise' Configures error handling. * 'ignore' : will ignore KeyError if keys listed in meta are not always present. * 'raise' : will raise KeyError if keys listed in meta are not always present. sep : str, default '.' Nested records will generate names separated by sep. e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar. max_level : int, default None Max number of levels(depth of dict) to normalize. if None, normalizes all levels. Returns ------- frame : DataFrame Normalize semi-structured JSON data into a flat table. Examples -------- >>> data = [ ... {"id": 1, "name": {"first": "Coleen", "last": "Volk"}}, ... {"name": {"given": "Mark", "family": "Regner"}}, ... {"id": 2, "name": "Faye Raker"}, ... ] >>> pd.json_normalize(data) id name.first name.last name.given name.family name 0 1.0 Coleen Volk NaN NaN NaN 1 NaN NaN NaN Mark Regner NaN 2 2.0 NaN NaN NaN NaN Faye Raker >>> data = [ ... { ... "id": 1, ... "name": "Cole Volk", ... "fitness": {"height": 130, "weight": 60}, ... }, ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, ... { ... "id": 2, ... "name": "Faye Raker", ... "fitness": {"height": 130, "weight": 60}, ... }, ... ] >>> pd.json_normalize(data, max_level=0) id name fitness 0 1.0 Cole Volk {'height': 130, 'weight': 60} 1 NaN Mark Reg {'height': 130, 'weight': 60} 2 2.0 Faye Raker {'height': 130, 'weight': 60} Normalizes nested data up to level 1. >>> data = [ ... { ... "id": 1, ... "name": "Cole Volk", ... "fitness": {"height": 130, "weight": 60}, ... }, ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, ... { ... "id": 2, ... "name": "Faye Raker", ... "fitness": {"height": 130, "weight": 60}, ... }, ... ] >>> pd.json_normalize(data, max_level=1) id name fitness.height fitness.weight 0 1.0 Cole Volk 130 60 1 NaN Mark Reg 130 60 2 2.0 Faye Raker 130 60 >>> data = [ ... { ... "state": "Florida", ... "shortname": "FL", ... "info": {"governor": "Rick Scott"}, ... "counties": [ ... {"name": "Dade", "population": 12345}, ... {"name": "Broward", "population": 40000}, ... {"name": "Palm Beach", "population": 60000}, ... ], ... }, ... { ... "state": "Ohio", ... "shortname": "OH", ... "info": {"governor": "John Kasich"}, ... "counties": [ ... {"name": "Summit", "population": 1234}, ... {"name": "Cuyahoga", "population": 1337}, ... ], ... }, ... ] >>> result = pd.json_normalize( ... data, "counties", ["state", "shortname", ["info", "governor"]] ... ) >>> result name population state shortname info.governor 0 Dade 12345 Florida FL Rick Scott 1 Broward 40000 Florida FL Rick Scott 2 Palm Beach 60000 Florida FL Rick Scott 3 Summit 1234 Ohio OH John Kasich 4 Cuyahoga 1337 Ohio OH John Kasich >>> data = {"A": [1, 2]} >>> pd.json_normalize(data, "A", record_prefix="Prefix.") Prefix.0 0 1 1 2 Returns normalized data with columns prefixed with the given string.
def json_normalize( data: dict | list[dict], record_path: str | list | None = None, meta: str | list[str | list[str]] | None = None, meta_prefix: str | None = None, record_prefix: str | None = None, errors: IgnoreRaise = "raise", sep: str = ".", max_level: int | None = None, ) -> DataFrame: """ Normalize semi-structured JSON data into a flat table. Parameters ---------- data : dict or list of dicts Unserialized JSON objects. record_path : str or list of str, default None Path in each object to list of records. If not passed, data will be assumed to be an array of records. meta : list of paths (str or list of str), default None Fields to use as metadata for each record in resulting table. meta_prefix : str, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if meta is ['foo', 'bar']. record_prefix : str, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar']. errors : {'raise', 'ignore'}, default 'raise' Configures error handling. * 'ignore' : will ignore KeyError if keys listed in meta are not always present. * 'raise' : will raise KeyError if keys listed in meta are not always present. sep : str, default '.' Nested records will generate names separated by sep. e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar. max_level : int, default None Max number of levels(depth of dict) to normalize. if None, normalizes all levels. Returns ------- frame : DataFrame Normalize semi-structured JSON data into a flat table. Examples -------- >>> data = [ ... {"id": 1, "name": {"first": "Coleen", "last": "Volk"}}, ... {"name": {"given": "Mark", "family": "Regner"}}, ... {"id": 2, "name": "Faye Raker"}, ... ] >>> pd.json_normalize(data) id name.first name.last name.given name.family name 0 1.0 Coleen Volk NaN NaN NaN 1 NaN NaN NaN Mark Regner NaN 2 2.0 NaN NaN NaN NaN Faye Raker >>> data = [ ... { ... "id": 1, ... "name": "Cole Volk", ... "fitness": {"height": 130, "weight": 60}, ... }, ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, ... { ... "id": 2, ... "name": "Faye Raker", ... "fitness": {"height": 130, "weight": 60}, ... }, ... ] >>> pd.json_normalize(data, max_level=0) id name fitness 0 1.0 Cole Volk {'height': 130, 'weight': 60} 1 NaN Mark Reg {'height': 130, 'weight': 60} 2 2.0 Faye Raker {'height': 130, 'weight': 60} Normalizes nested data up to level 1. >>> data = [ ... { ... "id": 1, ... "name": "Cole Volk", ... "fitness": {"height": 130, "weight": 60}, ... }, ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, ... { ... "id": 2, ... "name": "Faye Raker", ... "fitness": {"height": 130, "weight": 60}, ... }, ... ] >>> pd.json_normalize(data, max_level=1) id name fitness.height fitness.weight 0 1.0 Cole Volk 130 60 1 NaN Mark Reg 130 60 2 2.0 Faye Raker 130 60 >>> data = [ ... { ... "state": "Florida", ... "shortname": "FL", ... "info": {"governor": "Rick Scott"}, ... "counties": [ ... {"name": "Dade", "population": 12345}, ... {"name": "Broward", "population": 40000}, ... {"name": "Palm Beach", "population": 60000}, ... ], ... }, ... { ... "state": "Ohio", ... "shortname": "OH", ... "info": {"governor": "John Kasich"}, ... "counties": [ ... {"name": "Summit", "population": 1234}, ... {"name": "Cuyahoga", "population": 1337}, ... ], ... }, ... ] >>> result = pd.json_normalize( ... data, "counties", ["state", "shortname", ["info", "governor"]] ... 
) >>> result name population state shortname info.governor 0 Dade 12345 Florida FL Rick Scott 1 Broward 40000 Florida FL Rick Scott 2 Palm Beach 60000 Florida FL Rick Scott 3 Summit 1234 Ohio OH John Kasich 4 Cuyahoga 1337 Ohio OH John Kasich >>> data = {"A": [1, 2]} >>> pd.json_normalize(data, "A", record_prefix="Prefix.") Prefix.0 0 1 1 2 Returns normalized data with columns prefixed with the given string. """ def _pull_field( js: dict[str, Any], spec: list | str, extract_record: bool = False ) -> Scalar | Iterable: """Internal function to pull field""" result = js try: if isinstance(spec, list): for field in spec: if result is None: raise KeyError(field) result = result[field] else: result = result[spec] except KeyError as e: if extract_record: raise KeyError( f"Key {e} not found. If specifying a record_path, all elements of " f"data should have the path." ) from e if errors == "ignore": return np.nan else: raise KeyError( f"Key {e} not found. To replace missing values of {e} with " f"np.nan, pass in errors='ignore'" ) from e return result def _pull_records(js: dict[str, Any], spec: list | str) -> list: """ Internal function to pull field for records, and similar to _pull_field, but require to return list. And will raise error if has non iterable value. """ result = _pull_field(js, spec, extract_record=True) # GH 31507 GH 30145, GH 26284 if result is not list, raise TypeError if not # null, otherwise return an empty list if not isinstance(result, list): if pd.isnull(result): result = [] else: raise TypeError( f"{js} has non list value {result} for path {spec}. " "Must be list or null." ) return result if isinstance(data, list) and not data: return DataFrame() elif isinstance(data, dict): # A bit of a hackjob data = [data] elif isinstance(data, abc.Iterable) and not isinstance(data, str): # GH35923 Fix pd.json_normalize to not skip the first element of a # generator input data = list(data) else: raise NotImplementedError # check to see if a simple recursive function is possible to # improve performance (see #15621) but only for cases such # as pd.Dataframe(data) or pd.Dataframe(data, sep) if ( record_path is None and meta is None and meta_prefix is None and record_prefix is None and max_level is None ): return DataFrame(_simple_json_normalize(data, sep=sep)) if record_path is None: if any([isinstance(x, dict) for x in y.values()] for y in data): # naive normalization, this is idempotent for flat records # and potentially will inflate the data considerably for # deeply nested structures: # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@} # # TODO: handle record value which are lists, at least error # reasonably data = nested_to_record(data, sep=sep, max_level=max_level) return DataFrame(data) elif not isinstance(record_path, list): record_path = [record_path] if meta is None: meta = [] elif not isinstance(meta, list): meta = [meta] _meta = [m if isinstance(m, list) else [m] for m in meta] # Disastrously inefficient for now records: list = [] lengths = [] meta_vals: DefaultDict = defaultdict(list) meta_keys = [sep.join(val) for val in _meta] def _recursive_extract(data, path, seen_meta, level: int = 0) -> None: if isinstance(data, dict): data = [data] if len(path) > 1: for obj in data: for val, key in zip(_meta, meta_keys): if level + 1 == len(val): seen_meta[key] = _pull_field(obj, val[-1]) _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1) else: for obj in data: recs = _pull_records(obj, path[0]) recs = [ nested_to_record(r, sep=sep, max_level=max_level) if 
isinstance(r, dict) else r for r in recs ] # For repeating the metadata later lengths.append(len(recs)) for val, key in zip(_meta, meta_keys): if level + 1 > len(val): meta_val = seen_meta[key] else: meta_val = _pull_field(obj, val[level:]) meta_vals[key].append(meta_val) records.extend(recs) _recursive_extract(data, record_path, {}, level=0) result = DataFrame(records) if record_prefix is not None: result = result.rename(columns=lambda x: f"{record_prefix}{x}") # Data types, a problem for k, v in meta_vals.items(): if meta_prefix is not None: k = meta_prefix + k if k in result: raise ValueError( f"Conflicting metadata name {k}, need distinguishing prefix " ) # GH 37782 values = np.array(v, dtype=object) if values.ndim > 1: # GH 37782 values = np.empty((len(v),), dtype=object) for i, v in enumerate(v): values[i] = v result[k] = values.repeat(lengths) return result
(data: 'dict | list[dict]', record_path: 'str | list | None' = None, meta: 'str | list[str | list[str]] | None' = None, meta_prefix: 'str | None' = None, record_prefix: 'str | None' = None, errors: 'IgnoreRaise' = 'raise', sep: 'str' = '.', max_level: 'int | None' = None) -> 'DataFrame'
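A minimal sketch for the record above: flattening nested records while carrying a parent field along as metadata (the payload is illustrative):

import pandas as pd

# Records live under "items"; "order_id" is repeated for each flattened record.
data = [
    {"order_id": 1, "items": [{"sku": "A", "qty": 2}, {"sku": "B", "qty": 1}]},
    {"order_id": 2, "items": [{"sku": "C", "qty": 5}]},
]
flat = pd.json_normalize(data, record_path="items", meta=["order_id"])
print(flat)
#   sku  qty  order_id
# 0   A    2         1
# 1   B    1         1
# 2   C    5         2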
68,336
pandas.core.reshape.melt
lreshape
Reshape wide-format data to long. Generalized inverse of DataFrame.pivot. Accepts a dictionary, ``groups``, in which each key is a new column name and each value is a list of old column names that will be "melted" under the new column name as part of the reshape. Parameters ---------- data : DataFrame The wide-format DataFrame. groups : dict {new_name : list_of_columns}. dropna : bool, default True Do not include columns whose entries are all NaN. Returns ------- DataFrame Reshaped DataFrame. See Also -------- melt : Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. pivot : Create a spreadsheet-style pivot table as a DataFrame. DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. wide_to_long : Wide panel to long format. Less flexible but more user-friendly than melt. Examples -------- >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], ... 'team': ['Red Sox', 'Yankees'], ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team year hr 0 Red Sox 2007 514 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526
def lreshape(data: DataFrame, groups: dict, dropna: bool = True) -> DataFrame: """ Reshape wide-format data to long. Generalized inverse of DataFrame.pivot. Accepts a dictionary, ``groups``, in which each key is a new column name and each value is a list of old column names that will be "melted" under the new column name as part of the reshape. Parameters ---------- data : DataFrame The wide-format DataFrame. groups : dict {new_name : list_of_columns}. dropna : bool, default True Do not include columns whose entries are all NaN. Returns ------- DataFrame Reshaped DataFrame. See Also -------- melt : Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. pivot : Create a spreadsheet-style pivot table as a DataFrame. DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. wide_to_long : Wide panel to long format. Less flexible but more user-friendly than melt. Examples -------- >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], ... 'team': ['Red Sox', 'Yankees'], ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team year hr 0 Red Sox 2007 514 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526 """ mdata = {} pivot_cols = [] all_cols: set[Hashable] = set() K = len(next(iter(groups.values()))) for target, names in groups.items(): if len(names) != K: raise ValueError("All column lists must be same length") to_concat = [data[col]._values for col in names] mdata[target] = concat_compat(to_concat) pivot_cols.append(target) all_cols = all_cols.union(names) id_cols = list(data.columns.difference(all_cols)) for col in id_cols: mdata[col] = np.tile(data[col]._values, K) if dropna: mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool) for c in pivot_cols: mask &= notna(mdata[c]) if not mask.all(): mdata = {k: v[mask] for k, v in mdata.items()} return data._constructor(mdata, columns=id_cols + pivot_cols)
(data: 'DataFrame', groups: 'dict', dropna: 'bool' = True) -> 'DataFrame'
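A small sketch for the record above: melting two wide columns into one long column while repeating the identifier (data is illustrative):

import pandas as pd

wide = pd.DataFrame(
    {
        "team": ["Red Sox", "Yankees"],
        "hr2007": [514, 573],
        "hr2008": [545, 526],
    }
)

# Both old columns are stacked into a single new 'hr' column;
# the untouched 'team' column is tiled to match the long layout.
long_df = pd.lreshape(wide, {"hr": ["hr2007", "hr2008"]})
print(long_df)
#       team   hr
# 0  Red Sox  514
# 1  Yankees  573
# 2  Red Sox  545
# 3  Yankees  526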
68,337
pandas.core.reshape.melt
melt
Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. Parameters ---------- id_vars : scalar, tuple, list, or ndarray, optional Column(s) to use as identifier variables. value_vars : scalar, tuple, list, or ndarray, optional Column(s) to unpivot. If not specified, uses all columns that are not set as `id_vars`. var_name : scalar, default None Name to use for the 'variable' column. If None it uses ``frame.columns.name`` or 'variable'. value_name : scalar, default 'value' Name to use for the 'value' column, can't be an existing column label. col_level : scalar, optional If columns are a MultiIndex then use this level to melt. ignore_index : bool, default True If True, original index is ignored. If False, the original index is retained. Index labels will be repeated as necessary. Returns ------- DataFrame Unpivoted DataFrame. See Also -------- DataFrame.melt : Identical method. pivot_table : Create a spreadsheet-style pivot table as a DataFrame. DataFrame.pivot : Return reshaped DataFrame organized by given index / column values. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- Reference :ref:`the user guide <reshaping.melt>` for more examples. Examples -------- >>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, ... 'B': {0: 1, 1: 3, 2: 5}, ... 'C': {0: 2, 1: 4, 2: 6}}) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> pd.melt(df, id_vars=['A'], value_vars=['B']) A variable value 0 a B 1 1 b B 3 2 c B 5 >>> pd.melt(df, id_vars=['A'], value_vars=['B', 'C']) A variable value 0 a B 1 1 b B 3 2 c B 5 3 a C 2 4 b C 4 5 c C 6 The names of 'variable' and 'value' columns can be customized: >>> pd.melt(df, id_vars=['A'], value_vars=['B'], ... var_name='myVarname', value_name='myValname') A myVarname myValname 0 a B 1 1 b B 3 2 c B 5 Original index values can be kept around: >>> pd.melt(df, id_vars=['A'], value_vars=['B', 'C'], ignore_index=False) A variable value 0 a B 1 1 b B 3 2 c B 5 0 a C 2 1 b C 4 2 c C 6 If you have multi-index columns: >>> df.columns = [list('ABC'), list('DEF')] >>> df A B C D E F 0 a 1 2 1 b 3 4 2 c 5 6 >>> pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B']) A variable value 0 a B 1 1 b B 3 2 c B 5 >>> pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')]) (A, D) variable_0 variable_1 value 0 a B E 1 1 b B E 3 2 c B E 5
@Appender(_shared_docs["melt"] % {"caller": "pd.melt(df, ", "other": "DataFrame.melt"}) def melt( frame: DataFrame, id_vars=None, value_vars=None, var_name=None, value_name: Hashable = "value", col_level=None, ignore_index: bool = True, ) -> DataFrame: if value_name in frame.columns: raise ValueError( f"value_name ({value_name}) cannot match an element in " "the DataFrame columns." ) id_vars = ensure_list_vars(id_vars, "id_vars", frame.columns) value_vars_was_not_none = value_vars is not None value_vars = ensure_list_vars(value_vars, "value_vars", frame.columns) if id_vars or value_vars: if col_level is not None: level = frame.columns.get_level_values(col_level) else: level = frame.columns labels = id_vars + value_vars idx = level.get_indexer_for(labels) missing = idx == -1 if missing.any(): missing_labels = [ lab for lab, not_found in zip(labels, missing) if not_found ] raise KeyError( "The following id_vars or value_vars are not present in " f"the DataFrame: {missing_labels}" ) if value_vars_was_not_none: frame = frame.iloc[:, algos.unique(idx)] else: frame = frame.copy() else: frame = frame.copy() if col_level is not None: # allow list or other? # frame is a copy frame.columns = frame.columns.get_level_values(col_level) if var_name is None: if isinstance(frame.columns, MultiIndex): if len(frame.columns.names) == len(set(frame.columns.names)): var_name = frame.columns.names else: var_name = [f"variable_{i}" for i in range(len(frame.columns.names))] else: var_name = [ frame.columns.name if frame.columns.name is not None else "variable" ] elif is_list_like(var_name): raise ValueError(f"{var_name=} must be a scalar.") else: var_name = [var_name] num_rows, K = frame.shape num_cols_adjusted = K - len(id_vars) mdata: dict[Hashable, AnyArrayLike] = {} for col in id_vars: id_data = frame.pop(col) if not isinstance(id_data.dtype, np.dtype): # i.e. ExtensionDtype if num_cols_adjusted > 0: mdata[col] = concat([id_data] * num_cols_adjusted, ignore_index=True) else: # We can't concat empty list. (GH 46044) mdata[col] = type(id_data)([], name=id_data.name, dtype=id_data.dtype) else: mdata[col] = np.tile(id_data._values, num_cols_adjusted) mcolumns = id_vars + var_name + [value_name] if frame.shape[1] > 0 and not any( not isinstance(dt, np.dtype) and dt._supports_2d for dt in frame.dtypes ): mdata[value_name] = concat( [frame.iloc[:, i] for i in range(frame.shape[1])] ).values else: mdata[value_name] = frame._values.ravel("F") for i, col in enumerate(var_name): mdata[col] = frame.columns._get_level_values(i).repeat(num_rows) result = frame._constructor(mdata, columns=mcolumns) if not ignore_index: result.index = tile_compat(frame.index, num_cols_adjusted) return result
(frame: 'DataFrame', id_vars=None, value_vars=None, var_name=None, value_name: 'Hashable' = 'value', col_level=None, ignore_index: 'bool' = True) -> 'DataFrame'
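A minimal sketch for the record above: unpivoting score columns into a tidy long table with custom variable/value names (data is illustrative):

import pandas as pd

df = pd.DataFrame(
    {"name": ["ann", "bob"], "math": [90, 75], "physics": [85, 95]}
)

# Keep 'name' as the identifier and unpivot the two score columns.
tidy = pd.melt(
    df,
    id_vars="name",
    value_vars=["math", "physics"],
    var_name="subject",
    value_name="score",
)
print(tidy)
#   name  subject  score
# 0  ann     math     90
# 1  bob     math     75
# 2  ann  physics     85
# 3  bob  physics     95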
68,338
pandas.core.reshape.merge
merge
Merge DataFrame or named Series objects with a database-style join. A named Series object is treated as a DataFrame with a single named column. The join is done on columns or indexes. If joining columns on columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes on indexes or indexes on a column or columns, the index will be passed on. When performing a cross merge, no column specifications to merge on are allowed. .. warning:: If both key columns contain rows where the key is a null value, those rows will be matched against each other. This is different from usual SQL join behaviour and can lead to unexpected results. Parameters ---------- left : DataFrame or named Series right : DataFrame or named Series Object to merge with. how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner' Type of merge to be performed. * left: use only keys from left frame, similar to a SQL left outer join; preserve key order. * right: use only keys from right frame, similar to a SQL right outer join; preserve key order. * outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. * inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys. * cross: creates the cartesian product from both frames, preserves the order of the left keys. on : label or list Column or index level names to join on. These must be found in both DataFrames. If `on` is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_on : label or list, or array-like Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. right_on : label or list, or array-like Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index : bool, default False Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index : bool, default False Use the index from the right DataFrame as the join key. Same caveats as left_index. sort : bool, default False Sort the join keys lexicographically in the result DataFrame. If False, the order of the join keys depends on the join type (how keyword). suffixes : list-like, default is ("_x", "_y") A length-2 sequence where each element is optionally a string indicating the suffix to add to overlapping column names in `left` and `right` respectively. Pass a value of `None` instead of a string to indicate that the column name from `left` or `right` should be left as-is, with no suffix. At least one of the values must not be None. copy : bool, default True If False, avoid copy if possible. .. note:: The `copy` keyword will change behavior in pandas 3.0. `Copy-on-Write <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ will be enabled by default, which means that all methods with a `copy` keyword will use a lazy copy mechanism to defer the copy and ignore the `copy` keyword. The `copy` keyword will be removed in a future version of pandas. 
You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` indicator : bool or str, default False If True, adds a column to the output DataFrame called "_merge" with information on the source of each row. The column can be given a different name by providing a string argument. The column will have a Categorical type with the value of "left_only" for observations whose merge key only appears in the left DataFrame, "right_only" for observations whose merge key only appears in the right DataFrame, and "both" if the observation's merge key is found in both DataFrames. validate : str, optional If specified, checks if merge is of specified type. * "one_to_one" or "1:1": check if merge keys are unique in both left and right datasets. * "one_to_many" or "1:m": check if merge keys are unique in left dataset. * "many_to_one" or "m:1": check if merge keys are unique in right dataset. * "many_to_many" or "m:m": allowed, but does not result in checks. Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- merge_ordered : Merge with optional filling/interpolation. merge_asof : Merge on nearest keys. DataFrame.join : Similar method using indices. Examples -------- >>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}) >>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. >>> df1.merge(df2, left_on='lkey', right_on='rkey') lkey value_x rkey value_y 0 foo 1 foo 5 1 foo 1 foo 8 2 bar 2 bar 6 3 baz 3 baz 7 4 foo 5 foo 5 5 foo 5 foo 8 Merge DataFrames df1 and df2 with specified left and right suffixes appended to any overlapping columns. >>> df1.merge(df2, left_on='lkey', right_on='rkey', ... suffixes=('_left', '_right')) lkey value_left rkey value_right 0 foo 1 foo 5 1 foo 1 foo 8 2 bar 2 bar 6 3 baz 3 baz 7 4 foo 5 foo 5 5 foo 5 foo 8 Merge DataFrames df1 and df2, but raise an exception if the DataFrames have any overlapping columns. >>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False)) Traceback (most recent call last): ... ValueError: columns overlap but no suffix specified: Index(['value'], dtype='object') >>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]}) >>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]}) >>> df1 a b 0 foo 1 1 bar 2 >>> df2 a c 0 foo 3 1 baz 4 >>> df1.merge(df2, how='inner', on='a') a b c 0 foo 1 3 >>> df1.merge(df2, how='left', on='a') a b c 0 foo 1 3.0 1 bar 2 NaN >>> df1 = pd.DataFrame({'left': ['foo', 'bar']}) >>> df2 = pd.DataFrame({'right': [7, 8]}) >>> df1 left 0 foo 1 bar >>> df2 right 0 7 1 8 >>> df1.merge(df2, how='cross') left right 0 foo 7 1 foo 8 2 bar 7 3 bar 8
@Substitution("\nleft : DataFrame or named Series") @Appender(_merge_doc, indents=0) def merge( left: DataFrame | Series, right: DataFrame | Series, how: MergeHow = "inner", on: IndexLabel | AnyArrayLike | None = None, left_on: IndexLabel | AnyArrayLike | None = None, right_on: IndexLabel | AnyArrayLike | None = None, left_index: bool = False, right_index: bool = False, sort: bool = False, suffixes: Suffixes = ("_x", "_y"), copy: bool | None = None, indicator: str | bool = False, validate: str | None = None, ) -> DataFrame: left_df = _validate_operand(left) right_df = _validate_operand(right) if how == "cross": return _cross_merge( left_df, right_df, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, indicator=indicator, validate=validate, copy=copy, ) else: op = _MergeOperation( left_df, right_df, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, indicator=indicator, validate=validate, ) return op.get_result(copy=copy)
(left: 'DataFrame | Series', right: 'DataFrame | Series', how: 'MergeHow' = 'inner', on: 'IndexLabel | AnyArrayLike | None' = None, left_on: 'IndexLabel | AnyArrayLike | None' = None, right_on: 'IndexLabel | AnyArrayLike | None' = None, left_index: 'bool' = False, right_index: 'bool' = False, sort: 'bool' = False, suffixes: 'Suffixes' = ('_x', '_y'), copy: 'bool | None' = None, indicator: 'str | bool' = False, validate: 'str | None' = None) -> 'DataFrame'
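The worked examples above do not exercise ``indicator`` or ``validate``. A minimal sketch of both, using small invented frames (the names ``left``/``right`` and their columns are illustrative only):

import pandas as pd

left = pd.DataFrame({"key": ["a", "b", "c"], "lval": [1, 2, 3]})
right = pd.DataFrame({"key": ["b", "c", "d"], "rval": [4, 5, 6]})

# indicator=True appends a categorical "_merge" column flagging whether each
# row's key came from the left frame only, the right frame only, or both.
flagged = left.merge(right, on="key", how="outer", indicator=True)

# validate="one_to_one" makes the merge raise pandas.errors.MergeError
# if the join key is duplicated on either side; here both sides are unique.
checked = left.merge(right, on="key", validate="one_to_one")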
68,339
pandas.core.reshape.merge
merge_asof
Perform a merge by key distance. This is similar to a left-join except that we match on nearest key rather than equal keys. Both DataFrames must be sorted by the key. For each row in the left DataFrame: - A "backward" search selects the last row in the right DataFrame whose 'on' key is less than or equal to the left's key. - A "forward" search selects the first row in the right DataFrame whose 'on' key is greater than or equal to the left's key. - A "nearest" search selects the row in the right DataFrame whose 'on' key is closest in absolute distance to the left's key. Optionally match on equivalent keys with 'by' before searching with 'on'. Parameters ---------- left : DataFrame or named Series right : DataFrame or named Series on : label Field name to join on. Must be found in both DataFrames. The data MUST be ordered. Furthermore this must be a numeric column, such as datetimelike, integer, or float. On or left_on/right_on must be given. left_on : label Field name to join on in left DataFrame. right_on : label Field name to join on in right DataFrame. left_index : bool Use the index of the left DataFrame as the join key. right_index : bool Use the index of the right DataFrame as the join key. by : column name or list of column names Match on these columns before performing merge operation. left_by : column name Field names to match on in the left DataFrame. right_by : column name Field names to match on in the right DataFrame. suffixes : 2-length sequence (tuple, list, ...) Suffix to apply to overlapping column names in the left and right side, respectively. tolerance : int or Timedelta, optional, default None Select asof tolerance within this range; must be compatible with the merge index. allow_exact_matches : bool, default True - If True, allow matching with the same 'on' value (i.e. less-than-or-equal-to / greater-than-or-equal-to) - If False, don't match the same 'on' value (i.e., strictly less-than / strictly greater-than). direction : 'backward' (default), 'forward', or 'nearest' Whether to search for prior, subsequent, or closest matches. Returns ------- DataFrame See Also -------- merge : Merge with a database-style join. merge_ordered : Merge with optional filling/interpolation. Examples -------- >>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) >>> left a left_val 0 1 a 1 5 b 2 10 c >>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) >>> right a right_val 0 1 1 1 2 2 2 3 3 3 6 6 4 7 7 >>> pd.merge_asof(left, right, on="a") a left_val right_val 0 1 a 1 1 5 b 3 2 10 c 7 >>> pd.merge_asof(left, right, on="a", allow_exact_matches=False) a left_val right_val 0 1 a NaN 1 5 b 3.0 2 10 c 7.0 >>> pd.merge_asof(left, right, on="a", direction="forward") a left_val right_val 0 1 a 1.0 1 5 b 6.0 2 10 c NaN >>> pd.merge_asof(left, right, on="a", direction="nearest") a left_val right_val 0 1 a 1 1 5 b 6 2 10 c 7 We can use indexed DataFrames as well. >>> left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10]) >>> left left_val 1 a 5 b 10 c >>> right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7]) >>> right right_val 1 1 2 2 3 3 6 6 7 7 >>> pd.merge_asof(left, right, left_index=True, right_index=True) left_val right_val 1 a 1 5 b 3 10 c 7 Here is a real-world times-series example >>> quotes = pd.DataFrame( ... { ... "time": [ ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.030"), ... pd.Timestamp("2016-05-25 13:30:00.041"), ... 
pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.049"), ... pd.Timestamp("2016-05-25 13:30:00.072"), ... pd.Timestamp("2016-05-25 13:30:00.075") ... ], ... "ticker": [ ... "GOOG", ... "MSFT", ... "MSFT", ... "MSFT", ... "GOOG", ... "AAPL", ... "GOOG", ... "MSFT" ... ], ... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01], ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03] ... } ... ) >>> quotes time ticker bid ask 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03 >>> trades = pd.DataFrame( ... { ... "time": [ ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.038"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.048") ... ], ... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], ... "price": [51.95, 51.95, 720.77, 720.92, 98.0], ... "quantity": [75, 155, 100, 100, 100] ... } ... ) >>> trades time ticker price quantity 0 2016-05-25 13:30:00.023 MSFT 51.95 75 1 2016-05-25 13:30:00.038 MSFT 51.95 155 2 2016-05-25 13:30:00.048 GOOG 720.77 100 3 2016-05-25 13:30:00.048 GOOG 720.92 100 4 2016-05-25 13:30:00.048 AAPL 98.00 100 By default we are taking the asof of the quotes >>> pd.merge_asof(trades, quotes, on="time", by="ticker") time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 2ms between the quote time and the trade time >>> pd.merge_asof( ... trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms") ... ) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 10ms between the quote time and the trade time and we exclude exact matches on time. However *prior* data will propagate forward >>> pd.merge_asof( ... trades, ... quotes, ... on="time", ... by="ticker", ... tolerance=pd.Timedelta("10ms"), ... allow_exact_matches=False ... ) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN 3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
def merge_asof( left: DataFrame | Series, right: DataFrame | Series, on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, left_index: bool = False, right_index: bool = False, by=None, left_by=None, right_by=None, suffixes: Suffixes = ("_x", "_y"), tolerance: int | Timedelta | None = None, allow_exact_matches: bool = True, direction: str = "backward", ) -> DataFrame: """ Perform a merge by key distance. This is similar to a left-join except that we match on nearest key rather than equal keys. Both DataFrames must be sorted by the key. For each row in the left DataFrame: - A "backward" search selects the last row in the right DataFrame whose 'on' key is less than or equal to the left's key. - A "forward" search selects the first row in the right DataFrame whose 'on' key is greater than or equal to the left's key. - A "nearest" search selects the row in the right DataFrame whose 'on' key is closest in absolute distance to the left's key. Optionally match on equivalent keys with 'by' before searching with 'on'. Parameters ---------- left : DataFrame or named Series right : DataFrame or named Series on : label Field name to join on. Must be found in both DataFrames. The data MUST be ordered. Furthermore this must be a numeric column, such as datetimelike, integer, or float. On or left_on/right_on must be given. left_on : label Field name to join on in left DataFrame. right_on : label Field name to join on in right DataFrame. left_index : bool Use the index of the left DataFrame as the join key. right_index : bool Use the index of the right DataFrame as the join key. by : column name or list of column names Match on these columns before performing merge operation. left_by : column name Field names to match on in the left DataFrame. right_by : column name Field names to match on in the right DataFrame. suffixes : 2-length sequence (tuple, list, ...) Suffix to apply to overlapping column names in the left and right side, respectively. tolerance : int or Timedelta, optional, default None Select asof tolerance within this range; must be compatible with the merge index. allow_exact_matches : bool, default True - If True, allow matching with the same 'on' value (i.e. less-than-or-equal-to / greater-than-or-equal-to) - If False, don't match the same 'on' value (i.e., strictly less-than / strictly greater-than). direction : 'backward' (default), 'forward', or 'nearest' Whether to search for prior, subsequent, or closest matches. Returns ------- DataFrame See Also -------- merge : Merge with a database-style join. merge_ordered : Merge with optional filling/interpolation. Examples -------- >>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) >>> left a left_val 0 1 a 1 5 b 2 10 c >>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) >>> right a right_val 0 1 1 1 2 2 2 3 3 3 6 6 4 7 7 >>> pd.merge_asof(left, right, on="a") a left_val right_val 0 1 a 1 1 5 b 3 2 10 c 7 >>> pd.merge_asof(left, right, on="a", allow_exact_matches=False) a left_val right_val 0 1 a NaN 1 5 b 3.0 2 10 c 7.0 >>> pd.merge_asof(left, right, on="a", direction="forward") a left_val right_val 0 1 a 1.0 1 5 b 6.0 2 10 c NaN >>> pd.merge_asof(left, right, on="a", direction="nearest") a left_val right_val 0 1 a 1 1 5 b 6 2 10 c 7 We can use indexed DataFrames as well. 
>>> left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10]) >>> left left_val 1 a 5 b 10 c >>> right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7]) >>> right right_val 1 1 2 2 3 3 6 6 7 7 >>> pd.merge_asof(left, right, left_index=True, right_index=True) left_val right_val 1 a 1 5 b 3 10 c 7 Here is a real-world times-series example >>> quotes = pd.DataFrame( ... { ... "time": [ ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.030"), ... pd.Timestamp("2016-05-25 13:30:00.041"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.049"), ... pd.Timestamp("2016-05-25 13:30:00.072"), ... pd.Timestamp("2016-05-25 13:30:00.075") ... ], ... "ticker": [ ... "GOOG", ... "MSFT", ... "MSFT", ... "MSFT", ... "GOOG", ... "AAPL", ... "GOOG", ... "MSFT" ... ], ... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01], ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03] ... } ... ) >>> quotes time ticker bid ask 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03 >>> trades = pd.DataFrame( ... { ... "time": [ ... pd.Timestamp("2016-05-25 13:30:00.023"), ... pd.Timestamp("2016-05-25 13:30:00.038"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.048"), ... pd.Timestamp("2016-05-25 13:30:00.048") ... ], ... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], ... "price": [51.95, 51.95, 720.77, 720.92, 98.0], ... "quantity": [75, 155, 100, 100, 100] ... } ... ) >>> trades time ticker price quantity 0 2016-05-25 13:30:00.023 MSFT 51.95 75 1 2016-05-25 13:30:00.038 MSFT 51.95 155 2 2016-05-25 13:30:00.048 GOOG 720.77 100 3 2016-05-25 13:30:00.048 GOOG 720.92 100 4 2016-05-25 13:30:00.048 AAPL 98.00 100 By default we are taking the asof of the quotes >>> pd.merge_asof(trades, quotes, on="time", by="ticker") time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 2ms between the quote time and the trade time >>> pd.merge_asof( ... trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms") ... ) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 10ms between the quote time and the trade time and we exclude exact matches on time. However *prior* data will propagate forward >>> pd.merge_asof( ... trades, ... quotes, ... on="time", ... by="ticker", ... tolerance=pd.Timedelta("10ms"), ... allow_exact_matches=False ... 
) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN 3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN """ op = _AsOfMerge( left, right, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, by=by, left_by=left_by, right_by=right_by, suffixes=suffixes, how="asof", tolerance=tolerance, allow_exact_matches=allow_exact_matches, direction=direction, ) return op.get_result()
(left: 'DataFrame | Series', right: 'DataFrame | Series', on: 'IndexLabel | None' = None, left_on: 'IndexLabel | None' = None, right_on: 'IndexLabel | None' = None, left_index: 'bool' = False, right_index: 'bool' = False, by=None, left_by=None, right_by=None, suffixes: 'Suffixes' = ('_x', '_y'), tolerance: 'int | Timedelta | None' = None, allow_exact_matches: 'bool' = True, direction: 'str' = 'backward') -> 'DataFrame'
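As a smaller complement to the trades/quotes example above, a hedged sketch of a typical event/sensor alignment; the frames and column names are invented for illustration:

import pandas as pd

# Both frames must already be sorted by the "on" key.
events = pd.DataFrame(
    {"ts": pd.to_datetime(["2024-01-01 00:00:05", "2024-01-01 00:00:12"]),
     "event": ["start", "stop"]}
)
readings = pd.DataFrame(
    {"ts": pd.to_datetime(["2024-01-01 00:00:00", "2024-01-01 00:00:10",
                           "2024-01-01 00:00:20"]),
     "temp": [20.1, 20.4, 20.9]}
)

# Attach the most recent reading at or before each event (backward search),
# but only if it is at most 5 seconds old.
aligned = pd.merge_asof(events, readings, on="ts",
                        direction="backward",
                        tolerance=pd.Timedelta("5s"))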
68,340
pandas.core.reshape.merge
merge_ordered
Perform a merge for ordered data with optional filling/interpolation. Designed for ordered data like time series data. Optionally perform group-wise merge (see examples). Parameters ---------- left : DataFrame or named Series right : DataFrame or named Series on : label or list Field names to join on. Must be found in both DataFrames. left_on : label or list, or array-like Field names to join on in left DataFrame. Can be a vector or list of vectors of the length of the DataFrame to use a particular vector as the join key instead of columns. right_on : label or list, or array-like Field names to join on in right DataFrame or vector/list of vectors per left_on docs. left_by : column name or list of column names Group left DataFrame by group columns and merge piece by piece with right DataFrame. Must be None if either left or right are a Series. right_by : column name or list of column names Group right DataFrame by group columns and merge piece by piece with left DataFrame. Must be None if either left or right are a Series. fill_method : {'ffill', None}, default None Interpolation method for data. suffixes : list-like, default is ("_x", "_y") A length-2 sequence where each element is optionally a string indicating the suffix to add to overlapping column names in `left` and `right` respectively. Pass a value of `None` instead of a string to indicate that the column name from `left` or `right` should be left as-is, with no suffix. At least one of the values must not be None. how : {'left', 'right', 'outer', 'inner'}, default 'outer' * left: use only keys from left frame (SQL: left outer join) * right: use only keys from right frame (SQL: right outer join) * outer: use union of keys from both frames (SQL: full outer join) * inner: use intersection of keys from both frames (SQL: inner join). Returns ------- DataFrame The merged DataFrame output type will be the same as 'left', if it is a subclass of DataFrame. See Also -------- merge : Merge with a database-style join. merge_asof : Merge on nearest keys. Examples -------- >>> from pandas import merge_ordered >>> df1 = pd.DataFrame( ... { ... "key": ["a", "c", "e", "a", "c", "e"], ... "lvalue": [1, 2, 3, 1, 2, 3], ... "group": ["a", "a", "a", "b", "b", "b"] ... } ... ) >>> df1 key lvalue group 0 a 1 a 1 c 2 a 2 e 3 a 3 a 1 b 4 c 2 b 5 e 3 b >>> df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]}) >>> df2 key rvalue 0 b 1 1 c 2 2 d 3 >>> merge_ordered(df1, df2, fill_method="ffill", left_by="group") key lvalue group rvalue 0 a 1 a NaN 1 b 1 a 1.0 2 c 2 a 2.0 3 d 2 a 3.0 4 e 3 a 3.0 5 a 1 b NaN 6 b 1 b 1.0 7 c 2 b 2.0 8 d 2 b 3.0 9 e 3 b 3.0
def merge_ordered( left: DataFrame | Series, right: DataFrame | Series, on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, left_by=None, right_by=None, fill_method: str | None = None, suffixes: Suffixes = ("_x", "_y"), how: JoinHow = "outer", ) -> DataFrame: """ Perform a merge for ordered data with optional filling/interpolation. Designed for ordered data like time series data. Optionally perform group-wise merge (see examples). Parameters ---------- left : DataFrame or named Series right : DataFrame or named Series on : label or list Field names to join on. Must be found in both DataFrames. left_on : label or list, or array-like Field names to join on in left DataFrame. Can be a vector or list of vectors of the length of the DataFrame to use a particular vector as the join key instead of columns. right_on : label or list, or array-like Field names to join on in right DataFrame or vector/list of vectors per left_on docs. left_by : column name or list of column names Group left DataFrame by group columns and merge piece by piece with right DataFrame. Must be None if either left or right are a Series. right_by : column name or list of column names Group right DataFrame by group columns and merge piece by piece with left DataFrame. Must be None if either left or right are a Series. fill_method : {'ffill', None}, default None Interpolation method for data. suffixes : list-like, default is ("_x", "_y") A length-2 sequence where each element is optionally a string indicating the suffix to add to overlapping column names in `left` and `right` respectively. Pass a value of `None` instead of a string to indicate that the column name from `left` or `right` should be left as-is, with no suffix. At least one of the values must not be None. how : {'left', 'right', 'outer', 'inner'}, default 'outer' * left: use only keys from left frame (SQL: left outer join) * right: use only keys from right frame (SQL: right outer join) * outer: use union of keys from both frames (SQL: full outer join) * inner: use intersection of keys from both frames (SQL: inner join). Returns ------- DataFrame The merged DataFrame output type will be the same as 'left', if it is a subclass of DataFrame. See Also -------- merge : Merge with a database-style join. merge_asof : Merge on nearest keys. Examples -------- >>> from pandas import merge_ordered >>> df1 = pd.DataFrame( ... { ... "key": ["a", "c", "e", "a", "c", "e"], ... "lvalue": [1, 2, 3, 1, 2, 3], ... "group": ["a", "a", "a", "b", "b", "b"] ... } ... 
) >>> df1 key lvalue group 0 a 1 a 1 c 2 a 2 e 3 a 3 a 1 b 4 c 2 b 5 e 3 b >>> df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]}) >>> df2 key rvalue 0 b 1 1 c 2 2 d 3 >>> merge_ordered(df1, df2, fill_method="ffill", left_by="group") key lvalue group rvalue 0 a 1 a NaN 1 b 1 a 1.0 2 c 2 a 2.0 3 d 2 a 3.0 4 e 3 a 3.0 5 a 1 b NaN 6 b 1 b 1.0 7 c 2 b 2.0 8 d 2 b 3.0 9 e 3 b 3.0 """ def _merger(x, y) -> DataFrame: # perform the ordered merge operation op = _OrderedMerge( x, y, on=on, left_on=left_on, right_on=right_on, suffixes=suffixes, fill_method=fill_method, how=how, ) return op.get_result() if left_by is not None and right_by is not None: raise ValueError("Can only group either left or right frames") if left_by is not None: if isinstance(left_by, str): left_by = [left_by] check = set(left_by).difference(left.columns) if len(check) != 0: raise KeyError(f"{check} not found in left columns") result, _ = _groupby_and_merge(left_by, left, right, lambda x, y: _merger(x, y)) elif right_by is not None: if isinstance(right_by, str): right_by = [right_by] check = set(right_by).difference(right.columns) if len(check) != 0: raise KeyError(f"{check} not found in right columns") result, _ = _groupby_and_merge( right_by, right, left, lambda x, y: _merger(y, x) ) else: result = _merger(left, right) return result
(left: 'DataFrame | Series', right: 'DataFrame | Series', on: 'IndexLabel | None' = None, left_on: 'IndexLabel | None' = None, right_on: 'IndexLabel | None' = None, left_by=None, right_by=None, fill_method: 'str | None' = None, suffixes: 'Suffixes' = ('_x', '_y'), how: 'JoinHow' = 'outer') -> 'DataFrame'
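A shorter sketch without grouping, showing how merge_ordered keeps the ordered union of keys and forward-fills the gaps; the frames are invented for illustration:

import pandas as pd

a = pd.DataFrame({"t": [1, 3, 5], "x": [10, 30, 50]})
b = pd.DataFrame({"t": [2, 3, 4], "y": [0.2, 0.3, 0.4]})

# how="outer" (the default) keeps every key from both frames, in order;
# fill_method="ffill" then carries the last observation forward into the gaps.
combined = pd.merge_ordered(a, b, on="t", fill_method="ffill")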
68,341
pandas.core.dtypes.missing
notna
Detect non-missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : array-like or object value Object to check for *not* null or *non*-missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is valid. See Also -------- isna : Boolean inverse of pandas.notna. Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. Index.notna : Detect valid values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.notna('dog') True >>> pd.notna(pd.NA) False >>> pd.notna(np.nan) False ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.notna(array) array([[ True, False, True], [ True, True, False]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.notna(index) array([ True, True, False, True]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.notna(df) 0 1 2 0 True True True 1 True False True >>> pd.notna(df[1]) 0 True 1 False Name: 1, dtype: bool
def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect non-missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : array-like or object value Object to check for *not* null or *non*-missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is valid. See Also -------- isna : Boolean inverse of pandas.notna. Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. Index.notna : Detect valid values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.notna('dog') True >>> pd.notna(pd.NA) False >>> pd.notna(np.nan) False ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.notna(array) array([[ True, False, True], [ True, True, False]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.notna(index) array([ True, True, False, True]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.notna(df) 0 1 2 0 True True True 1 True False True >>> pd.notna(df[1]) 0 True 1 False Name: 1, dtype: bool """ res = isna(obj) if isinstance(res, bool): return not res return ~res
(obj: 'object') -> 'bool | npt.NDArray[np.bool_] | NDFrame'
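One common use not shown above is boolean filtering; a minimal sketch:

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 3.0, None])

# pd.notna is the boolean inverse of pd.isna, so it can be used directly
# as a mask to keep only the valid entries.
valid = s[pd.notna(s)]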
68,344
pandas._config.config
option_context
Context manager to temporarily set options in the `with` statement context. You need to invoke as ``option_context(pat, val, [(pat, val), ...])``. Examples -------- >>> from pandas import option_context >>> with option_context('display.max_rows', 10, 'display.max_columns', 5): ... pass
class option_context(ContextDecorator):
    """
    Context manager to temporarily set options in the `with` statement context.

    You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.

    Examples
    --------
    >>> from pandas import option_context
    >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
    ...     pass
    """

    def __init__(self, *args) -> None:
        if len(args) % 2 != 0 or len(args) < 2:
            raise ValueError(
                "Need to invoke as option_context(pat, val, [(pat, val), ...])."
            )

        self.ops = list(zip(args[::2], args[1::2]))

    def __enter__(self) -> None:
        self.undo = [(pat, _get_option(pat)) for pat, val in self.ops]

        for pat, val in self.ops:
            _set_option(pat, val, silent=True)

    def __exit__(self, *args) -> None:
        if self.undo:
            for pat, val in self.undo:
                _set_option(pat, val, silent=True)
(*args) -> 'None'
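The docstring example is a no-op; a slightly fuller sketch showing that the option takes effect inside the block and is restored afterwards:

import pandas as pd

before = pd.get_option("display.max_rows")

with pd.option_context("display.max_rows", 5):
    # Inside the block the option reads back as the temporary value.
    assert pd.get_option("display.max_rows") == 5

# On exit the previous value is restored.
assert pd.get_option("display.max_rows") == before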
68,345
contextlib
__call__
null
def __call__(self, func):
    # Wrap ``func`` so that each call re-creates and enters the context
    # manager, making the instance usable as a decorator.
    @wraps(func)
    def inner(*args, **kwds):
        with self._recreate_cm():
            return func(*args, **kwds)

    return inner
(self, func)
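Because ``option_context`` inherits this ``ContextDecorator.__call__``, an instance can also wrap a function, re-entering the context around every call. A minimal sketch; the decorated function is invented for illustration:

import pandas as pd

@pd.option_context("display.precision", 2)
def describe_with_two_decimals(df):
    # The display option is applied on entry and undone when the call returns.
    print(df.describe())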
68,346
pandas._config.config
__enter__
null
def __enter__(self) -> None:
    # Remember the current values so __exit__ can restore them, then
    # apply the requested options.
    self.undo = [(pat, _get_option(pat)) for pat, val in self.ops]

    for pat, val in self.ops:
        _set_option(pat, val, silent=True)
(self) -> NoneType
68,347
pandas._config.config
__exit__
null
def __exit__(self, *args) -> None:
    # Restore the option values captured in __enter__.
    if self.undo:
        for pat, val in self.undo:
            _set_option(pat, val, silent=True)
(self, *args) -> NoneType
68,348
pandas._config.config
__init__
null
def __init__(self, *args) -> None:
    # Arguments must come in (pattern, value) pairs.
    if len(args) % 2 != 0 or len(args) < 2:
        raise ValueError(
            "Need to invoke as option_context(pat, val, [(pat, val), ...])."
        )

    self.ops = list(zip(args[::2], args[1::2]))
(self, *args) -> NoneType
68,349
contextlib
_recreate_cm
Return a recreated instance of self. Allows an otherwise one-shot context manager like _GeneratorContextManager to support use as a decorator via implicit recreation. This is a private interface just for _GeneratorContextManager. See issue #11647 for details.
def _recreate_cm(self):
    """Return a recreated instance of self.

    Allows an otherwise one-shot context manager like
    _GeneratorContextManager to support use as
    a decorator via implicit recreation.

    This is a private interface just for _GeneratorContextManager.
    See issue #11647 for details.
    """
    return self
(self)
68,351
pandas.core.indexes.period
period_range
Return a fixed frequency PeriodIndex. The day (calendar) is the default frequency. Parameters ---------- start : str, datetime, date, pandas.Timestamp, or period-like, default None Left bound for generating periods. end : str, datetime, date, pandas.Timestamp, or period-like, default None Right bound for generating periods. periods : int, default None Number of periods to generate. freq : str or DateOffset, optional Frequency alias. By default the freq is taken from `start` or `end` if those are Period objects. Otherwise, the default is ``"D"`` for daily frequency. name : str, default None Name of the resulting PeriodIndex. Returns ------- PeriodIndex Notes ----- Of the three parameters: ``start``, ``end``, and ``periods``, exactly two must be specified. To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M') PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06', '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12', '2018-01'], dtype='period[M]') If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor endpoints for a ``PeriodIndex`` with frequency matching that of the ``period_range`` constructor. >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'), ... end=pd.Period('2017Q2', freq='Q'), freq='M') PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'], dtype='period[M]')
def period_range( start=None, end=None, periods: int | None = None, freq=None, name: Hashable | None = None, ) -> PeriodIndex: """ Return a fixed frequency PeriodIndex. The day (calendar) is the default frequency. Parameters ---------- start : str, datetime, date, pandas.Timestamp, or period-like, default None Left bound for generating periods. end : str, datetime, date, pandas.Timestamp, or period-like, default None Right bound for generating periods. periods : int, default None Number of periods to generate. freq : str or DateOffset, optional Frequency alias. By default the freq is taken from `start` or `end` if those are Period objects. Otherwise, the default is ``"D"`` for daily frequency. name : str, default None Name of the resulting PeriodIndex. Returns ------- PeriodIndex Notes ----- Of the three parameters: ``start``, ``end``, and ``periods``, exactly two must be specified. To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M') PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06', '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12', '2018-01'], dtype='period[M]') If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor endpoints for a ``PeriodIndex`` with frequency matching that of the ``period_range`` constructor. >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'), ... end=pd.Period('2017Q2', freq='Q'), freq='M') PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'], dtype='period[M]') """ if com.count_not_none(start, end, periods) != 2: raise ValueError( "Of the three parameters: start, end, and periods, " "exactly two must be specified" ) if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)): freq = "D" data, freq = PeriodArray._generate_range(start, end, periods, freq) dtype = PeriodDtype(freq) data = PeriodArray(data, dtype=dtype) return PeriodIndex(data, name=name)
(start=None, end=None, periods: 'int | None' = None, freq=None, name: 'Hashable | None' = None) -> 'PeriodIndex'
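A hedged sketch of the remaining combination not shown above, specifying ``start`` and ``periods`` rather than ``start`` and ``end``:

import pandas as pd

# Exactly two of start, end and periods must be given.
idx = pd.period_range(start="2024-01", periods=6, freq="M")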
68,352
pandas.core.reshape.pivot
pivot
Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation, multiple values will result in a MultiIndex in the columns. See the :ref:`User Guide <reshaping>` for more on reshaping. Parameters ---------- data : DataFrame columns : str or object or a list of str Column to use to make new frame's columns. index : str or object or a list of str, optional Column to use to make new frame's index. If not given, uses existing index. values : str, object or a list of the previous, optional Column(s) to use for populating new frame's values. If not specified, all remaining columns will be used and the result will have hierarchically indexed columns. Returns ------- DataFrame Returns reshaped DataFrame. Raises ------ ValueError: When there are any `index`, `columns` combinations with multiple values. `DataFrame.pivot_table` when you need to aggregate. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. wide_to_long : Wide panel to long format. Less flexible but more user-friendly than melt. Notes ----- For finer-tuned control, see hierarchical indexing documentation along with the related stack/unstack methods. Reference :ref:`the user guide <reshaping.pivot>` for more examples. Examples -------- >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz') bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar')['baz'] bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo']) baz zoo bar A B C A B C foo one 1 2 3 x y z two 4 5 6 q w t You could also assign a list of column names or a list of index names. >>> df = pd.DataFrame({ ... "lev1": [1, 1, 1, 2, 2, 2], ... "lev2": [1, 1, 2, 1, 1, 2], ... "lev3": [1, 2, 1, 2, 1, 2], ... "lev4": [1, 2, 3, 4, 5, 6], ... "values": [0, 1, 2, 3, 4, 5]}) >>> df lev1 lev2 lev3 lev4 values 0 1 1 1 1 0 1 1 1 2 2 1 2 1 2 1 3 2 3 2 1 2 4 3 4 2 1 1 5 4 5 2 2 2 6 5 >>> df.pivot(index="lev1", columns=["lev2", "lev3"], values="values") lev2 1 2 lev3 1 2 1 2 lev1 1 0.0 1.0 2.0 NaN 2 4.0 3.0 NaN 5.0 >>> df.pivot(index=["lev1", "lev2"], columns=["lev3"], values="values") lev3 1 2 lev1 lev2 1 1 0.0 1.0 2 2.0 NaN 2 1 4.0 3.0 2 NaN 5.0 A ValueError is raised if there are any duplicates. >>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... "baz": [1, 2, 3, 4]}) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 Notice that the first two rows are the same for our `index` and `columns` arguments. >>> df.pivot(index='foo', columns='bar', values='baz') Traceback (most recent call last): ... ValueError: Index contains duplicate entries, cannot reshape
@Substitution("\ndata : DataFrame") @Appender(_shared_docs["pivot"], indents=1) def pivot( data: DataFrame, *, columns: IndexLabel, index: IndexLabel | lib.NoDefault = lib.no_default, values: IndexLabel | lib.NoDefault = lib.no_default, ) -> DataFrame: columns_listlike = com.convert_to_list_like(columns) # If columns is None we will create a MultiIndex level with None as name # which might cause duplicated names because None is the default for # level names data = data.copy(deep=False) data.index = data.index.copy() data.index.names = [ name if name is not None else lib.no_default for name in data.index.names ] indexed: DataFrame | Series if values is lib.no_default: if index is not lib.no_default: cols = com.convert_to_list_like(index) else: cols = [] append = index is lib.no_default # error: Unsupported operand types for + ("List[Any]" and "ExtensionArray") # error: Unsupported left operand type for + ("ExtensionArray") indexed = data.set_index( cols + columns_listlike, append=append # type: ignore[operator] ) else: index_list: list[Index] | list[Series] if index is lib.no_default: if isinstance(data.index, MultiIndex): # GH 23955 index_list = [ data.index.get_level_values(i) for i in range(data.index.nlevels) ] else: index_list = [ data._constructor_sliced(data.index, name=data.index.name) ] else: index_list = [data[idx] for idx in com.convert_to_list_like(index)] data_columns = [data[col] for col in columns_listlike] index_list.extend(data_columns) multiindex = MultiIndex.from_arrays(index_list) if is_list_like(values) and not isinstance(values, tuple): # Exclude tuple because it is seen as a single column name values = cast(Sequence[Hashable], values) indexed = data._constructor( data[values]._values, index=multiindex, columns=values ) else: indexed = data._constructor_sliced(data[values]._values, index=multiindex) # error: Argument 1 to "unstack" of "DataFrame" has incompatible type "Union # [List[Any], ExtensionArray, ndarray[Any, Any], Index, Series]"; expected # "Hashable" result = indexed.unstack(columns_listlike) # type: ignore[arg-type] result.index.names = [ name if name is not lib.no_default else None for name in result.index.names ] return result
(data: 'DataFrame', *, columns: 'IndexLabel', index: 'IndexLabel | lib.NoDefault' = <no_default>, values: 'IndexLabel | lib.NoDefault' = <no_default>) -> 'DataFrame'
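A minimal sketch contrasting the strict reshape with the aggregating alternative mentioned in the ValueError message; the frame is invented for illustration:

import pandas as pd

df = pd.DataFrame({"foo": ["one", "one", "two", "two"],
                   "bar": ["A", "A", "B", "C"],
                   "baz": [1, 2, 3, 4]})

# pivot is a pure reshape, so the duplicated ("one", "A") pair would raise:
# df.pivot(index="foo", columns="bar", values="baz")

# pivot_table resolves the duplicates by aggregating them instead.
table = df.pivot_table(index="foo", columns="bar", values="baz", aggfunc="mean")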
68,353
pandas.core.reshape.pivot
pivot_table
Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ---------- data : DataFrame values : list-like or scalar, optional Column or columns to aggregate. index : column, Grouper, array, or list of the previous Keys to group by on the pivot table index. If a list is passed, it can contain any of the other types (except list). If an array is passed, it must be the same length as the data and will be used in the same manner as column values. columns : column, Grouper, array, or list of the previous Keys to group by on the pivot table column. If a list is passed, it can contain any of the other types (except list). If an array is passed, it must be the same length as the data and will be used in the same manner as column values. aggfunc : function, list of functions, dict, default "mean" If a list of functions is passed, the resulting pivot table will have hierarchical columns whose top level are the function names (inferred from the function objects themselves). If a dict is passed, the key is column to aggregate and the value is function or list of functions. If ``margin=True``, aggfunc will be used to calculate the partial aggregates. fill_value : scalar, default None Value to replace missing values with (in the resulting pivot table, after aggregation). margins : bool, default False If ``margins=True``, special ``All`` columns and rows will be added with partial group aggregates across the categories on the rows and columns. dropna : bool, default True Do not include columns whose entries are all NaN. If True, rows with a NaN value in any column will be omitted before computing margins. margins_name : str, default 'All' Name of the row / column that will contain the totals when margins is True. observed : bool, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. deprecated:: 2.2.0 The default value of ``False`` is deprecated and will change to ``True`` in a future version of pandas. sort : bool, default True Specifies if the result should be sorted. .. versionadded:: 1.3.0 Returns ------- DataFrame An Excel style pivot table. See Also -------- DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. DataFrame.melt: Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. wide_to_long : Wide panel to long format. Less flexible but more user-friendly than melt. Notes ----- Reference :ref:`the user guide <reshaping.pivot>` for more examples. Examples -------- >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... 
columns=['C'], aggfunc="sum") >>> table C large small A B bar one 4.0 5.0 two 7.0 6.0 foo one 4.0 1.0 two NaN 6.0 We can also fill missing values using the `fill_value` parameter. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc="sum", fill_value=0) >>> table C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 The next example aggregates by taking the mean across multiple columns. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': "mean", 'E': "mean"}) >>> table D E A C bar large 5.500000 7.500000 small 5.500000 8.500000 foo large 2.000000 4.500000 small 2.333333 4.333333 We can also calculate multiple types of aggregations for any given value column. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': "mean", ... 'E': ["min", "max", "mean"]}) >>> table D E mean max mean min A C bar large 5.500000 9 7.500000 6 small 5.500000 9 8.500000 8 foo large 2.000000 5 4.500000 4 small 2.333333 6 4.333333 2
@Substitution("\ndata : DataFrame") @Appender(_shared_docs["pivot_table"], indents=1) def pivot_table( data: DataFrame, values=None, index=None, columns=None, aggfunc: AggFuncType = "mean", fill_value=None, margins: bool = False, dropna: bool = True, margins_name: Hashable = "All", observed: bool | lib.NoDefault = lib.no_default, sort: bool = True, ) -> DataFrame: index = _convert_by(index) columns = _convert_by(columns) if isinstance(aggfunc, list): pieces: list[DataFrame] = [] keys = [] for func in aggfunc: _table = __internal_pivot_table( data, values=values, index=index, columns=columns, fill_value=fill_value, aggfunc=func, margins=margins, dropna=dropna, margins_name=margins_name, observed=observed, sort=sort, ) pieces.append(_table) keys.append(getattr(func, "__name__", func)) table = concat(pieces, keys=keys, axis=1) return table.__finalize__(data, method="pivot_table") table = __internal_pivot_table( data, values, index, columns, aggfunc, fill_value, margins, dropna, margins_name, observed, sort, ) return table.__finalize__(data, method="pivot_table")
(data: 'DataFrame', values=None, index=None, columns=None, aggfunc: 'AggFuncType' = 'mean', fill_value=None, margins: 'bool' = False, dropna: 'bool' = True, margins_name: 'Hashable' = 'All', observed: 'bool | lib.NoDefault' = <no_default>, sort: 'bool' = True) -> 'DataFrame'
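The examples above do not show ``margins``; a short sketch with invented data:

import pandas as pd

df = pd.DataFrame({"A": ["x", "x", "y", "y"],
                   "B": ["p", "q", "p", "q"],
                   "v": [1, 2, 3, 4]})

# margins=True appends an "All" row and column holding the partial aggregates.
table = pd.pivot_table(df, values="v", index="A", columns="B",
                       aggfunc="sum", margins=True)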
68,355
pandas.core.reshape.tile
qcut
Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point. Parameters ---------- x : 1d ndarray or Series q : int or list-like of float Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles. labels : array or False, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. If True, raises an error. retbins : bool, optional Whether to return the (bins, labels) or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.qcut(range(5), 4) ... # doctest: +ELLIPSIS [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] Categories (4, interval[float64, right]): [(-0.001, 1.0] < (1.0, 2.0] ... >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3])
def qcut( x, q, labels=None, retbins: bool = False, precision: int = 3, duplicates: str = "raise", ): """ Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point. Parameters ---------- x : 1d ndarray or Series q : int or list-like of float Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles. labels : array or False, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. If True, raises an error. retbins : bool, optional Whether to return the (bins, labels) or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.qcut(range(5), 4) ... # doctest: +ELLIPSIS [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] Categories (4, interval[float64, right]): [(-0.001, 1.0] < (1.0, 2.0] ... >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3]) """ original = x x_idx = _preprocess_for_cut(x) x_idx, _ = _coerce_to_type(x_idx) quantiles = np.linspace(0, 1, q + 1) if is_integer(q) else q bins = x_idx.to_series().dropna().quantile(quantiles) fac, bins = _bins_to_cuts( x_idx, Index(bins), labels=labels, precision=precision, include_lowest=True, duplicates=duplicates, ) return _postprocess_for_cut(fac, bins, retbins, original)
(x, q, labels=None, retbins: bool = False, precision: int = 3, duplicates: str = 'raise')
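A short sketch of ``retbins`` and ``duplicates``, which the examples above do not cover; the input data is invented:

import numpy as np
import pandas as pd

x = pd.Series(np.random.default_rng(0).normal(size=100))

# retbins=True also returns the computed quantile edges.
quartiles, edges = pd.qcut(x, 4, labels=["q1", "q2", "q3", "q4"], retbins=True)

# With heavily repeated values the quantile edges can collide;
# duplicates="drop" keeps qcut from raising ValueError in that case.
skewed = pd.Series([0, 0, 0, 0, 1, 2, 3])
binned = pd.qcut(skewed, 4, duplicates="drop")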
68,356
pandas.io.clipboards
read_clipboard
Read text from clipboard and pass to :func:`~pandas.read_csv`. Parses clipboard contents similar to how CSV files are parsed using :func:`~pandas.read_csv`. Parameters ---------- sep : str, default '\\s+' A string or regex delimiter. The default of ``'\\s+'`` denotes one or more whitespace characters. dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 **kwargs See :func:`~pandas.read_csv` for the full argument list. Returns ------- DataFrame A parsed :class:`~pandas.DataFrame` object. See Also -------- DataFrame.to_clipboard : Copy object to the system clipboard. read_csv : Read a comma-separated values (csv) file into DataFrame. read_fwf : Read a table of fixed-width formatted lines into DataFrame. Examples -------- >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard() # doctest: +SKIP >>> pd.read_clipboard() # doctest: +SKIP A B C 0 1 2 3 1 4 5 6
def read_clipboard( sep: str = r"\s+", dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, **kwargs, ): # pragma: no cover r""" Read text from clipboard and pass to :func:`~pandas.read_csv`. Parses clipboard contents similar to how CSV files are parsed using :func:`~pandas.read_csv`. Parameters ---------- sep : str, default '\\s+' A string or regex delimiter. The default of ``'\\s+'`` denotes one or more whitespace characters. dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 **kwargs See :func:`~pandas.read_csv` for the full argument list. Returns ------- DataFrame A parsed :class:`~pandas.DataFrame` object. See Also -------- DataFrame.to_clipboard : Copy object to the system clipboard. read_csv : Read a comma-separated values (csv) file into DataFrame. read_fwf : Read a table of fixed-width formatted lines into DataFrame. Examples -------- >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard() # doctest: +SKIP >>> pd.read_clipboard() # doctest: +SKIP A B C 0 1 2 3 1 4 5 6 """ encoding = kwargs.pop("encoding", "utf-8") # only utf-8 is valid for passed value because that's what clipboard # supports if encoding is not None and encoding.lower().replace("-", "") != "utf8": raise NotImplementedError("reading from clipboard only supports utf-8 encoding") check_dtype_backend(dtype_backend) from pandas.io.clipboard import clipboard_get from pandas.io.parsers import read_csv text = clipboard_get() # Try to decode (if needed, as "text" might already be a string here). try: text = text.decode(kwargs.get("encoding") or get_option("display.encoding")) except AttributeError: pass # Excel copies into clipboard with \t separation # inspect no more then the 10 first lines, if they # all contain an equal number (>0) of tabs, infer # that this came from excel and set 'sep' accordingly lines = text[:10000].split("\n")[:-1][:10] # Need to remove leading white space, since read_csv # accepts: # a b # 0 1 2 # 1 3 4 counts = {x.lstrip(" ").count("\t") for x in lines} if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0: sep = "\t" # check the number of leading tabs in the first line # to account for index columns index_length = len(lines[0]) - len(lines[0].lstrip(" \t")) if index_length != 0: kwargs.setdefault("index_col", list(range(index_length))) # Edge case where sep is specified to be None, return to default if sep is None and kwargs.get("delim_whitespace") is None: sep = r"\s+" # Regex separator currently only works with python engine. # Default to python if separator is multi-character (regex) if len(sep) > 1 and kwargs.get("engine") is None: kwargs["engine"] = "python" elif len(sep) > 1 and kwargs.get("engine") == "c": warnings.warn( "read_clipboard with regex separator does not work properly with c engine.", stacklevel=find_stack_level(), ) return read_csv(StringIO(text), sep=sep, dtype_backend=dtype_backend, **kwargs)
(sep: 'str' = '\\s+', dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>, **kwargs)
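A hedged round-trip sketch; it needs a system clipboard backend, so it is usually run interactively rather than in automated tests:

import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})

# DataFrame.to_clipboard writes tab-separated text by default, which
# read_clipboard can parse straight back into a DataFrame.
df.to_clipboard(index=False)
roundtrip = pd.read_clipboard()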
68,357
pandas.io.parsers.readers
read_csv
Read a comma-separated values (csv) file into DataFrame. Also supports optionally iterating or breaking of the file into chunks. Additional help can be found in the online docs for `IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_. Parameters ---------- filepath_or_buffer : str, path object or file-like object Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is expected. A local file could be: file://localhost/path/to/table.csv. If you want to pass in a path object, pandas accepts any ``os.PathLike``. By file-like object, we refer to objects with a ``read()`` method, such as a file handle (e.g. via builtin ``open`` function) or ``StringIO``. sep : str, default ',' Character or regex pattern to treat as the delimiter. If ``sep=None``, the C engine cannot automatically detect the separator, but the Python parsing engine can, meaning the latter will be used and automatically detect the separator from only the first valid row of the file by Python's builtin sniffer tool, ``csv.Sniffer``. In addition, separators longer than 1 character and different from ``'\s+'`` will be interpreted as regular expressions and will also force the use of the Python parsing engine. Note that regex delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``. delimiter : str, optional Alias for ``sep``. header : int, Sequence of int, 'infer' or None, default 'infer' Row number(s) containing column labels and marking the start of the data (zero-indexed). Default behavior is to infer the column names: if no ``names`` are passed the behavior is identical to ``header=0`` and column names are inferred from the first line of the file, if column names are passed explicitly to ``names`` then the behavior is identical to ``header=None``. Explicitly pass ``header=0`` to be able to replace existing names. The header can be a list of integers that specify row locations for a :class:`~pandas.MultiIndex` on the columns e.g. ``[0, 1, 3]``. Intervening rows that are not specified will be skipped (e.g. 2 in this example is skipped). Note that this parameter ignores commented lines and empty lines if ``skip_blank_lines=True``, so ``header=0`` denotes the first line of data rather than the first line of the file. names : Sequence of Hashable, optional Sequence of column labels to apply. If the file contains a header row, then you should explicitly pass ``header=0`` to override the column names. Duplicates in this list are not allowed. index_col : Hashable, Sequence of Hashable or False, optional Column(s) to use as row label(s), denoted either by column labels or column indices. If a sequence of labels or indices is given, :class:`~pandas.MultiIndex` will be formed for the row labels. Note: ``index_col=False`` can be used to force pandas to *not* use the first column as the index, e.g., when you have a malformed file with delimiters at the end of each line. usecols : Sequence of Hashable or Callable, optional Subset of columns to select, denoted either by column labels or column indices. If list-like, all elements must either be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in ``names`` or inferred from the document header row(s). If ``names`` are given, the document header row(s) are not taken into account. For example, a valid list-like ``usecols`` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. 
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. To instantiate a :class:`~pandas.DataFrame` from ``data`` with element order preserved use ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns in ``['foo', 'bar']`` order or ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]`` for ``['bar', 'foo']`` order. If callable, the callable function will be evaluated against the column names, returning names where the callable function evaluates to ``True``. An example of a valid callable argument would be ``lambda x: x.upper() in ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster parsing time and lower memory usage. dtype : dtype or dict of {Hashable : dtype}, optional Data type(s) to apply to either the whole dataset or individual columns. E.g., ``{'a': np.float64, 'b': np.int32, 'c': 'Int64'}`` Use ``str`` or ``object`` together with suitable ``na_values`` settings to preserve and not interpret ``dtype``. If ``converters`` are specified, they will be applied INSTEAD of ``dtype`` conversion. .. versionadded:: 1.5.0 Support for ``defaultdict`` was added. Specify a ``defaultdict`` as input where the default determines the ``dtype`` of the columns which are not explicitly listed. engine : {'c', 'python', 'pyarrow'}, optional Parser engine to use. The C and pyarrow engines are faster, while the python engine is currently more feature-complete. Multithreading is currently only supported by the pyarrow engine. .. versionadded:: 1.4.0 The 'pyarrow' engine was added as an *experimental* engine, and some features are unsupported, or may not work correctly, with this engine. converters : dict of {Hashable : Callable}, optional Functions for converting values in specified columns. Keys can either be column labels or column indices. true_values : list, optional Values to consider as ``True`` in addition to case-insensitive variants of 'True'. false_values : list, optional Values to consider as ``False`` in addition to case-insensitive variants of 'False'. skipinitialspace : bool, default False Skip spaces after delimiter. skiprows : int, list of int or Callable, optional Line numbers to skip (0-indexed) or number of lines to skip (``int``) at the start of the file. If callable, the callable function will be evaluated against the row indices, returning ``True`` if the row should be skipped and ``False`` otherwise. An example of a valid callable argument would be ``lambda x: x in [0, 2]``. skipfooter : int, default 0 Number of lines at bottom of file to skip (Unsupported with ``engine='c'``). nrows : int, optional Number of rows of file to read. Useful for reading pieces of large files. na_values : Hashable, Iterable of Hashable or dict of {Hashable : Iterable}, optional Additional strings to recognize as ``NA``/``NaN``. If ``dict`` passed, specific per-column ``NA`` values. By default the following values are interpreted as ``NaN``: " ", "#N/A", "#N/A N/A", "#NA", "-1.#IND", "-1.#QNAN", "-NaN", "-nan", "1.#IND", "1.#QNAN", "<NA>", "N/A", "NA", "NULL", "NaN", "None", "n/a", "nan", "null ". keep_default_na : bool, default True Whether or not to include the default ``NaN`` values when parsing the data. Depending on whether ``na_values`` is passed in, the behavior is as follows: * If ``keep_default_na`` is ``True``, and ``na_values`` are specified, ``na_values`` is appended to the default ``NaN`` values used for parsing. 
* If ``keep_default_na`` is ``True``, and ``na_values`` are not specified, only the default ``NaN`` values are used for parsing. * If ``keep_default_na`` is ``False``, and ``na_values`` are specified, only the ``NaN`` values specified ``na_values`` are used for parsing. * If ``keep_default_na`` is ``False``, and ``na_values`` are not specified, no strings will be parsed as ``NaN``. Note that if ``na_filter`` is passed in as ``False``, the ``keep_default_na`` and ``na_values`` parameters will be ignored. na_filter : bool, default True Detect missing value markers (empty strings and the value of ``na_values``). In data without any ``NA`` values, passing ``na_filter=False`` can improve the performance of reading a large file. verbose : bool, default False Indicate number of ``NA`` values placed in non-numeric columns. .. deprecated:: 2.2.0 skip_blank_lines : bool, default True If ``True``, skip over blank lines rather than interpreting as ``NaN`` values. parse_dates : bool, list of Hashable, list of lists or dict of {Hashable : list}, default False The behavior is as follows: * ``bool``. If ``True`` -> try parsing the index. Note: Automatically set to ``True`` if ``date_format`` or ``date_parser`` arguments have been passed. * ``list`` of ``int`` or names. e.g. If ``[1, 2, 3]`` -> try parsing columns 1, 2, 3 each as a separate date column. * ``list`` of ``list``. e.g. If ``[[1, 3]]`` -> combine columns 1 and 3 and parse as a single date column. Values are joined with a space before parsing. * ``dict``, e.g. ``{'foo' : [1, 3]}`` -> parse columns 1, 3 as date and call result 'foo'. Values are joined with a space before parsing. If a column or index cannot be represented as an array of ``datetime``, say because of an unparsable value or a mixture of timezones, the column or index will be returned unaltered as an ``object`` data type. For non-standard ``datetime`` parsing, use :func:`~pandas.to_datetime` after :func:`~pandas.read_csv`. Note: A fast-path exists for iso8601-formatted dates. infer_datetime_format : bool, default False If ``True`` and ``parse_dates`` is enabled, pandas will attempt to infer the format of the ``datetime`` strings in the columns, and if it can be inferred, switch to a faster method of parsing them. In some cases this can increase the parsing speed by 5-10x. .. deprecated:: 2.0.0 A strict version of this argument is now the default, passing it has no effect. keep_date_col : bool, default False If ``True`` and ``parse_dates`` specifies combining multiple columns then keep the original columns. date_parser : Callable, optional Function to use for converting a sequence of string columns to an array of ``datetime`` instances. The default uses ``dateutil.parser.parser`` to do the conversion. pandas will try to call ``date_parser`` in three different ways, advancing to the next if an exception occurs: 1) Pass one or more arrays (as defined by ``parse_dates``) as arguments; 2) concatenate (row-wise) the string values from the columns defined by ``parse_dates`` into a single array and pass that; and 3) call ``date_parser`` once for each row using one or more strings (corresponding to the columns defined by ``parse_dates``) as arguments. .. deprecated:: 2.0.0 Use ``date_format`` instead, or read in as ``object`` and then apply :func:`~pandas.to_datetime` as-needed. date_format : str or dict of column -> format, optional Format to use for parsing dates when used in conjunction with ``parse_dates``. The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. 
See `strftime documentation <https://docs.python.org/3/library/datetime.html #strftime-and-strptime-behavior>`_ for more information on choices, though note that :const:`"%f"` will parse all the way up to nanoseconds. You can also pass: - "ISO8601", to parse any `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_ time string (not necessarily in exactly the same format); - "mixed", to infer the format for each element individually. This is risky, and you should probably use it along with `dayfirst`. .. versionadded:: 2.0.0 dayfirst : bool, default False DD/MM format dates, international and European format. cache_dates : bool, default True If ``True``, use a cache of unique, converted dates to apply the ``datetime`` conversion. May produce significant speed-up when parsing duplicate date strings, especially ones with timezone offsets. iterator : bool, default False Return ``TextFileReader`` object for iteration or getting chunks with ``get_chunk()``. chunksize : int, optional Number of lines to read from the file per chunk. Passing a value will cause the function to return a ``TextFileReader`` object for iteration. See the `IO Tools docs <https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_ for more information on ``iterator`` and ``chunksize``. compression : str or dict, default 'infer' For on-the-fly decompression of on-disk data. If 'infer' and 'filepath_or_buffer' is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2' (otherwise no compression). If using 'zip' or 'tar', the ZIP file must contain only one data file to be read in. Set to ``None`` for no decompression. Can also be a dict with key ``'method'`` set to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and other key-value pairs are forwarded to ``zipfile.ZipFile``, ``gzip.GzipFile``, ``bz2.BZ2File``, ``zstandard.ZstdDecompressor``, ``lzma.LZMAFile`` or ``tarfile.TarFile``, respectively. As an example, the following could be passed for Zstandard decompression using a custom compression dictionary: ``compression={'method': 'zstd', 'dict_data': my_compression_dict}``. .. versionadded:: 1.5.0 Added support for `.tar` files. .. versionchanged:: 1.4.0 Zstandard support. thousands : str (length 1), optional Character acting as the thousands separator in numerical values. decimal : str (length 1), default '.' Character to recognize as decimal point (e.g., use ',' for European data). lineterminator : str (length 1), optional Character used to denote a line break. Only valid with C parser. quotechar : str (length 1), optional Character used to denote the start and end of a quoted item. Quoted items can include the ``delimiter`` and it will be ignored. quoting : {0 or csv.QUOTE_MINIMAL, 1 or csv.QUOTE_ALL, 2 or csv.QUOTE_NONNUMERIC, 3 or csv.QUOTE_NONE}, default csv.QUOTE_MINIMAL Control field quoting behavior per ``csv.QUOTE_*`` constants. Default is ``csv.QUOTE_MINIMAL`` (i.e., 0) which implies that only fields containing special characters are quoted (e.g., characters defined in ``quotechar``, ``delimiter``, or ``lineterminator``. doublequote : bool, default True When ``quotechar`` is specified and ``quoting`` is not ``QUOTE_NONE``, indicate whether or not to interpret two consecutive ``quotechar`` elements INSIDE a field as a single ``quotechar`` element. escapechar : str (length 1), optional Character used to escape other characters. 
comment : str (length 1), optional Character indicating that the remainder of line should not be parsed. If found at the beginning of a line, the line will be ignored altogether. This parameter must be a single character. Like empty lines (as long as ``skip_blank_lines=True``), fully commented lines are ignored by the parameter ``header`` but not by ``skiprows``. For example, if ``comment='#'``, parsing ``#empty\na,b,c\n1,2,3`` with ``header=0`` will result in ``'a,b,c'`` being treated as the header. encoding : str, optional, default 'utf-8' Encoding to use for UTF when reading/writing (ex. ``'utf-8'``). `List of Python standard encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ . encoding_errors : str, optional, default 'strict' How encoding errors are treated. `List of possible values <https://docs.python.org/3/library/codecs.html#error-handlers>`_ . .. versionadded:: 1.3.0 dialect : str or csv.Dialect, optional If provided, this parameter will override values (default or not) for the following parameters: ``delimiter``, ``doublequote``, ``escapechar``, ``skipinitialspace``, ``quotechar``, and ``quoting``. If it is necessary to override values, a ``ParserWarning`` will be issued. See ``csv.Dialect`` documentation for more details. on_bad_lines : {'error', 'warn', 'skip'} or Callable, default 'error' Specifies what to do upon encountering a bad line (a line with too many fields). Allowed values are : - ``'error'``, raise an Exception when a bad line is encountered. - ``'warn'``, raise a warning when a bad line is encountered and skip that line. - ``'skip'``, skip bad lines without raising or warning when they are encountered. .. versionadded:: 1.3.0 .. versionadded:: 1.4.0 - Callable, function with signature ``(bad_line: list[str]) -> list[str] | None`` that will process a single bad line. ``bad_line`` is a list of strings split by the ``sep``. If the function returns ``None``, the bad line will be ignored. If the function returns a new ``list`` of strings with more elements than expected, a ``ParserWarning`` will be emitted while dropping extra elements. Only supported when ``engine='python'`` .. versionchanged:: 2.2.0 - Callable, function with signature as described in `pyarrow documentation <https://arrow.apache.org/docs/python/generated/pyarrow.csv.ParseOptions.html #pyarrow.csv.ParseOptions.invalid_row_handler>`_ when ``engine='pyarrow'`` delim_whitespace : bool, default False Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be used as the ``sep`` delimiter. Equivalent to setting ``sep='\s+'``. If this option is set to ``True``, nothing should be passed in for the ``delimiter`` parameter. .. deprecated:: 2.2.0 Use ``sep="\s+"`` instead. low_memory : bool, default True Internally process the file in chunks, resulting in lower memory use while parsing, but possibly mixed type inference. To ensure no mixed types either set ``False``, or specify the type with the ``dtype`` parameter. Note that the entire file is read into a single :class:`~pandas.DataFrame` regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in chunks. (Only valid with C parser). memory_map : bool, default False If a filepath is provided for ``filepath_or_buffer``, map the file object directly onto memory and access the data directly from there. Using this option can improve performance because there is no longer any I/O overhead. 
float_precision : {'high', 'legacy', 'round_trip'}, optional Specifies which converter the C engine should use for floating-point values. The options are ``None`` or ``'high'`` for the ordinary converter, ``'legacy'`` for the original lower precision pandas converter, and ``'round_trip'`` for the round-trip converter. storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc. For HTTP(S) URLs the key-value pairs are forwarded to ``urllib.request.Request`` as header options. For other URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more details, and for more examples on storage options refer `here <https://pandas.pydata.org/docs/user_guide/io.html? highlight=storage_options#reading-writing-remote-files>`_. dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 Returns ------- DataFrame or TextFileReader A comma-separated values (csv) file is returned as a two-dimensional data structure with labeled axes. See Also -------- DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. read_table : Read general delimited file into DataFrame. read_fwf : Read a table of fixed-width formatted lines into DataFrame. Examples -------- >>> pd.read_csv('data.csv') # doctest: +SKIP
@Appender( _doc_read_csv_and_table.format( func_name="read_csv", summary="Read a comma-separated values (csv) file into DataFrame.", see_also_func_name="read_table", see_also_func_summary="Read general delimited file into DataFrame.", _default_sep="','", storage_options=_shared_docs["storage_options"], decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer", ) ) def read_csv( filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, sep: str | None | lib.NoDefault = lib.no_default, delimiter: str | None | lib.NoDefault = None, # Column and Index Locations and Names header: int | Sequence[int] | None | Literal["infer"] = "infer", names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default, index_col: IndexLabel | Literal[False] | None = None, usecols: UsecolsArgType = None, # General Parsing Configuration dtype: DtypeArg | None = None, engine: CSVEngine | None = None, converters: Mapping[Hashable, Callable] | None = None, true_values: list | None = None, false_values: list | None = None, skipinitialspace: bool = False, skiprows: list[int] | int | Callable[[Hashable], bool] | None = None, skipfooter: int = 0, nrows: int | None = None, # NA and Missing Data Handling na_values: Hashable | Iterable[Hashable] | Mapping[Hashable, Iterable[Hashable]] | None = None, keep_default_na: bool = True, na_filter: bool = True, verbose: bool | lib.NoDefault = lib.no_default, skip_blank_lines: bool = True, # Datetime Handling parse_dates: bool | Sequence[Hashable] | None = None, infer_datetime_format: bool | lib.NoDefault = lib.no_default, keep_date_col: bool | lib.NoDefault = lib.no_default, date_parser: Callable | lib.NoDefault = lib.no_default, date_format: str | dict[Hashable, str] | None = None, dayfirst: bool = False, cache_dates: bool = True, # Iteration iterator: bool = False, chunksize: int | None = None, # Quoting, Compression, and File Format compression: CompressionOptions = "infer", thousands: str | None = None, decimal: str = ".", lineterminator: str | None = None, quotechar: str = '"', quoting: int = csv.QUOTE_MINIMAL, doublequote: bool = True, escapechar: str | None = None, comment: str | None = None, encoding: str | None = None, encoding_errors: str | None = "strict", dialect: str | csv.Dialect | None = None, # Error Handling on_bad_lines: str = "error", # Internal delim_whitespace: bool | lib.NoDefault = lib.no_default, low_memory: bool = _c_parser_defaults["low_memory"], memory_map: bool = False, float_precision: Literal["high", "legacy"] | None = None, storage_options: StorageOptions | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, ) -> DataFrame | TextFileReader: if keep_date_col is not lib.no_default: # GH#55569 warnings.warn( "The 'keep_date_col' keyword in pd.read_csv is deprecated and " "will be removed in a future version. Explicitly remove unwanted " "columns after parsing instead.", FutureWarning, stacklevel=find_stack_level(), ) else: keep_date_col = False if lib.is_list_like(parse_dates): # GH#55569 depr = False # error: Item "bool" of "bool | Sequence[Hashable] | None" has no # attribute "__iter__" (not iterable) if not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr] depr = True elif isinstance(parse_dates, dict) and any( lib.is_list_like(x) for x in parse_dates.values() ): depr = True if depr: warnings.warn( "Support for nested sequences for 'parse_dates' in pd.read_csv " "is deprecated. 
Combine the desired columns with pd.to_datetime " "after parsing instead.", FutureWarning, stacklevel=find_stack_level(), ) if infer_datetime_format is not lib.no_default: warnings.warn( "The argument 'infer_datetime_format' is deprecated and will " "be removed in a future version. " "A strict version of it is now the default, see " "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. " "You can safely remove this argument.", FutureWarning, stacklevel=find_stack_level(), ) if delim_whitespace is not lib.no_default: # GH#55569 warnings.warn( "The 'delim_whitespace' keyword in pd.read_csv is deprecated and " "will be removed in a future version. Use ``sep='\\s+'`` instead", FutureWarning, stacklevel=find_stack_level(), ) else: delim_whitespace = False if verbose is not lib.no_default: # GH#55569 warnings.warn( "The 'verbose' keyword in pd.read_csv is deprecated and " "will be removed in a future version.", FutureWarning, stacklevel=find_stack_level(), ) else: verbose = False # locals() should never be modified kwds = locals().copy() del kwds["filepath_or_buffer"] del kwds["sep"] kwds_defaults = _refine_defaults_read( dialect, delimiter, delim_whitespace, engine, sep, on_bad_lines, names, defaults={"delimiter": ","}, dtype_backend=dtype_backend, ) kwds.update(kwds_defaults) return _read(filepath_or_buffer, kwds)
(filepath_or_buffer: 'FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str]', *, sep: 'str | None | lib.NoDefault' = <no_default>, delimiter: 'str | None | lib.NoDefault' = None, header: "int | Sequence[int] | None | Literal['infer']" = 'infer', names: 'Sequence[Hashable] | None | lib.NoDefault' = <no_default>, index_col: 'IndexLabel | Literal[False] | None' = None, usecols: 'UsecolsArgType' = None, dtype: 'DtypeArg | None' = None, engine: 'CSVEngine | None' = None, converters: 'Mapping[Hashable, Callable] | None' = None, true_values: 'list | None' = None, false_values: 'list | None' = None, skipinitialspace: 'bool' = False, skiprows: 'list[int] | int | Callable[[Hashable], bool] | None' = None, skipfooter: 'int' = 0, nrows: 'int | None' = None, na_values: 'Hashable | Iterable[Hashable] | Mapping[Hashable, Iterable[Hashable]] | None' = None, keep_default_na: 'bool' = True, na_filter: 'bool' = True, verbose: 'bool | lib.NoDefault' = <no_default>, skip_blank_lines: 'bool' = True, parse_dates: 'bool | Sequence[Hashable] | None' = None, infer_datetime_format: 'bool | lib.NoDefault' = <no_default>, keep_date_col: 'bool | lib.NoDefault' = <no_default>, date_parser: 'Callable | lib.NoDefault' = <no_default>, date_format: 'str | dict[Hashable, str] | None' = None, dayfirst: 'bool' = False, cache_dates: 'bool' = True, iterator: 'bool' = False, chunksize: 'int | None' = None, compression: 'CompressionOptions' = 'infer', thousands: 'str | None' = None, decimal: 'str' = '.', lineterminator: 'str | None' = None, quotechar: 'str' = '"', quoting: 'int' = 0, doublequote: 'bool' = True, escapechar: 'str | None' = None, comment: 'str | None' = None, encoding: 'str | None' = None, encoding_errors: 'str | None' = 'strict', dialect: 'str | csv.Dialect | None' = None, on_bad_lines: 'str' = 'error', delim_whitespace: 'bool | lib.NoDefault' = <no_default>, low_memory: 'bool' = True, memory_map: 'bool' = False, float_precision: "Literal['high', 'legacy'] | None" = None, storage_options: 'StorageOptions | None' = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>) -> 'DataFrame | TextFileReader'
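To tie the ``read_csv`` options above together, here is a minimal sketch that builds a small CSV in memory with ``io.StringIO`` (so no file on disk is assumed), reads it once with an explicit ``dtype`` mapping, and then reads the same text again in chunks via ``chunksize``. The column names and values are purely illustrative.

import io

import pandas as pd

csv_text = "id,name,score\n1,alpha,3.5\n2,beta,4.0\n3,gamma,2.5\n"

# Read the whole buffer at once, pinning column dtypes up front.
df = pd.read_csv(io.StringIO(csv_text), dtype={"id": "Int64", "score": "float64"})
print(df.dtypes)

# Read the same data lazily; with chunksize set, read_csv returns a TextFileReader.
with pd.read_csv(io.StringIO(csv_text), chunksize=2) as reader:
    for chunk in reader:
        print(chunk.shape)

Each ``chunk`` here is an ordinary ``DataFrame`` of at most two rows, which is the pattern the ``iterator``/``chunksize`` parameters describe for files too large to load in one go.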
68,358
pandas.io.excel._base
read_excel
Read an Excel file into a ``pandas`` ``DataFrame``. Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions read from a local filesystem or URL. Supports an option to read a single sheet or a list of sheets. Parameters ---------- io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.xlsx``. If you want to pass in a path object, pandas accepts any ``os.PathLike``. By file-like object, we refer to objects with a ``read()`` method, such as a file handle (e.g. via builtin ``open`` function) or ``StringIO``. .. deprecated:: 2.1.0 Passing byte strings is deprecated. To read from a byte string, wrap it in a ``BytesIO`` object. sheet_name : str, int, list, or None, default 0 Strings are used for sheet names. Integers are used in zero-indexed sheet positions (chart sheets do not count as a sheet position). Lists of strings/integers are used to request multiple sheets. Specify ``None`` to get all worksheets. Available cases: * Defaults to ``0``: 1st sheet as a `DataFrame` * ``1``: 2nd sheet as a `DataFrame` * ``"Sheet1"``: Load sheet with name "Sheet1" * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5" as a dict of `DataFrame` * ``None``: All worksheets. header : int, list of int, default 0 Row (0-indexed) to use for the column labels of the parsed DataFrame. If a list of integers is passed those row positions will be combined into a ``MultiIndex``. Use None if there is no header. names : array-like, default None List of column names to use. If file contains no header row, then you should explicitly pass header=None. index_col : int, str, list of int, default None Column (0-indexed) to use as the row labels of the DataFrame. Pass None if there is no such column. If a list is passed, those columns will be combined into a ``MultiIndex``. If a subset of data is selected with ``usecols``, index_col is based on the subset. Missing values will be forward filled to allow roundtripping with ``to_excel`` for ``merged_cells=True``. To avoid forward filling the missing values use ``set_index`` after reading the data instead of ``index_col``. usecols : str, list-like, or callable, default None * If None, then parse all columns. * If str, then indicates comma separated list of Excel column letters and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of both sides. * If list of int, then indicates list of column numbers to be parsed (0-indexed). * If list of string, then indicates list of column names to be parsed. * If callable, then evaluate each column name against it and parse the column if the callable returns ``True``. Returns a subset of the columns according to behavior above. dtype : Type name or dict of column -> type, default None Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32} Use ``object`` to preserve data as stored in Excel and not interpret dtype, which will necessarily result in ``object`` dtype. If converters are specified, they will be applied INSTEAD of dtype conversion. If you use ``None``, it will infer the dtype of each column based on the data. engine : {'openpyxl', 'calamine', 'odf', 'pyxlsb', 'xlrd'}, default None If io is not a buffer or path, this must be set to identify io. Engine compatibility : - ``openpyxl`` supports newer Excel file formats. 
- ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb) and OpenDocument (.ods) file formats. - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt). - ``pyxlsb`` supports Binary Excel files. - ``xlrd`` supports old-style Excel files (.xls). When ``engine=None``, the following logic will be used to determine the engine: - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt), then `odf <https://pypi.org/project/odfpy/>`_ will be used. - Otherwise if ``path_or_buffer`` is an xls format, ``xlrd`` will be used. - Otherwise if ``path_or_buffer`` is in xlsb format, ``pyxlsb`` will be used. - Otherwise ``openpyxl`` will be used. converters : dict, default None Dict of functions for converting values in certain columns. Keys can either be integers or column labels, values are functions that take one input argument, the Excel cell content, and return the transformed content. true_values : list, default None Values to consider as True. false_values : list, default None Values to consider as False. skiprows : list-like, int, or callable, optional Line numbers to skip (0-indexed) or number of lines to skip (int) at the start of the file. If callable, the callable function will be evaluated against the row indices, returning True if the row should be skipped and False otherwise. An example of a valid callable argument would be ``lambda x: x in [0, 2]``. nrows : int, default None Number of rows to parse. na_values : scalar, str, list-like, or dict, default None Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values. By default the following values are interpreted as NaN: '', '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan', '1.#IND', '1.#QNAN', '<NA>', 'N/A', 'NA', 'NULL', 'NaN', 'None', 'n/a', 'nan', 'null'. keep_default_na : bool, default True Whether or not to include the default NaN values when parsing the data. Depending on whether ``na_values`` is passed in, the behavior is as follows: * If ``keep_default_na`` is True, and ``na_values`` are specified, ``na_values`` is appended to the default NaN values used for parsing. * If ``keep_default_na`` is True, and ``na_values`` are not specified, only the default NaN values are used for parsing. * If ``keep_default_na`` is False, and ``na_values`` are specified, only the NaN values specified ``na_values`` are used for parsing. * If ``keep_default_na`` is False, and ``na_values`` are not specified, no strings will be parsed as NaN. Note that if `na_filter` is passed in as False, the ``keep_default_na`` and ``na_values`` parameters will be ignored. na_filter : bool, default True Detect missing value markers (empty strings and the value of na_values). In data without any NAs, passing ``na_filter=False`` can improve the performance of reading a large file. verbose : bool, default False Indicate number of NA values placed in non-numeric columns. parse_dates : bool, list-like, or dict, default False The behavior is as follows: * ``bool``. If True -> try parsing the index. * ``list`` of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column. * ``list`` of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column. * ``dict``, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result 'foo' If a column or index contains an unparsable date, the entire column or index will be returned unaltered as an object data type. If you don`t want to parse some cells as date just change their type in Excel to "Text". 
For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``. Note: A fast-path exists for iso8601-formatted dates. date_parser : function, optional Function to use for converting a sequence of string columns to an array of datetime instances. The default uses ``dateutil.parser.parser`` to do the conversion. Pandas will try to call `date_parser` in three different ways, advancing to the next if an exception occurs: 1) Pass one or more arrays (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the string values from the columns defined by `parse_dates` into a single array and pass that; and 3) call `date_parser` once for each row using one or more strings (corresponding to the columns defined by `parse_dates`) as arguments. .. deprecated:: 2.0.0 Use ``date_format`` instead, or read in as ``object`` and then apply :func:`to_datetime` as-needed. date_format : str or dict of column -> format, default ``None`` If used in conjunction with ``parse_dates``, will parse dates according to this format. For anything more complex, please read in as ``object`` and then apply :func:`to_datetime` as-needed. .. versionadded:: 2.0.0 thousands : str, default None Thousands separator for parsing string columns to numeric. Note that this parameter is only necessary for columns stored as TEXT in Excel, any numeric columns will automatically be parsed, regardless of display format. decimal : str, default '.' Character to recognize as decimal point for parsing string columns to numeric. Note that this parameter is only necessary for columns stored as TEXT in Excel, any numeric columns will automatically be parsed, regardless of display format.(e.g. use ',' for European data). .. versionadded:: 1.4.0 comment : str, default None Comments out remainder of line. Pass a character or characters to this argument to indicate comments in the input file. Any data between the comment string and the end of the current line is ignored. skipfooter : int, default 0 Rows at the end to skip (0-indexed). storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc. For HTTP(S) URLs the key-value pairs are forwarded to ``urllib.request.Request`` as header options. For other URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more details, and for more examples on storage options refer `here <https://pandas.pydata.org/docs/user_guide/io.html? highlight=storage_options#reading-writing-remote-files>`_. dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 engine_kwargs : dict, optional Arbitrary keyword arguments passed to excel engine. Returns ------- DataFrame or dict of DataFrames DataFrame from the passed in Excel file. See notes in sheet_name argument for more information on when a dict of DataFrames is returned. See Also -------- DataFrame.to_excel : Write DataFrame to an Excel file. DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. read_csv : Read a comma-separated values (csv) file into DataFrame. read_fwf : Read a table of fixed-width formatted lines into DataFrame. 
Notes ----- For specific information on the methods used for each Excel engine, refer to the pandas :ref:`user guide <io.excel_reader>` Examples -------- The file can be read using the file name as string or an open file object: >>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP Name Value 0 string1 1 1 string2 2 2 #Comment 3 >>> pd.read_excel(open('tmp.xlsx', 'rb'), ... sheet_name='Sheet3') # doctest: +SKIP Unnamed: 0 Name Value 0 0 string1 1 1 1 string2 2 2 2 #Comment 3 Index and header can be specified via the `index_col` and `header` arguments >>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP 0 1 2 0 NaN Name Value 1 0.0 string1 1 2 1.0 string2 2 3 2.0 #Comment 3 Column types are inferred but can be explicitly specified >>> pd.read_excel('tmp.xlsx', index_col=0, ... dtype={'Name': str, 'Value': float}) # doctest: +SKIP Name Value 0 string1 1.0 1 string2 2.0 2 #Comment 3.0 True, False, and NA values, and thousands separators have defaults, but can be explicitly specified, too. Supply the values you would like as strings or lists of strings! >>> pd.read_excel('tmp.xlsx', index_col=0, ... na_values=['string1', 'string2']) # doctest: +SKIP Name Value 0 NaN 1 1 NaN 2 2 #Comment 3 Comment lines in the excel input file can be skipped using the ``comment`` kwarg. >>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP Name Value 0 string1 1.0 1 string2 2.0 2 None NaN
@doc(storage_options=_shared_docs["storage_options"]) @Appender(_read_excel_doc) def read_excel( io, sheet_name: str | int | list[IntStrT] | None = 0, *, header: int | Sequence[int] | None = 0, names: SequenceNotStr[Hashable] | range | None = None, index_col: int | str | Sequence[int] | None = None, usecols: int | str | Sequence[int] | Sequence[str] | Callable[[str], bool] | None = None, dtype: DtypeArg | None = None, engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = None, converters: dict[str, Callable] | dict[int, Callable] | None = None, true_values: Iterable[Hashable] | None = None, false_values: Iterable[Hashable] | None = None, skiprows: Sequence[int] | int | Callable[[int], object] | None = None, nrows: int | None = None, na_values=None, keep_default_na: bool = True, na_filter: bool = True, verbose: bool = False, parse_dates: list | dict | bool = False, date_parser: Callable | lib.NoDefault = lib.no_default, date_format: dict[Hashable, str] | str | None = None, thousands: str | None = None, decimal: str = ".", comment: str | None = None, skipfooter: int = 0, storage_options: StorageOptions | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, engine_kwargs: dict | None = None, ) -> DataFrame | dict[IntStrT, DataFrame]: check_dtype_backend(dtype_backend) should_close = False if engine_kwargs is None: engine_kwargs = {} if not isinstance(io, ExcelFile): should_close = True io = ExcelFile( io, storage_options=storage_options, engine=engine, engine_kwargs=engine_kwargs, ) elif engine and engine != io.engine: raise ValueError( "Engine should not be specified when passing " "an ExcelFile - ExcelFile already has the engine set" ) try: data = io.parse( sheet_name=sheet_name, header=header, names=names, index_col=index_col, usecols=usecols, dtype=dtype, converters=converters, true_values=true_values, false_values=false_values, skiprows=skiprows, nrows=nrows, na_values=na_values, keep_default_na=keep_default_na, na_filter=na_filter, verbose=verbose, parse_dates=parse_dates, date_parser=date_parser, date_format=date_format, thousands=thousands, decimal=decimal, comment=comment, skipfooter=skipfooter, dtype_backend=dtype_backend, ) finally: # make sure to close opened file handles if should_close: io.close() return data
(io, sheet_name: 'str | int | list[IntStrT] | None' = 0, *, header: 'int | Sequence[int] | None' = 0, names: 'SequenceNotStr[Hashable] | range | None' = None, index_col: 'int | str | Sequence[int] | None' = None, usecols: 'int | str | Sequence[int] | Sequence[str] | Callable[[str], bool] | None' = None, dtype: 'DtypeArg | None' = None, engine: "Literal['xlrd', 'openpyxl', 'odf', 'pyxlsb', 'calamine'] | None" = None, converters: 'dict[str, Callable] | dict[int, Callable] | None' = None, true_values: 'Iterable[Hashable] | None' = None, false_values: 'Iterable[Hashable] | None' = None, skiprows: 'Sequence[int] | int | Callable[[int], object] | None' = None, nrows: 'int | None' = None, na_values=None, keep_default_na: 'bool' = True, na_filter: 'bool' = True, verbose: 'bool' = False, parse_dates: 'list | dict | bool' = False, date_parser: 'Callable | lib.NoDefault' = <no_default>, date_format: 'dict[Hashable, str] | str | None' = None, thousands: 'str | None' = None, decimal: 'str' = '.', comment: 'str | None' = None, skipfooter: 'int' = 0, storage_options: 'StorageOptions | None' = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>, engine_kwargs: 'dict | None' = None) -> 'DataFrame | dict[IntStrT, DataFrame]'
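As a hedged illustration of ``read_excel`` (it assumes the ``openpyxl`` engine is installed and uses a throwaway file name), the sketch below writes a tiny workbook with ``DataFrame.to_excel`` and reads it back, once with explicit dtypes and once with ``sheet_name=None`` to get a dict of all sheets.

import pandas as pd

# Write a small workbook first so the example is self-contained (needs openpyxl).
pd.DataFrame({"Name": ["string1", "string2"], "Value": [1, 2]}).to_excel(
    "tmp.xlsx", index=False
)

# Read it back with explicit column dtypes.
df = pd.read_excel("tmp.xlsx", dtype={"Name": str, "Value": float})
print(df)

# sheet_name=None returns every worksheet as a dict keyed by sheet name.
sheets = pd.read_excel("tmp.xlsx", sheet_name=None)
print(list(sheets))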
68,359
pandas.io.feather_format
read_feather
Load a feather-format object from the file path. Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``read()`` function. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.feather``. columns : sequence, default None If not provided, all columns are read. use_threads : bool, default True Whether to parallelize reading using multiple threads. storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc. For HTTP(S) URLs the key-value pairs are forwarded to ``urllib.request.Request`` as header options. For other URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more details, and for more examples on storage options refer `here <https://pandas.pydata.org/docs/user_guide/io.html? highlight=storage_options#reading-writing-remote-files>`_. dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 Returns ------- type of object stored in file Examples -------- >>> df = pd.read_feather("path/to/file.feather") # doctest: +SKIP
@doc(storage_options=_shared_docs["storage_options"]) def read_feather( path: FilePath | ReadBuffer[bytes], columns: Sequence[Hashable] | None = None, use_threads: bool = True, storage_options: StorageOptions | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, ) -> DataFrame: """ Load a feather-format object from the file path. Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``read()`` function. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.feather``. columns : sequence, default None If not provided, all columns are read. use_threads : bool, default True Whether to parallelize reading using multiple threads. {storage_options} dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 Returns ------- type of object stored in file Examples -------- >>> df = pd.read_feather("path/to/file.feather") # doctest: +SKIP """ import_optional_dependency("pyarrow") from pyarrow import feather # import utils to register the pyarrow extension types import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401 check_dtype_backend(dtype_backend) with get_handle( path, "rb", storage_options=storage_options, is_text=False ) as handles: if dtype_backend is lib.no_default and not using_pyarrow_string_dtype(): return feather.read_feather( handles.handle, columns=columns, use_threads=bool(use_threads) ) pa_table = feather.read_table( handles.handle, columns=columns, use_threads=bool(use_threads) ) if dtype_backend == "numpy_nullable": from pandas.io._util import _arrow_dtype_mapping return pa_table.to_pandas(types_mapper=_arrow_dtype_mapping().get) elif dtype_backend == "pyarrow": return pa_table.to_pandas(types_mapper=pd.ArrowDtype) elif using_pyarrow_string_dtype(): return pa_table.to_pandas(types_mapper=arrow_string_types_mapper()) else: raise NotImplementedError
(path: 'FilePath | ReadBuffer[bytes]', columns: 'Sequence[Hashable] | None' = None, use_threads: 'bool' = True, storage_options: 'StorageOptions | None' = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>) -> 'DataFrame'
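A small round-trip sketch for ``read_feather`` follows; it assumes ``pyarrow`` is installed and uses an illustrative file name. It writes a frame with ``DataFrame.to_feather``, then reads back a column subset with a pyarrow-backed dtype.

import pandas as pd

# Feather I/O requires pyarrow; the path below is just an example.
df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
df.to_feather("example.feather")

subset = pd.read_feather("example.feather", columns=["a"], dtype_backend="pyarrow")
print(subset.dtypes)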
68,360
pandas.io.parsers.readers
read_fwf
Read a table of fixed-width formatted lines into DataFrame. Also supports optionally iterating or breaking of the file into chunks. Additional help can be found in the `online docs for IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_. Parameters ---------- filepath_or_buffer : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a text ``read()`` function.The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.csv``. colspecs : list of tuple (int, int) or 'infer'. optional A list of tuples giving the extents of the fixed-width fields of each line as half-open intervals (i.e., [from, to[ ). String value 'infer' can be used to instruct the parser to try detecting the column specifications from the first 100 rows of the data which are not being skipped via skiprows (default='infer'). widths : list of int, optional A list of field widths which can be used instead of 'colspecs' if the intervals are contiguous. infer_nrows : int, default 100 The number of rows to consider when letting the parser determine the `colspecs`. dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 **kwds : optional Optional keyword arguments can be passed to ``TextFileReader``. Returns ------- DataFrame or TextFileReader A comma-separated values (csv) file is returned as two-dimensional data structure with labeled axes. See Also -------- DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. read_csv : Read a comma-separated values (csv) file into DataFrame. Examples -------- >>> pd.read_fwf('data.csv') # doctest: +SKIP
def read_fwf( filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, colspecs: Sequence[tuple[int, int]] | str | None = "infer", widths: Sequence[int] | None = None, infer_nrows: int = 100, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, iterator: bool = False, chunksize: int | None = None, **kwds, ) -> DataFrame | TextFileReader: r""" Read a table of fixed-width formatted lines into DataFrame. Also supports optionally iterating or breaking of the file into chunks. Additional help can be found in the `online docs for IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_. Parameters ---------- filepath_or_buffer : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a text ``read()`` function.The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.csv``. colspecs : list of tuple (int, int) or 'infer'. optional A list of tuples giving the extents of the fixed-width fields of each line as half-open intervals (i.e., [from, to[ ). String value 'infer' can be used to instruct the parser to try detecting the column specifications from the first 100 rows of the data which are not being skipped via skiprows (default='infer'). widths : list of int, optional A list of field widths which can be used instead of 'colspecs' if the intervals are contiguous. infer_nrows : int, default 100 The number of rows to consider when letting the parser determine the `colspecs`. dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 **kwds : optional Optional keyword arguments can be passed to ``TextFileReader``. Returns ------- DataFrame or TextFileReader A comma-separated values (csv) file is returned as two-dimensional data structure with labeled axes. See Also -------- DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. read_csv : Read a comma-separated values (csv) file into DataFrame. Examples -------- >>> pd.read_fwf('data.csv') # doctest: +SKIP """ # Check input arguments. if colspecs is None and widths is None: raise ValueError("Must specify either colspecs or widths") if colspecs not in (None, "infer") and widths is not None: raise ValueError("You must specify only one of 'widths' and 'colspecs'") # Compute 'colspecs' from 'widths', if specified. 
if widths is not None: colspecs, col = [], 0 for w in widths: colspecs.append((col, col + w)) col += w # for mypy assert colspecs is not None # GH#40830 # Ensure length of `colspecs` matches length of `names` names = kwds.get("names") if names is not None: if len(names) != len(colspecs) and colspecs != "infer": # need to check len(index_col) as it might contain # unnamed indices, in which case it's name is not required len_index = 0 if kwds.get("index_col") is not None: index_col: Any = kwds.get("index_col") if index_col is not False: if not is_list_like(index_col): len_index = 1 else: len_index = len(index_col) if kwds.get("usecols") is None and len(names) + len_index != len(colspecs): # If usecols is used colspec may be longer than names raise ValueError("Length of colspecs must match length of names") kwds["colspecs"] = colspecs kwds["infer_nrows"] = infer_nrows kwds["engine"] = "python-fwf" kwds["iterator"] = iterator kwds["chunksize"] = chunksize check_dtype_backend(dtype_backend) kwds["dtype_backend"] = dtype_backend return _read(filepath_or_buffer, kwds)
(filepath_or_buffer: 'FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str]', *, colspecs: 'Sequence[tuple[int, int]] | str | None' = 'infer', widths: 'Sequence[int] | None' = None, infer_nrows: 'int' = 100, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>, iterator: 'bool' = False, chunksize: 'int | None' = None, **kwds) -> 'DataFrame | TextFileReader'
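The sketch below shows the two main ways of describing fixed-width columns to ``read_fwf``: explicit ``widths`` and inferred ``colspecs``. It parses an in-memory buffer, so no file is assumed; the layout is illustrative.

import io

import pandas as pd

fwf_text = (
    "id  name    score\n"
    "1   alpha   3.5  \n"
    "2   beta    4.0  \n"
)

# Field widths of 4, 8 and 5 characters cover the three columns above.
df = pd.read_fwf(io.StringIO(fwf_text), widths=[4, 8, 5])
print(df)

# Or let the parser infer the column boundaries from the first rows.
df_inferred = pd.read_fwf(io.StringIO(fwf_text), colspecs="infer", infer_nrows=100)
print(df_inferred.columns.tolist())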
68,361
pandas.io.gbq
read_gbq
Load data from Google BigQuery. .. deprecated:: 2.2.0 Please use ``pandas_gbq.read_gbq`` instead. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- query : str SQL-Like Query to return data values. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. index_col : str, optional Name of result column to use for index in results DataFrame. col_order : list(str), optional List of BigQuery column names in the desired order for results DataFrame. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. dialect : str, default 'legacy' Note: The default value is changing to 'standard' in a future version. SQL syntax dialect to use. Value can be one of: ``'legacy'`` Use BigQuery's legacy SQL dialect. For more information see `BigQuery Legacy SQL Reference <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__. ``'standard'`` Use BigQuery's standard SQL, which is compliant with the SQL 2011 standard. For more information see `BigQuery Standard SQL Reference <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__. location : str, optional Location where the query job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of any datasets used in the query. *New in version 0.5.0 of pandas-gbq*. configuration : dict, optional Query config parameters for job processing. For example: configuration = {'query': {'useQueryCache': False}} For more information see `BigQuery REST API Reference <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. use_bqstorage_api : bool, default False Use the `BigQuery Storage API <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to download query results quickly, but at an increased cost. To use this API, first `enable it in the Cloud Console <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__. 
You must also have the `bigquery.readsessions.create <https://cloud.google.com/bigquery/docs/access-control#roles>`__ permission on the project you are billing queries to. This feature requires version 0.10.0 or later of the ``pandas-gbq`` package. It also requires the ``google-cloud-bigquery-storage`` and ``fastavro`` packages. max_results : int, optional If set, limit the maximum number of rows to fetch from the query results. progress_bar_type : Optional, str If set, use the `tqdm <https://tqdm.github.io/>`__ library to display a progress bar while the data downloads. Install the ``tqdm`` package to use this feature. Possible values of ``progress_bar_type`` include: ``None`` No progress bar. ``'tqdm'`` Use the :func:`tqdm.tqdm` function to print a progress bar to :data:`sys.stderr`. ``'tqdm_notebook'`` Use the :func:`tqdm.tqdm_notebook` function to display a progress bar as a Jupyter notebook widget. ``'tqdm_gui'`` Use the :func:`tqdm.tqdm_gui` function to display a progress bar as a graphical dialog box. Returns ------- df: DataFrame DataFrame representing results of query. See Also -------- pandas_gbq.read_gbq : This function in the pandas-gbq library. DataFrame.to_gbq : Write a DataFrame to Google BigQuery. Examples -------- Example taken from `Google BigQuery documentation <https://cloud.google.com/bigquery/docs/pandas-gbq-migration>`_ >>> sql = "SELECT name FROM table_name WHERE state = 'TX' LIMIT 100;" >>> df = pd.read_gbq(sql, dialect="standard") # doctest: +SKIP >>> project_id = "your-project-id" # doctest: +SKIP >>> df = pd.read_gbq(sql, ... project_id=project_id, ... dialect="standard" ... ) # doctest: +SKIP
def read_gbq( query: str, project_id: str | None = None, index_col: str | None = None, col_order: list[str] | None = None, reauth: bool = False, auth_local_webserver: bool = True, dialect: str | None = None, location: str | None = None, configuration: dict[str, Any] | None = None, credentials: google.auth.credentials.Credentials | None = None, use_bqstorage_api: bool | None = None, max_results: int | None = None, progress_bar_type: str | None = None, ) -> DataFrame: """ Load data from Google BigQuery. .. deprecated:: 2.2.0 Please use ``pandas_gbq.read_gbq`` instead. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- query : str SQL-Like Query to return data values. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. index_col : str, optional Name of result column to use for index in results DataFrame. col_order : list(str), optional List of BigQuery column names in the desired order for results DataFrame. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. dialect : str, default 'legacy' Note: The default value is changing to 'standard' in a future version. SQL syntax dialect to use. Value can be one of: ``'legacy'`` Use BigQuery's legacy SQL dialect. For more information see `BigQuery Legacy SQL Reference <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__. ``'standard'`` Use BigQuery's standard SQL, which is compliant with the SQL 2011 standard. For more information see `BigQuery Standard SQL Reference <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__. location : str, optional Location where the query job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of any datasets used in the query. *New in version 0.5.0 of pandas-gbq*. configuration : dict, optional Query config parameters for job processing. For example: configuration = {'query': {'useQueryCache': False}} For more information see `BigQuery REST API Reference <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. 
*New in version 0.8.0 of pandas-gbq*. use_bqstorage_api : bool, default False Use the `BigQuery Storage API <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to download query results quickly, but at an increased cost. To use this API, first `enable it in the Cloud Console <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__. You must also have the `bigquery.readsessions.create <https://cloud.google.com/bigquery/docs/access-control#roles>`__ permission on the project you are billing queries to. This feature requires version 0.10.0 or later of the ``pandas-gbq`` package. It also requires the ``google-cloud-bigquery-storage`` and ``fastavro`` packages. max_results : int, optional If set, limit the maximum number of rows to fetch from the query results. progress_bar_type : Optional, str If set, use the `tqdm <https://tqdm.github.io/>`__ library to display a progress bar while the data downloads. Install the ``tqdm`` package to use this feature. Possible values of ``progress_bar_type`` include: ``None`` No progress bar. ``'tqdm'`` Use the :func:`tqdm.tqdm` function to print a progress bar to :data:`sys.stderr`. ``'tqdm_notebook'`` Use the :func:`tqdm.tqdm_notebook` function to display a progress bar as a Jupyter notebook widget. ``'tqdm_gui'`` Use the :func:`tqdm.tqdm_gui` function to display a progress bar as a graphical dialog box. Returns ------- df: DataFrame DataFrame representing results of query. See Also -------- pandas_gbq.read_gbq : This function in the pandas-gbq library. DataFrame.to_gbq : Write a DataFrame to Google BigQuery. Examples -------- Example taken from `Google BigQuery documentation <https://cloud.google.com/bigquery/docs/pandas-gbq-migration>`_ >>> sql = "SELECT name FROM table_name WHERE state = 'TX' LIMIT 100;" >>> df = pd.read_gbq(sql, dialect="standard") # doctest: +SKIP >>> project_id = "your-project-id" # doctest: +SKIP >>> df = pd.read_gbq(sql, ... project_id=project_id, ... dialect="standard" ... ) # doctest: +SKIP """ warnings.warn( "read_gbq is deprecated and will be removed in a future version. " "Please use pandas_gbq.read_gbq instead: " "https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.read_gbq", FutureWarning, stacklevel=find_stack_level(), ) pandas_gbq = _try_import() kwargs: dict[str, str | bool | int | None] = {} # START: new kwargs. Don't populate unless explicitly set. if use_bqstorage_api is not None: kwargs["use_bqstorage_api"] = use_bqstorage_api if max_results is not None: kwargs["max_results"] = max_results kwargs["progress_bar_type"] = progress_bar_type # END: new kwargs return pandas_gbq.read_gbq( query, project_id=project_id, index_col=index_col, col_order=col_order, reauth=reauth, auth_local_webserver=auth_local_webserver, dialect=dialect, location=location, configuration=configuration, credentials=credentials, **kwargs, )
(query: 'str', project_id: 'str | None' = None, index_col: 'str | None' = None, col_order: 'list[str] | None' = None, reauth: 'bool' = False, auth_local_webserver: 'bool' = True, dialect: 'str | None' = None, location: 'str | None' = None, configuration: 'dict[str, Any] | None' = None, credentials: 'google.auth.credentials.Credentials | None' = None, use_bqstorage_api: 'bool | None' = None, max_results: 'int | None' = None, progress_bar_type: 'str | None' = None) -> 'DataFrame'
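Since ``pd.read_gbq`` is deprecated in favour of ``pandas_gbq.read_gbq``, the hedged sketch below shows the replacement call instead. It cannot run without the ``pandas-gbq`` package, valid Google credentials and a real project; the SQL, table and project id are placeholders.

import pandas_gbq

# Placeholder SQL and project id; substitute values for your own BigQuery setup.
sql = "SELECT name FROM `your_dataset.your_table` LIMIT 100"
df = pandas_gbq.read_gbq(sql, project_id="your-project-id", dialect="standard")
print(df.head())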
68,362
pandas.io.pytables
read_hdf
Read from the store, close it if we opened it. Retrieve pandas object stored in file, optionally based on where criteria. .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- path_or_buf : str, path object, pandas.HDFStore Any valid string path is acceptable. Only supports the local file system, remote URLs and file-like objects are not supported. If you want to pass in a path object, pandas accepts any ``os.PathLike``. Alternatively, pandas accepts an open :class:`pandas.HDFStore` object. key : object, optional The group identifier in the store. Can be omitted if the HDF file contains a single pandas object. mode : {'r', 'r+', 'a'}, default 'r' Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. where : list, optional A list of Term (or convertible) objects. start : int, optional Row number to start selection. stop : int, optional Row number to stop selection. columns : list, optional A list of columns names to return. iterator : bool, optional Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. **kwargs Additional keyword arguments passed to HDFStore. Returns ------- object The selected object. Return type depends on the object stored. See Also -------- DataFrame.to_hdf : Write a HDF file from a DataFrame. HDFStore : Low-level access to HDF files. Examples -------- >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) # doctest: +SKIP >>> df.to_hdf('./store.h5', 'data') # doctest: +SKIP >>> reread = pd.read_hdf('./store.h5') # doctest: +SKIP
def read_hdf( path_or_buf: FilePath | HDFStore, key=None, mode: str = "r", errors: str = "strict", where: str | list | None = None, start: int | None = None, stop: int | None = None, columns: list[str] | None = None, iterator: bool = False, chunksize: int | None = None, **kwargs, ): """ Read from the store, close it if we opened it. Retrieve pandas object stored in file, optionally based on where criteria. .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- path_or_buf : str, path object, pandas.HDFStore Any valid string path is acceptable. Only supports the local file system, remote URLs and file-like objects are not supported. If you want to pass in a path object, pandas accepts any ``os.PathLike``. Alternatively, pandas accepts an open :class:`pandas.HDFStore` object. key : object, optional The group identifier in the store. Can be omitted if the HDF file contains a single pandas object. mode : {'r', 'r+', 'a'}, default 'r' Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. where : list, optional A list of Term (or convertible) objects. start : int, optional Row number to start selection. stop : int, optional Row number to stop selection. columns : list, optional A list of columns names to return. iterator : bool, optional Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. **kwargs Additional keyword arguments passed to HDFStore. Returns ------- object The selected object. Return type depends on the object stored. See Also -------- DataFrame.to_hdf : Write a HDF file from a DataFrame. HDFStore : Low-level access to HDF files. Examples -------- >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) # doctest: +SKIP >>> df.to_hdf('./store.h5', 'data') # doctest: +SKIP >>> reread = pd.read_hdf('./store.h5') # doctest: +SKIP """ if mode not in ["r", "r+", "a"]: raise ValueError( f"mode {mode} is not allowed while performing a read. " f"Allowed modes are r, r+ and a." ) # grab the scope if where is not None: where = _ensure_term(where, scope_level=1) if isinstance(path_or_buf, HDFStore): if not path_or_buf.is_open: raise OSError("The HDFStore must be open for reading.") store = path_or_buf auto_close = False else: path_or_buf = stringify_path(path_or_buf) if not isinstance(path_or_buf, str): raise NotImplementedError( "Support for generic buffers has not been implemented." ) try: exists = os.path.exists(path_or_buf) # if filepath is too long except (TypeError, ValueError): exists = False if not exists: raise FileNotFoundError(f"File {path_or_buf} does not exist") store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs) # can't auto open/close if we are using an iterator # so delegate to the iterator auto_close = True try: if key is None: groups = store.groups() if len(groups) == 0: raise ValueError( "Dataset(s) incompatible with Pandas data types, " "not table, or no datasets found in HDF5 file." 
) candidate_only_group = groups[0] # For the HDF file to have only one dataset, all other groups # should then be metadata groups for that candidate group. (This # assumes that the groups() method enumerates parent groups # before their children.) for group_to_check in groups[1:]: if not _is_metadata_of(group_to_check, candidate_only_group): raise ValueError( "key must be provided when HDF5 " "file contains multiple datasets." ) key = candidate_only_group._v_pathname return store.select( key, where=where, start=start, stop=stop, columns=columns, iterator=iterator, chunksize=chunksize, auto_close=auto_close, ) except (ValueError, TypeError, LookupError): if not isinstance(path_or_buf, HDFStore): # if there is an error, close the store if we opened it. with suppress(AttributeError): store.close() raise
(path_or_buf: 'FilePath | HDFStore', key=None, mode: 'str' = 'r', errors: 'str' = 'strict', where: 'str | list | None' = None, start: 'int | None' = None, stop: 'int | None' = None, columns: 'list[str] | None' = None, iterator: 'bool' = False, chunksize: 'int | None' = None, **kwargs)
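A minimal round-trip sketch for ``read_hdf`` follows; it assumes the optional PyTables dependency (the ``tables`` package) is installed, and the file name and key are arbitrary choices. Writing with ``format="table"`` is what enables the ``where``/``start``/``stop`` selection shown on read.

# Round-trip sketch for read_hdf (assumes the optional PyTables package
# "tables" is installed; file name and key are arbitrary).
import pandas as pd

df = pd.DataFrame({"x": range(5), "y": list("abcde")})

# Write in "table" format so that where/start/stop selection works on read.
df.to_hdf("store.h5", key="data", mode="w", format="table")

# key may be omitted because the file contains a single pandas object.
full = pd.read_hdf("store.h5")

# Row-filtered read using a where criterion against the index.
subset = pd.read_hdf("store.h5", key="data", where="index >= 3")
print(full.shape, subset.shape)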
68,363
pandas.io.html
read_html
Read HTML tables into a ``list`` of ``DataFrame`` objects. Parameters ---------- io : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a string ``read()`` function. The string can represent a URL or the HTML itself. Note that lxml only accepts the http, ftp and file url protocols. If you have a URL that starts with ``'https'`` you might try removing the ``'s'``. .. deprecated:: 2.1.0 Passing html literal strings is deprecated. Wrap literal string/bytes input in ``io.StringIO``/``io.BytesIO`` instead. match : str or compiled regular expression, optional The set of tables containing text matching this regex or string will be returned. Unless the HTML is extremely simple you will probably need to pass a non-empty string here. Defaults to '.+' (match any non-empty string). The default value will return all tables contained on a page. This value is converted to a regular expression so that there is consistent behavior between Beautiful Soup and lxml. flavor : {"lxml", "html5lib", "bs4"} or list-like, optional The parsing engine (or list of parsing engines) to use. 'bs4' and 'html5lib' are synonymous with each other, they are both there for backwards compatibility. The default of ``None`` tries to use ``lxml`` to parse and if that fails it falls back on ``bs4`` + ``html5lib``. header : int or list-like, optional The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to make the columns headers. index_col : int or list-like, optional The column (or list of columns) to use to create the index. skiprows : int, list-like or slice, optional Number of rows to skip after parsing the column integer. 0-based. If a sequence of integers or a slice is given, will skip the rows indexed by that sequence. Note that a single element sequence means 'skip the nth row' whereas an integer means 'skip n rows'. attrs : dict, optional This is a dictionary of attributes that you can pass to use to identify the table in the HTML. These are not checked for validity before being passed to lxml or Beautiful Soup. However, these attributes must be valid HTML table attributes to work correctly. For example, :: attrs = {'id': 'table'} is a valid attribute dictionary because the 'id' HTML tag attribute is a valid HTML attribute for *any* HTML tag as per `this document <https://html.spec.whatwg.org/multipage/dom.html#global-attributes>`__. :: attrs = {'asdf': 'table'} is *not* a valid attribute dictionary because 'asdf' is not a valid HTML attribute even if it is a valid XML attribute. Valid HTML 4.01 table attributes can be found `here <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A working draft of the HTML 5 spec can be found `here <https://html.spec.whatwg.org/multipage/tables.html>`__. It contains the latest information on table attributes for the modern web. parse_dates : bool, optional See :func:`~read_csv` for more details. thousands : str, optional Separator to use to parse thousands. Defaults to ``','``. encoding : str, optional The encoding used to decode the web page. Defaults to ``None``.``None`` preserves the previous encoding behavior, which depends on the underlying parser library (e.g., the parser library will try to use the encoding provided by the document). decimal : str, default '.' Character to recognize as decimal point (e.g. use ',' for European data). converters : dict, default None Dict of functions for converting values in certain columns. 
Keys can either be integers or column labels, values are functions that take one input argument, the cell (not column) content, and return the transformed content. na_values : iterable, default None Custom NA values. keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to. displayed_only : bool, default True Whether elements with "display: none" should be parsed. extract_links : {None, "all", "header", "body", "footer"} Table elements in the specified section(s) with <a> tags will have their href extracted. .. versionadded:: 1.5.0 dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc. For HTTP(S) URLs the key-value pairs are forwarded to ``urllib.request.Request`` as header options. For other URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more details, and for more examples on storage options refer `here <https://pandas.pydata.org/docs/user_guide/io.html? highlight=storage_options#reading-writing-remote-files>`_. .. versionadded:: 2.1.0 Returns ------- dfs A list of DataFrames. See Also -------- read_csv : Read a comma-separated values (csv) file into DataFrame. Notes ----- Before using this function you should read the :ref:`gotchas about the HTML parsing libraries <io.html.gotchas>`. Expect to do some cleanup after you call this function. For example, you might need to manually assign column names if the column names are converted to NaN when you pass the `header=0` argument. We try to assume as little as possible about the structure of the table and push the idiosyncrasies of the HTML contained in the table to the user. This function searches for ``<table>`` elements and only for ``<tr>`` and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>`` element in the table. ``<td>`` stands for "table data". This function attempts to properly handle ``colspan`` and ``rowspan`` attributes. If the function has a ``<thead>`` argument, it is used to construct the header, otherwise the function attempts to find the header within the body (by putting rows with only ``<th>`` elements into the header). Similar to :func:`~read_csv` the `header` argument is applied **after** `skiprows` is applied. This function will *always* return a list of :class:`DataFrame` *or* it will fail, e.g., it will *not* return an empty list. Examples -------- See the :ref:`read_html documentation in the IO section of the docs <io.read_html>` for some examples of reading in HTML tables.
@doc(storage_options=_shared_docs["storage_options"]) def read_html( io: FilePath | ReadBuffer[str], *, match: str | Pattern = ".+", flavor: HTMLFlavors | Sequence[HTMLFlavors] | None = None, header: int | Sequence[int] | None = None, index_col: int | Sequence[int] | None = None, skiprows: int | Sequence[int] | slice | None = None, attrs: dict[str, str] | None = None, parse_dates: bool = False, thousands: str | None = ",", encoding: str | None = None, decimal: str = ".", converters: dict | None = None, na_values: Iterable[object] | None = None, keep_default_na: bool = True, displayed_only: bool = True, extract_links: Literal[None, "header", "footer", "body", "all"] = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, storage_options: StorageOptions = None, ) -> list[DataFrame]: r""" Read HTML tables into a ``list`` of ``DataFrame`` objects. Parameters ---------- io : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a string ``read()`` function. The string can represent a URL or the HTML itself. Note that lxml only accepts the http, ftp and file url protocols. If you have a URL that starts with ``'https'`` you might try removing the ``'s'``. .. deprecated:: 2.1.0 Passing html literal strings is deprecated. Wrap literal string/bytes input in ``io.StringIO``/``io.BytesIO`` instead. match : str or compiled regular expression, optional The set of tables containing text matching this regex or string will be returned. Unless the HTML is extremely simple you will probably need to pass a non-empty string here. Defaults to '.+' (match any non-empty string). The default value will return all tables contained on a page. This value is converted to a regular expression so that there is consistent behavior between Beautiful Soup and lxml. flavor : {{"lxml", "html5lib", "bs4"}} or list-like, optional The parsing engine (or list of parsing engines) to use. 'bs4' and 'html5lib' are synonymous with each other, they are both there for backwards compatibility. The default of ``None`` tries to use ``lxml`` to parse and if that fails it falls back on ``bs4`` + ``html5lib``. header : int or list-like, optional The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to make the columns headers. index_col : int or list-like, optional The column (or list of columns) to use to create the index. skiprows : int, list-like or slice, optional Number of rows to skip after parsing the column integer. 0-based. If a sequence of integers or a slice is given, will skip the rows indexed by that sequence. Note that a single element sequence means 'skip the nth row' whereas an integer means 'skip n rows'. attrs : dict, optional This is a dictionary of attributes that you can pass to use to identify the table in the HTML. These are not checked for validity before being passed to lxml or Beautiful Soup. However, these attributes must be valid HTML table attributes to work correctly. For example, :: attrs = {{'id': 'table'}} is a valid attribute dictionary because the 'id' HTML tag attribute is a valid HTML attribute for *any* HTML tag as per `this document <https://html.spec.whatwg.org/multipage/dom.html#global-attributes>`__. :: attrs = {{'asdf': 'table'}} is *not* a valid attribute dictionary because 'asdf' is not a valid HTML attribute even if it is a valid XML attribute. Valid HTML 4.01 table attributes can be found `here <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. 
A working draft of the HTML 5 spec can be found `here <https://html.spec.whatwg.org/multipage/tables.html>`__. It contains the latest information on table attributes for the modern web. parse_dates : bool, optional See :func:`~read_csv` for more details. thousands : str, optional Separator to use to parse thousands. Defaults to ``','``. encoding : str, optional The encoding used to decode the web page. Defaults to ``None``.``None`` preserves the previous encoding behavior, which depends on the underlying parser library (e.g., the parser library will try to use the encoding provided by the document). decimal : str, default '.' Character to recognize as decimal point (e.g. use ',' for European data). converters : dict, default None Dict of functions for converting values in certain columns. Keys can either be integers or column labels, values are functions that take one input argument, the cell (not column) content, and return the transformed content. na_values : iterable, default None Custom NA values. keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to. displayed_only : bool, default True Whether elements with "display: none" should be parsed. extract_links : {{None, "all", "header", "body", "footer"}} Table elements in the specified section(s) with <a> tags will have their href extracted. .. versionadded:: 1.5.0 dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 {storage_options} .. versionadded:: 2.1.0 Returns ------- dfs A list of DataFrames. See Also -------- read_csv : Read a comma-separated values (csv) file into DataFrame. Notes ----- Before using this function you should read the :ref:`gotchas about the HTML parsing libraries <io.html.gotchas>`. Expect to do some cleanup after you call this function. For example, you might need to manually assign column names if the column names are converted to NaN when you pass the `header=0` argument. We try to assume as little as possible about the structure of the table and push the idiosyncrasies of the HTML contained in the table to the user. This function searches for ``<table>`` elements and only for ``<tr>`` and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>`` element in the table. ``<td>`` stands for "table data". This function attempts to properly handle ``colspan`` and ``rowspan`` attributes. If the function has a ``<thead>`` argument, it is used to construct the header, otherwise the function attempts to find the header within the body (by putting rows with only ``<th>`` elements into the header). Similar to :func:`~read_csv` the `header` argument is applied **after** `skiprows` is applied. This function will *always* return a list of :class:`DataFrame` *or* it will fail, e.g., it will *not* return an empty list. Examples -------- See the :ref:`read_html documentation in the IO section of the docs <io.read_html>` for some examples of reading in HTML tables. """ # Type check here. We don't want to parse only to fail because of an # invalid value of an integer skiprows. 
if isinstance(skiprows, numbers.Integral) and skiprows < 0: raise ValueError( "cannot skip rows starting from the end of the " "data (you passed a negative value)" ) if extract_links not in [None, "header", "footer", "body", "all"]: raise ValueError( "`extract_links` must be one of " '{None, "header", "footer", "body", "all"}, got ' f'"{extract_links}"' ) validate_header_arg(header) check_dtype_backend(dtype_backend) io = stringify_path(io) if isinstance(io, str) and not any( [ is_file_like(io), file_exists(io), is_url(io), is_fsspec_url(io), ] ): warnings.warn( "Passing literal html to 'read_html' is deprecated and " "will be removed in a future version. To read from a " "literal string, wrap it in a 'StringIO' object.", FutureWarning, stacklevel=find_stack_level(), ) return _parse( flavor=flavor, io=io, match=match, header=header, index_col=index_col, skiprows=skiprows, parse_dates=parse_dates, thousands=thousands, attrs=attrs, encoding=encoding, decimal=decimal, converters=converters, na_values=na_values, keep_default_na=keep_default_na, displayed_only=displayed_only, extract_links=extract_links, dtype_backend=dtype_backend, storage_options=storage_options, )
(io: 'FilePath | ReadBuffer[str]', *, match: 'str | Pattern' = '.+', flavor: 'HTMLFlavors | Sequence[HTMLFlavors] | None' = None, header: 'int | Sequence[int] | None' = None, index_col: 'int | Sequence[int] | None' = None, skiprows: 'int | Sequence[int] | slice | None' = None, attrs: 'dict[str, str] | None' = None, parse_dates: 'bool' = False, thousands: 'str | None' = ',', encoding: 'str | None' = None, decimal: 'str' = '.', converters: 'dict | None' = None, na_values: 'Iterable[object] | None' = None, keep_default_na: 'bool' = True, displayed_only: 'bool' = True, extract_links: "Literal[None, 'header', 'footer', 'body', 'all']" = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>, storage_options: 'StorageOptions' = None) -> 'list[DataFrame]'
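A small sketch of ``read_html`` on an in-memory document follows, wrapping the literal HTML in ``StringIO`` as the deprecation note above requires. It assumes one of the optional parser backends (lxml, or beautifulsoup4 plus html5lib) is installed; the table contents are made up for illustration.

# Sketch: parse an in-memory HTML table (assumes lxml or bs4+html5lib is
# installed; the table contents are invented for illustration).
from io import StringIO

import pandas as pd

html = """
<table id="pop">
  <tr><th>city</th><th>population</th></tr>
  <tr><td>Springfield</td><td>30,720</td></tr>
  <tr><td>Shelbyville</td><td>12,345</td></tr>
</table>
"""

# read_html always returns a list of DataFrames; attrs restricts matching
# to tables whose id attribute is "pop", and thousands strips the commas.
tables = pd.read_html(StringIO(html), attrs={"id": "pop"}, thousands=",")
df = tables[0]
print(df.dtypes)

Because the header row contains only ``<th>`` cells, it is promoted to column labels, and the population values parse as integers once the thousands separator is stripped.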
68,364
pandas.io.json._json
read_json
Convert a JSON string to pandas object. Parameters ---------- path_or_buf : a valid JSON str, path object or file-like object Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.json``. If you want to pass in a path object, pandas accepts any ``os.PathLike``. By file-like object, we refer to objects with a ``read()`` method, such as a file handle (e.g. via builtin ``open`` function) or ``StringIO``. .. deprecated:: 2.1.0 Passing json literal strings is deprecated. orient : str, optional Indication of expected JSON string format. Compatible JSON strings can be produced by ``to_json()`` with a corresponding orient value. The set of possible orients is: - ``'split'`` : dict like ``{index -> [index], columns -> [columns], data -> [values]}`` - ``'records'`` : list like ``[{column -> value}, ... , {column -> value}]`` - ``'index'`` : dict like ``{index -> {column -> value}}`` - ``'columns'`` : dict like ``{column -> {index -> value}}`` - ``'values'`` : just the values array - ``'table'`` : dict like ``{'schema': {schema}, 'data': {data}}`` The allowed and default values depend on the value of the `typ` parameter. * when ``typ == 'series'``, - allowed orients are ``{'split','records','index'}`` - default is ``'index'`` - The Series index must be unique for orient ``'index'``. * when ``typ == 'frame'``, - allowed orients are ``{'split','records','index', 'columns','values', 'table'}`` - default is ``'columns'`` - The DataFrame index must be unique for orients ``'index'`` and ``'columns'``. - The DataFrame columns must be unique for orients ``'index'``, ``'columns'``, and ``'records'``. typ : {'frame', 'series'}, default 'frame' The type of object to recover. dtype : bool or dict, default None If True, infer dtypes; if a dict of column to dtype, then use those; if False, then don't infer dtypes at all, applies only to the data. For all ``orient`` values except ``'table'``, default is True. convert_axes : bool, default None Try to convert the axes to the proper dtypes. For all ``orient`` values except ``'table'``, default is True. convert_dates : bool or list of str, default True If True then default datelike columns may be converted (depending on keep_default_dates). If False, no dates will be converted. If a list of column names, then those columns will be converted and default datelike columns may also be converted (depending on keep_default_dates). keep_default_dates : bool, default True If parsing dates (convert_dates is not False), then try to parse the default datelike columns. A column label is datelike if * it ends with ``'_at'``, * it ends with ``'_time'``, * it begins with ``'timestamp'``, * it is ``'modified'``, or * it is ``'date'``. precise_float : bool, default False Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (False) is to use fast but less precise builtin functionality. date_unit : str, default None The timestamp unit to detect if converting dates. The default behaviour is to try and detect the correct precision, but if this is not desired then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds, milliseconds, microseconds or nanoseconds respectively. encoding : str, default is 'utf-8' The encoding to use to decode py3 bytes. encoding_errors : str, optional, default "strict" How encoding errors are treated. 
`List of possible values <https://docs.python.org/3/library/codecs.html#error-handlers>`_ . .. versionadded:: 1.3.0 lines : bool, default False Read the file as a json object per line. chunksize : int, optional Return JsonReader object for iteration. See the `line-delimited json docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_ for more information on ``chunksize``. This can only be passed if `lines=True`. If this is None, the file will be read into memory all at once. compression : str or dict, default 'infer' For on-the-fly decompression of on-disk data. If 'infer' and 'path_or_buf' is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2' (otherwise no compression). If using 'zip' or 'tar', the ZIP file must contain only one data file to be read in. Set to ``None`` for no decompression. Can also be a dict with key ``'method'`` set to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and other key-value pairs are forwarded to ``zipfile.ZipFile``, ``gzip.GzipFile``, ``bz2.BZ2File``, ``zstandard.ZstdDecompressor``, ``lzma.LZMAFile`` or ``tarfile.TarFile``, respectively. As an example, the following could be passed for Zstandard decompression using a custom compression dictionary: ``compression={'method': 'zstd', 'dict_data': my_compression_dict}``. .. versionadded:: 1.5.0 Added support for `.tar` files. .. versionchanged:: 1.4.0 Zstandard support. nrows : int, optional The number of lines from the line-delimited jsonfile that has to be read. This can only be passed if `lines=True`. If this is None, all the rows will be returned. storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc. For HTTP(S) URLs the key-value pairs are forwarded to ``urllib.request.Request`` as header options. For other URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more details, and for more examples on storage options refer `here <https://pandas.pydata.org/docs/user_guide/io.html? highlight=storage_options#reading-writing-remote-files>`_. dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 engine : {"ujson", "pyarrow"}, default "ujson" Parser engine to use. The ``"pyarrow"`` engine is only available when ``lines=True``. .. versionadded:: 2.0 Returns ------- Series, DataFrame, or pandas.api.typing.JsonReader A JsonReader is returned when ``chunksize`` is not ``0`` or ``None``. Otherwise, the type returned depends on the value of ``typ``. See Also -------- DataFrame.to_json : Convert a DataFrame to a JSON string. Series.to_json : Convert a Series to a JSON string. json_normalize : Normalize semi-structured JSON data into a flat table. Notes ----- Specific to ``orient='table'``, if a :class:`DataFrame` with a literal :class:`Index` name of `index` gets written with :func:`to_json`, the subsequent read operation will incorrectly set the :class:`Index` name to ``None``. 
This is because `index` is also used by :func:`DataFrame.to_json` to denote a missing :class:`Index` name, and the subsequent :func:`read_json` operation cannot distinguish between the two. The same limitation is encountered with a :class:`MultiIndex` and any names beginning with ``'level_'``. Examples -------- >>> from io import StringIO >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) Encoding/decoding a Dataframe using ``'split'`` formatted JSON: >>> df.to_json(orient='split') '{"columns":["col 1","col 2"],"index":["row 1","row 2"],"data":[["a","b"],["c","d"]]}' >>> pd.read_json(StringIO(_), orient='split') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> df.to_json(orient='index') '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}' >>> pd.read_json(StringIO(_), orient='index') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> df.to_json(orient='records') '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]' >>> pd.read_json(StringIO(_), orient='records') col 1 col 2 0 a b 1 c d Encoding with Table Schema >>> df.to_json(orient='table') '{"schema":{"fields":[{"name":"index","type":"string"},{"name":"col 1","type":"string"},{"name":"col 2","type":"string"}],"primaryKey":["index"],"pandas_version":"1.4.0"},"data":[{"index":"row 1","col 1":"a","col 2":"b"},{"index":"row 2","col 1":"c","col 2":"d"}]}' The following example uses ``dtype_backend="numpy_nullable"`` >>> data = '''{"index": {"0": 0, "1": 1}, ... "a": {"0": 1, "1": null}, ... "b": {"0": 2.5, "1": 4.5}, ... "c": {"0": true, "1": false}, ... "d": {"0": "a", "1": "b"}, ... "e": {"0": 1577.2, "1": 1577.1}}''' >>> pd.read_json(StringIO(data), dtype_backend="numpy_nullable") index a b c d e 0 0 1 2.5 True a 1577.2 1 1 <NA> 4.5 False b 1577.1
@doc( storage_options=_shared_docs["storage_options"], decompression_options=_shared_docs["decompression_options"] % "path_or_buf", ) def read_json( path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None = None, typ: Literal["frame", "series"] = "frame", dtype: DtypeArg | None = None, convert_axes: bool | None = None, convert_dates: bool | list[str] = True, keep_default_dates: bool = True, precise_float: bool = False, date_unit: str | None = None, encoding: str | None = None, encoding_errors: str | None = "strict", lines: bool = False, chunksize: int | None = None, compression: CompressionOptions = "infer", nrows: int | None = None, storage_options: StorageOptions | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, engine: JSONEngine = "ujson", ) -> DataFrame | Series | JsonReader: """ Convert a JSON string to pandas object. Parameters ---------- path_or_buf : a valid JSON str, path object or file-like object Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.json``. If you want to pass in a path object, pandas accepts any ``os.PathLike``. By file-like object, we refer to objects with a ``read()`` method, such as a file handle (e.g. via builtin ``open`` function) or ``StringIO``. .. deprecated:: 2.1.0 Passing json literal strings is deprecated. orient : str, optional Indication of expected JSON string format. Compatible JSON strings can be produced by ``to_json()`` with a corresponding orient value. The set of possible orients is: - ``'split'`` : dict like ``{{index -> [index], columns -> [columns], data -> [values]}}`` - ``'records'`` : list like ``[{{column -> value}}, ... , {{column -> value}}]`` - ``'index'`` : dict like ``{{index -> {{column -> value}}}}`` - ``'columns'`` : dict like ``{{column -> {{index -> value}}}}`` - ``'values'`` : just the values array - ``'table'`` : dict like ``{{'schema': {{schema}}, 'data': {{data}}}}`` The allowed and default values depend on the value of the `typ` parameter. * when ``typ == 'series'``, - allowed orients are ``{{'split','records','index'}}`` - default is ``'index'`` - The Series index must be unique for orient ``'index'``. * when ``typ == 'frame'``, - allowed orients are ``{{'split','records','index', 'columns','values', 'table'}}`` - default is ``'columns'`` - The DataFrame index must be unique for orients ``'index'`` and ``'columns'``. - The DataFrame columns must be unique for orients ``'index'``, ``'columns'``, and ``'records'``. typ : {{'frame', 'series'}}, default 'frame' The type of object to recover. dtype : bool or dict, default None If True, infer dtypes; if a dict of column to dtype, then use those; if False, then don't infer dtypes at all, applies only to the data. For all ``orient`` values except ``'table'``, default is True. convert_axes : bool, default None Try to convert the axes to the proper dtypes. For all ``orient`` values except ``'table'``, default is True. convert_dates : bool or list of str, default True If True then default datelike columns may be converted (depending on keep_default_dates). If False, no dates will be converted. If a list of column names, then those columns will be converted and default datelike columns may also be converted (depending on keep_default_dates). keep_default_dates : bool, default True If parsing dates (convert_dates is not False), then try to parse the default datelike columns. 
A column label is datelike if * it ends with ``'_at'``, * it ends with ``'_time'``, * it begins with ``'timestamp'``, * it is ``'modified'``, or * it is ``'date'``. precise_float : bool, default False Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (False) is to use fast but less precise builtin functionality. date_unit : str, default None The timestamp unit to detect if converting dates. The default behaviour is to try and detect the correct precision, but if this is not desired then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds, milliseconds, microseconds or nanoseconds respectively. encoding : str, default is 'utf-8' The encoding to use to decode py3 bytes. encoding_errors : str, optional, default "strict" How encoding errors are treated. `List of possible values <https://docs.python.org/3/library/codecs.html#error-handlers>`_ . .. versionadded:: 1.3.0 lines : bool, default False Read the file as a json object per line. chunksize : int, optional Return JsonReader object for iteration. See the `line-delimited json docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_ for more information on ``chunksize``. This can only be passed if `lines=True`. If this is None, the file will be read into memory all at once. {decompression_options} .. versionchanged:: 1.4.0 Zstandard support. nrows : int, optional The number of lines from the line-delimited jsonfile that has to be read. This can only be passed if `lines=True`. If this is None, all the rows will be returned. {storage_options} dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' Back-end data type applied to the resultant :class:`DataFrame` (still experimental). Behaviour is as follows: * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` (default). * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` DataFrame. .. versionadded:: 2.0 engine : {{"ujson", "pyarrow"}}, default "ujson" Parser engine to use. The ``"pyarrow"`` engine is only available when ``lines=True``. .. versionadded:: 2.0 Returns ------- Series, DataFrame, or pandas.api.typing.JsonReader A JsonReader is returned when ``chunksize`` is not ``0`` or ``None``. Otherwise, the type returned depends on the value of ``typ``. See Also -------- DataFrame.to_json : Convert a DataFrame to a JSON string. Series.to_json : Convert a Series to a JSON string. json_normalize : Normalize semi-structured JSON data into a flat table. Notes ----- Specific to ``orient='table'``, if a :class:`DataFrame` with a literal :class:`Index` name of `index` gets written with :func:`to_json`, the subsequent read operation will incorrectly set the :class:`Index` name to ``None``. This is because `index` is also used by :func:`DataFrame.to_json` to denote a missing :class:`Index` name, and the subsequent :func:`read_json` operation cannot distinguish between the two. The same limitation is encountered with a :class:`MultiIndex` and any names beginning with ``'level_'``. Examples -------- >>> from io import StringIO >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... 
columns=['col 1', 'col 2']) Encoding/decoding a Dataframe using ``'split'`` formatted JSON: >>> df.to_json(orient='split') '\ {{\ "columns":["col 1","col 2"],\ "index":["row 1","row 2"],\ "data":[["a","b"],["c","d"]]\ }}\ ' >>> pd.read_json(StringIO(_), orient='split') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> df.to_json(orient='index') '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}' >>> pd.read_json(StringIO(_), orient='index') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> df.to_json(orient='records') '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]' >>> pd.read_json(StringIO(_), orient='records') col 1 col 2 0 a b 1 c d Encoding with Table Schema >>> df.to_json(orient='table') '\ {{"schema":{{"fields":[\ {{"name":"index","type":"string"}},\ {{"name":"col 1","type":"string"}},\ {{"name":"col 2","type":"string"}}],\ "primaryKey":["index"],\ "pandas_version":"1.4.0"}},\ "data":[\ {{"index":"row 1","col 1":"a","col 2":"b"}},\ {{"index":"row 2","col 1":"c","col 2":"d"}}]\ }}\ ' The following example uses ``dtype_backend="numpy_nullable"`` >>> data = '''{{"index": {{"0": 0, "1": 1}}, ... "a": {{"0": 1, "1": null}}, ... "b": {{"0": 2.5, "1": 4.5}}, ... "c": {{"0": true, "1": false}}, ... "d": {{"0": "a", "1": "b"}}, ... "e": {{"0": 1577.2, "1": 1577.1}}}}''' >>> pd.read_json(StringIO(data), dtype_backend="numpy_nullable") index a b c d e 0 0 1 2.5 True a 1577.2 1 1 <NA> 4.5 False b 1577.1 """ if orient == "table" and dtype: raise ValueError("cannot pass both dtype and orient='table'") if orient == "table" and convert_axes: raise ValueError("cannot pass both convert_axes and orient='table'") check_dtype_backend(dtype_backend) if dtype is None and orient != "table": # error: Incompatible types in assignment (expression has type "bool", variable # has type "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], # Type[int], Type[complex], Type[bool], Type[object], Dict[Hashable, # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float], # Type[int], Type[complex], Type[bool], Type[object]]], None]") dtype = True # type: ignore[assignment] if convert_axes is None and orient != "table": convert_axes = True json_reader = JsonReader( path_or_buf, orient=orient, typ=typ, dtype=dtype, convert_axes=convert_axes, convert_dates=convert_dates, keep_default_dates=keep_default_dates, precise_float=precise_float, date_unit=date_unit, encoding=encoding, lines=lines, chunksize=chunksize, compression=compression, nrows=nrows, storage_options=storage_options, encoding_errors=encoding_errors, dtype_backend=dtype_backend, engine=engine, ) if chunksize: return json_reader else: return json_reader.read()
(path_or_buf: 'FilePath | ReadBuffer[str] | ReadBuffer[bytes]', *, orient: 'str | None' = None, typ: "Literal['frame', 'series']" = 'frame', dtype: 'DtypeArg | None' = None, convert_axes: 'bool | None' = None, convert_dates: 'bool | list[str]' = True, keep_default_dates: 'bool' = True, precise_float: 'bool' = False, date_unit: 'str | None' = None, encoding: 'str | None' = None, encoding_errors: 'str | None' = 'strict', lines: 'bool' = False, chunksize: 'int | None' = None, compression: 'CompressionOptions' = 'infer', nrows: 'int | None' = None, storage_options: 'StorageOptions | None' = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>, engine: 'JSONEngine' = 'ujson') -> 'DataFrame | Series | JsonReader'
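To complement the orient examples in the docstring above, here is a sketch of chunked reading of line-delimited JSON: with ``lines=True`` and a ``chunksize``, ``read_json`` returns a ``JsonReader`` that yields DataFrames, which is the usual pattern for files too large to load at once. The records themselves are illustrative.

# Sketch: chunked reading of line-delimited JSON. With lines=True and a
# chunksize, read_json returns a JsonReader that yields DataFrames.
from io import StringIO

import pandas as pd

jsonl = (
    '{"id": 1, "value": 10.5}\n'
    '{"id": 2, "value": 20.0}\n'
    '{"id": 3, "value": 30.25}\n'
)

with pd.read_json(StringIO(jsonl), lines=True, chunksize=2) as reader:
    pieces = [chunk for chunk in reader]

df = pd.concat(pieces, ignore_index=True)
print(df)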