index (int64, 0-731k) | package (string, 2-98 chars, nullable) | name (string, 1-76 chars) | docstring (string, 0-281k chars, nullable) | code (string, 4-1.07M chars, nullable) | signature (string, 2-42.8k chars, nullable) |
---|---|---|---|---|---|
66,256 |
pandas.core.dtypes.dtypes
|
__hash__
| null |
def __hash__(self) -> int:
# make myself hashable
# TODO: update this.
return hash(str(self))
|
(self) -> int
|
66,257 |
pandas.core.dtypes.dtypes
|
__init__
| null |
def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None:
if isinstance(unit, DatetimeTZDtype):
# error: "str" has no attribute "tz"
unit, tz = unit.unit, unit.tz # type: ignore[attr-defined]
if unit != "ns":
if isinstance(unit, str) and tz is None:
# maybe a string like datetime64[ns, tz], which we support for
# now.
result = type(self).construct_from_string(unit)
unit = result.unit
tz = result.tz
msg = (
f"Passing a dtype alias like 'datetime64[ns, {tz}]' "
"to DatetimeTZDtype is no longer supported. Use "
"'DatetimeTZDtype.construct_from_string()' instead."
)
raise ValueError(msg)
if unit not in ["s", "ms", "us", "ns"]:
raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units")
if tz:
tz = timezones.maybe_get_tz(tz)
tz = timezones.tz_standardize(tz)
elif tz is not None:
raise pytz.UnknownTimeZoneError(tz)
if tz is None:
raise TypeError("A 'tz' is required.")
self._unit = unit
self._tz = tz
|
(self, unit: str | pandas.core.dtypes.dtypes.DatetimeTZDtype = 'ns', tz=None) -> NoneType
|
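As a quick, hedged illustration of the constructor above (timezone name chosen for the example), the dtype can be built directly and compared against what tz-aware data produces:

```python
import pandas as pd

# Construct the dtype directly; the unit must be one of "s", "ms", "us", "ns"
# and a timezone is required.
dtype = pd.DatetimeTZDtype(unit="ns", tz="UTC")
print(dtype)                         # datetime64[ns, UTC]

ser = pd.Series(pd.date_range("2024-01-01", periods=2, tz="UTC"))
print(ser.dtype == dtype)            # True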
66,259 |
pandas.core.dtypes.dtypes
|
__repr__
|
Return a string representation for a particular object.
|
def __repr__(self) -> str_type:
"""
Return a string representation for a particular object.
"""
return str(self)
|
(self) -> str
|
66,260 |
pandas.core.dtypes.dtypes
|
__setstate__
| null |
def __setstate__(self, state) -> None:
# for pickle compat. __get_state__ is defined in the
# PandasExtensionDtype superclass and uses the public properties to
# pickle -> need to set the settable private ones here (see GH26067)
self._tz = state["tz"]
self._unit = state["unit"]
|
(self, state) -> NoneType
|
66,261 |
pandas.core.dtypes.dtypes
|
__str__
| null |
def __str__(self) -> str_type:
return f"datetime64[{self.unit}, {self.tz}]"
|
(self) -> str
|
66,262 |
pandas.core.dtypes.dtypes
|
_get_common_dtype
| null |
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
if all(isinstance(t, DatetimeTZDtype) and t.tz == self.tz for t in dtypes):
np_dtype = np.max([cast(DatetimeTZDtype, t).base for t in [self, *dtypes]])
unit = np.datetime_data(np_dtype)[0]
return type(self)(unit=unit, tz=self.tz)
return super()._get_common_dtype(dtypes)
|
(self, dtypes: 'list[DtypeObj]') -> 'DtypeObj | None'
|
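A rough sketch of the behaviour `_get_common_dtype` enables, assuming a pandas version (>= 2.0) with non-nanosecond resolution support: concatenating tz-aware data with matching timezones but different units resolves to the finest unit.

```python
import pandas as pd

utc_ns = pd.Series(pd.date_range("2024-01-01", periods=2, tz="UTC"))
utc_s = utc_ns.astype(pd.DatetimeTZDtype(unit="s", tz="UTC"))

# Matching timezones but different resolutions: the common dtype keeps the
# timezone and resolves to the finest unit.
combined = pd.concat([utc_s, utc_ns])
print(combined.dtype)                # expected: datetime64[ns, UTC]
```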
66,264 |
pandas.io.excel._base
|
ExcelFile
|
Class for parsing tabular Excel sheets into DataFrame objects.
See read_excel for more documentation.
Parameters
----------
path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath),
A file-like object, xlrd workbook or openpyxl workbook.
If a string or path object, expected to be a path to a
.xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``, ``calamine``
Engine compatibility :
- ``xlrd`` supports old-style Excel files (.xls).
- ``openpyxl`` supports newer Excel file formats.
- ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
- ``pyxlsb`` supports Binary Excel files.
- ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
and OpenDocument (.ods) file formats.
.. versionchanged:: 1.2.0
The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
now only supports old-style ``.xls`` files.
When ``engine=None``, the following logic will be
used to determine the engine:
- If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
then `odf <https://pypi.org/project/odfpy/>`_ will be used.
- Otherwise if ``path_or_buffer`` is an xls format,
``xlrd`` will be used.
- Otherwise if ``path_or_buffer`` is in xlsb format,
`pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.
.. versionadded:: 1.3.0
- Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed,
then ``openpyxl`` will be used.
- Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.
.. warning::
Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.
This is not supported, switch to using ``openpyxl`` instead.
engine_kwargs : dict, optional
Arbitrary keyword arguments passed to excel engine.
Examples
--------
>>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
>>> with pd.ExcelFile("myfile.xls") as xls: # doctest: +SKIP
... df1 = pd.read_excel(xls, "Sheet1") # doctest: +SKIP
|
class ExcelFile:
"""
Class for parsing tabular Excel sheets into DataFrame objects.
See read_excel for more documentation.
Parameters
----------
path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath),
A file-like object, xlrd workbook or openpyxl workbook.
If a string or path object, expected to be a path to a
.xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``, ``calamine``
Engine compatibility :
- ``xlrd`` supports old-style Excel files (.xls).
- ``openpyxl`` supports newer Excel file formats.
- ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
- ``pyxlsb`` supports Binary Excel files.
- ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
and OpenDocument (.ods) file formats.
.. versionchanged:: 1.2.0
The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
now only supports old-style ``.xls`` files.
When ``engine=None``, the following logic will be
used to determine the engine:
- If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
then `odf <https://pypi.org/project/odfpy/>`_ will be used.
- Otherwise if ``path_or_buffer`` is an xls format,
``xlrd`` will be used.
- Otherwise if ``path_or_buffer`` is in xlsb format,
`pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.
.. versionadded:: 1.3.0
- Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed,
then ``openpyxl`` will be used.
- Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.
.. warning::
Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.
This is not supported, switch to using ``openpyxl`` instead.
engine_kwargs : dict, optional
Arbitrary keyword arguments passed to excel engine.
Examples
--------
>>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
>>> with pd.ExcelFile("myfile.xls") as xls: # doctest: +SKIP
... df1 = pd.read_excel(xls, "Sheet1") # doctest: +SKIP
"""
from pandas.io.excel._calamine import CalamineReader
from pandas.io.excel._odfreader import ODFReader
from pandas.io.excel._openpyxl import OpenpyxlReader
from pandas.io.excel._pyxlsb import PyxlsbReader
from pandas.io.excel._xlrd import XlrdReader
_engines: Mapping[str, Any] = {
"xlrd": XlrdReader,
"openpyxl": OpenpyxlReader,
"odf": ODFReader,
"pyxlsb": PyxlsbReader,
"calamine": CalamineReader,
}
def __init__(
self,
path_or_buffer,
engine: str | None = None,
storage_options: StorageOptions | None = None,
engine_kwargs: dict | None = None,
) -> None:
if engine_kwargs is None:
engine_kwargs = {}
if engine is not None and engine not in self._engines:
raise ValueError(f"Unknown engine: {engine}")
# First argument can also be bytes, so create a buffer
if isinstance(path_or_buffer, bytes):
path_or_buffer = BytesIO(path_or_buffer)
warnings.warn(
"Passing bytes to 'read_excel' is deprecated and "
"will be removed in a future version. To read from a "
"byte string, wrap it in a `BytesIO` object.",
FutureWarning,
stacklevel=find_stack_level(),
)
# Could be a str, ExcelFile, Book, etc.
self.io = path_or_buffer
# Always a string
self._io = stringify_path(path_or_buffer)
# Determine xlrd version if installed
if import_optional_dependency("xlrd", errors="ignore") is None:
xlrd_version = None
else:
import xlrd
xlrd_version = Version(get_version(xlrd))
if engine is None:
# Only determine ext if it is needed
ext: str | None
if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
ext = "xls"
else:
ext = inspect_excel_format(
content_or_path=path_or_buffer, storage_options=storage_options
)
if ext is None:
raise ValueError(
"Excel file format cannot be determined, you must specify "
"an engine manually."
)
engine = config.get_option(f"io.excel.{ext}.reader", silent=True)
if engine == "auto":
engine = get_default_engine(ext, mode="reader")
assert engine is not None
self.engine = engine
self.storage_options = storage_options
self._reader = self._engines[engine](
self._io,
storage_options=storage_options,
engine_kwargs=engine_kwargs,
)
def __fspath__(self):
return self._io
def parse(
self,
sheet_name: str | int | list[int] | list[str] | None = 0,
header: int | Sequence[int] | None = 0,
names: SequenceNotStr[Hashable] | range | None = None,
index_col: int | Sequence[int] | None = None,
usecols=None,
converters=None,
true_values: Iterable[Hashable] | None = None,
false_values: Iterable[Hashable] | None = None,
skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
nrows: int | None = None,
na_values=None,
parse_dates: list | dict | bool = False,
date_parser: Callable | lib.NoDefault = lib.no_default,
date_format: str | dict[Hashable, str] | None = None,
thousands: str | None = None,
comment: str | None = None,
skipfooter: int = 0,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
**kwds,
) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]:
"""
Parse specified sheet(s) into a DataFrame.
Equivalent to read_excel(ExcelFile, ...) See the read_excel
docstring for more info on accepted parameters.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file.
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_excel('myfile.xlsx') # doctest: +SKIP
>>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
>>> file.parse() # doctest: +SKIP
"""
return self._reader.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
date_format=date_format,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
dtype_backend=dtype_backend,
**kwds,
)
@property
def book(self):
return self._reader.book
@property
def sheet_names(self):
return self._reader.sheet_names
def close(self) -> None:
"""close io if necessary"""
self._reader.close()
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.close()
|
(path_or_buffer, engine: 'str | None' = None, storage_options: 'StorageOptions | None' = None, engine_kwargs: 'dict | None' = None) -> 'None'
|
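A short usage sketch of the engine selection described above (the file name is a placeholder; openpyxl must be installed to read .xlsx):

```python
import pandas as pd

# Engine can be left as None and inferred from the file contents, or pinned
# explicitly; "report.xlsx" is a placeholder path.
with pd.ExcelFile("report.xlsx", engine="openpyxl") as xls:
    print(xls.engine)                # "openpyxl"
    print(xls.sheet_names)           # e.g. ["Sheet1"]
    df = xls.parse(sheet_name=0)     # first sheet as a DataFrame
```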
66,266 |
pandas.io.excel._base
|
__exit__
| null |
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.close()
|
(self, exc_type: 'type[BaseException] | None', exc_value: 'BaseException | None', traceback: 'TracebackType | None') -> 'None'
|
66,267 |
pandas.io.excel._base
|
__fspath__
| null |
def __fspath__(self):
return self._io
|
(self)
|
66,268 |
pandas.io.excel._base
|
__init__
| null |
def __init__(
self,
path_or_buffer,
engine: str | None = None,
storage_options: StorageOptions | None = None,
engine_kwargs: dict | None = None,
) -> None:
if engine_kwargs is None:
engine_kwargs = {}
if engine is not None and engine not in self._engines:
raise ValueError(f"Unknown engine: {engine}")
# First argument can also be bytes, so create a buffer
if isinstance(path_or_buffer, bytes):
path_or_buffer = BytesIO(path_or_buffer)
warnings.warn(
"Passing bytes to 'read_excel' is deprecated and "
"will be removed in a future version. To read from a "
"byte string, wrap it in a `BytesIO` object.",
FutureWarning,
stacklevel=find_stack_level(),
)
# Could be a str, ExcelFile, Book, etc.
self.io = path_or_buffer
# Always a string
self._io = stringify_path(path_or_buffer)
# Determine xlrd version if installed
if import_optional_dependency("xlrd", errors="ignore") is None:
xlrd_version = None
else:
import xlrd
xlrd_version = Version(get_version(xlrd))
if engine is None:
# Only determine ext if it is needed
ext: str | None
if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
ext = "xls"
else:
ext = inspect_excel_format(
content_or_path=path_or_buffer, storage_options=storage_options
)
if ext is None:
raise ValueError(
"Excel file format cannot be determined, you must specify "
"an engine manually."
)
engine = config.get_option(f"io.excel.{ext}.reader", silent=True)
if engine == "auto":
engine = get_default_engine(ext, mode="reader")
assert engine is not None
self.engine = engine
self.storage_options = storage_options
self._reader = self._engines[engine](
self._io,
storage_options=storage_options,
engine_kwargs=engine_kwargs,
)
|
(self, path_or_buffer, engine: 'str | None' = None, storage_options: 'StorageOptions | None' = None, engine_kwargs: 'dict | None' = None) -> 'None'
|
66,269 |
pandas.io.excel._base
|
close
|
close io if necessary
|
def close(self) -> None:
"""close io if necessary"""
self._reader.close()
|
(self) -> NoneType
|
66,270 |
pandas.io.excel._base
|
parse
|
Parse specified sheet(s) into a DataFrame.
Equivalent to read_excel(ExcelFile, ...) See the read_excel
docstring for more info on accepted parameters.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file.
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_excel('myfile.xlsx') # doctest: +SKIP
>>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
>>> file.parse() # doctest: +SKIP
|
def parse(
self,
sheet_name: str | int | list[int] | list[str] | None = 0,
header: int | Sequence[int] | None = 0,
names: SequenceNotStr[Hashable] | range | None = None,
index_col: int | Sequence[int] | None = None,
usecols=None,
converters=None,
true_values: Iterable[Hashable] | None = None,
false_values: Iterable[Hashable] | None = None,
skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
nrows: int | None = None,
na_values=None,
parse_dates: list | dict | bool = False,
date_parser: Callable | lib.NoDefault = lib.no_default,
date_format: str | dict[Hashable, str] | None = None,
thousands: str | None = None,
comment: str | None = None,
skipfooter: int = 0,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
**kwds,
) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]:
"""
Parse specified sheet(s) into a DataFrame.
Equivalent to read_excel(ExcelFile, ...) See the read_excel
docstring for more info on accepted parameters.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file.
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_excel('myfile.xlsx') # doctest: +SKIP
>>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
>>> file.parse() # doctest: +SKIP
"""
return self._reader.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
date_format=date_format,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
dtype_backend=dtype_backend,
**kwds,
)
|
(self, sheet_name: 'str | int | list[int] | list[str] | None' = 0, header: 'int | Sequence[int] | None' = 0, names: 'SequenceNotStr[Hashable] | range | None' = None, index_col: 'int | Sequence[int] | None' = None, usecols=None, converters=None, true_values: 'Iterable[Hashable] | None' = None, false_values: 'Iterable[Hashable] | None' = None, skiprows: 'Sequence[int] | int | Callable[[int], object] | None' = None, nrows: 'int | None' = None, na_values=None, parse_dates: 'list | dict | bool' = False, date_parser: 'Callable | lib.NoDefault' = <no_default>, date_format: 'str | dict[Hashable, str] | None' = None, thousands: 'str | None' = None, comment: 'str | None' = None, skipfooter: 'int' = 0, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>, **kwds) -> 'DataFrame | dict[str, DataFrame] | dict[int, DataFrame]'
|
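One behaviour worth noting from the signature above: passing ``sheet_name=None`` parses every sheet and returns a dict keyed by sheet name (sketch, placeholder path):

```python
import pandas as pd

# Placeholder path; with sheet_name=None every sheet is parsed and a dict
# keyed by sheet name is returned.
with pd.ExcelFile("report.xlsx") as xls:
    frames = xls.parse(sheet_name=None)
    for name, frame in frames.items():
        print(name, frame.shape)
```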
66,271 |
pandas.io.excel._base
|
ExcelWriter
|
Class for writing DataFrame objects into excel sheets.
Default is to use:
* `xlsxwriter <https://pypi.org/project/XlsxWriter/>`__ for xlsx files if xlsxwriter
is installed otherwise `openpyxl <https://pypi.org/project/openpyxl/>`__
* `odswriter <https://pypi.org/project/odswriter/>`__ for ods files
See ``DataFrame.to_excel`` for typical usage.
The writer should be used as a context manager. Otherwise, call `close()` to save
and close any opened file handles.
Parameters
----------
path : str or typing.BinaryIO
Path to xls or xlsx or ods file.
engine : str (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_format : str, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
datetime_format : str, default None
Format string for datetime objects written into Excel files.
(e.g. 'YYYY-MM-DD HH:MM:SS').
mode : {'w', 'a'}, default 'w'
File mode to use (write or append). Append does not work with fsspec URLs.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
are forwarded to ``urllib.request.Request`` as header options. For other
URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
details, and for more examples on storage options refer `here
<https://pandas.pydata.org/docs/user_guide/io.html?
highlight=storage_options#reading-writing-remote-files>`_.
if_sheet_exists : {'error', 'new', 'replace', 'overlay'}, default 'error'
How to behave when trying to write to a sheet that already
exists (append mode only).
* error: raise a ValueError.
* new: Create a new sheet, with a name determined by the engine.
* replace: Delete the contents of the sheet before writing to it.
* overlay: Write contents to the existing sheet without first removing,
but possibly over top of, the existing contents.
.. versionadded:: 1.3.0
.. versionchanged:: 1.4.0
Added ``overlay`` option
engine_kwargs : dict, optional
Keyword arguments to be passed into the engine. These will be passed to
the following functions of the respective engines:
* xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)``
* openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)``
* openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)``
* odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)``
.. versionadded:: 1.3.0
Notes
-----
For compatibility with CSV writers, ExcelWriter serializes lists
and dicts to strings before writing.
Examples
--------
Default usage:
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
>>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
... df.to_excel(writer) # doctest: +SKIP
To write to separate sheets in a single file:
>>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"]) # doctest: +SKIP
>>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
>>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
... df1.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
... df2.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
You can set the date format or datetime format:
>>> from datetime import date, datetime # doctest: +SKIP
>>> df = pd.DataFrame(
... [
... [date(2014, 1, 31), date(1999, 9, 24)],
... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
... ],
... index=["Date", "Datetime"],
... columns=["X", "Y"],
... ) # doctest: +SKIP
>>> with pd.ExcelWriter(
... "path_to_file.xlsx",
... date_format="YYYY-MM-DD",
... datetime_format="YYYY-MM-DD HH:MM:SS"
... ) as writer:
... df.to_excel(writer) # doctest: +SKIP
You can also append to an existing Excel file:
>>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:
... df.to_excel(writer, sheet_name="Sheet3") # doctest: +SKIP
Here, the `if_sheet_exists` parameter can be set to replace a sheet if it
already exists:
>>> with ExcelWriter(
... "path_to_file.xlsx",
... mode="a",
... engine="openpyxl",
... if_sheet_exists="replace",
... ) as writer:
... df.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
You can also write multiple DataFrames to a single sheet. Note that the
``if_sheet_exists`` parameter needs to be set to ``overlay``:
>>> with ExcelWriter("path_to_file.xlsx",
... mode="a",
... engine="openpyxl",
... if_sheet_exists="overlay",
... ) as writer:
... df1.to_excel(writer, sheet_name="Sheet1")
... df2.to_excel(writer, sheet_name="Sheet1", startcol=3) # doctest: +SKIP
You can store Excel file in RAM:
>>> import io
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
>>> buffer = io.BytesIO()
>>> with pd.ExcelWriter(buffer) as writer:
... df.to_excel(writer)
You can pack Excel file into zip archive:
>>> import zipfile # doctest: +SKIP
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
>>> with zipfile.ZipFile("path_to_file.zip", "w") as zf:
... with zf.open("filename.xlsx", "w") as buffer:
... with pd.ExcelWriter(buffer) as writer:
... df.to_excel(writer) # doctest: +SKIP
You can specify additional arguments to the underlying engine:
>>> with pd.ExcelWriter(
... "path_to_file.xlsx",
... engine="xlsxwriter",
... engine_kwargs={"options": {"nan_inf_to_errors": True}}
... ) as writer:
... df.to_excel(writer) # doctest: +SKIP
In append mode, ``engine_kwargs`` are passed through to
openpyxl's ``load_workbook``:
>>> with pd.ExcelWriter(
... "path_to_file.xlsx",
... engine="openpyxl",
... mode="a",
... engine_kwargs={"keep_vba": True}
... ) as writer:
... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
|
class ExcelWriter(Generic[_WorkbookT]):
"""
Class for writing DataFrame objects into excel sheets.
Default is to use:
* `xlsxwriter <https://pypi.org/project/XlsxWriter/>`__ for xlsx files if xlsxwriter
is installed otherwise `openpyxl <https://pypi.org/project/openpyxl/>`__
* `odswriter <https://pypi.org/project/odswriter/>`__ for ods files
See ``DataFrame.to_excel`` for typical usage.
The writer should be used as a context manager. Otherwise, call `close()` to save
and close any opened file handles.
Parameters
----------
path : str or typing.BinaryIO
Path to xls or xlsx or ods file.
engine : str (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_format : str, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
datetime_format : str, default None
Format string for datetime objects written into Excel files.
(e.g. 'YYYY-MM-DD HH:MM:SS').
mode : {{'w', 'a'}}, default 'w'
File mode to use (write or append). Append does not work with fsspec URLs.
{storage_options}
if_sheet_exists : {{'error', 'new', 'replace', 'overlay'}}, default 'error'
How to behave when trying to write to a sheet that already
exists (append mode only).
* error: raise a ValueError.
* new: Create a new sheet, with a name determined by the engine.
* replace: Delete the contents of the sheet before writing to it.
* overlay: Write contents to the existing sheet without first removing,
but possibly over top of, the existing contents.
.. versionadded:: 1.3.0
.. versionchanged:: 1.4.0
Added ``overlay`` option
engine_kwargs : dict, optional
Keyword arguments to be passed into the engine. These will be passed to
the following functions of the respective engines:
* xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)``
* openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)``
* openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)``
* odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)``
.. versionadded:: 1.3.0
Notes
-----
For compatibility with CSV writers, ExcelWriter serializes lists
and dicts to strings before writing.
Examples
--------
Default usage:
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
>>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
... df.to_excel(writer) # doctest: +SKIP
To write to separate sheets in a single file:
>>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"]) # doctest: +SKIP
>>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
>>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
... df1.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
... df2.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
You can set the date format or datetime format:
>>> from datetime import date, datetime # doctest: +SKIP
>>> df = pd.DataFrame(
... [
... [date(2014, 1, 31), date(1999, 9, 24)],
... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
... ],
... index=["Date", "Datetime"],
... columns=["X", "Y"],
... ) # doctest: +SKIP
>>> with pd.ExcelWriter(
... "path_to_file.xlsx",
... date_format="YYYY-MM-DD",
... datetime_format="YYYY-MM-DD HH:MM:SS"
... ) as writer:
... df.to_excel(writer) # doctest: +SKIP
You can also append to an existing Excel file:
>>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:
... df.to_excel(writer, sheet_name="Sheet3") # doctest: +SKIP
Here, the `if_sheet_exists` parameter can be set to replace a sheet if it
already exists:
>>> with ExcelWriter(
... "path_to_file.xlsx",
... mode="a",
... engine="openpyxl",
... if_sheet_exists="replace",
... ) as writer:
... df.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
You can also write multiple DataFrames to a single sheet. Note that the
``if_sheet_exists`` parameter needs to be set to ``overlay``:
>>> with ExcelWriter("path_to_file.xlsx",
... mode="a",
... engine="openpyxl",
... if_sheet_exists="overlay",
... ) as writer:
... df1.to_excel(writer, sheet_name="Sheet1")
... df2.to_excel(writer, sheet_name="Sheet1", startcol=3) # doctest: +SKIP
You can store Excel file in RAM:
>>> import io
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
>>> buffer = io.BytesIO()
>>> with pd.ExcelWriter(buffer) as writer:
... df.to_excel(writer)
You can pack Excel file into zip archive:
>>> import zipfile # doctest: +SKIP
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
>>> with zipfile.ZipFile("path_to_file.zip", "w") as zf:
... with zf.open("filename.xlsx", "w") as buffer:
... with pd.ExcelWriter(buffer) as writer:
... df.to_excel(writer) # doctest: +SKIP
You can specify additional arguments to the underlying engine:
>>> with pd.ExcelWriter(
... "path_to_file.xlsx",
... engine="xlsxwriter",
... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}}
... ) as writer:
... df.to_excel(writer) # doctest: +SKIP
In append mode, ``engine_kwargs`` are passed through to
openpyxl's ``load_workbook``:
>>> with pd.ExcelWriter(
... "path_to_file.xlsx",
... engine="openpyxl",
... mode="a",
... engine_kwargs={{"keep_vba": True}}
... ) as writer:
... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> called to write additional DataFrames to disk
# - ``_supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
# - ``_engine`` - string that gives the engine name. Necessary to
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
# lookup.
# - ``save(self)`` --> called to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technically, ExcelWriter implementations don't need to subclass
# ExcelWriter.
_engine: str
_supported_extensions: tuple[str, ...]
def __new__(
cls,
path: FilePath | WriteExcelBuffer | ExcelWriter,
engine: str | None = None,
date_format: str | None = None,
datetime_format: str | None = None,
mode: str = "w",
storage_options: StorageOptions | None = None,
if_sheet_exists: ExcelWriterIfSheetExists | None = None,
engine_kwargs: dict | None = None,
) -> Self:
# only switch class if generic(ExcelWriter)
if cls is ExcelWriter:
if engine is None or (isinstance(engine, str) and engine == "auto"):
if isinstance(path, str):
ext = os.path.splitext(path)[-1][1:]
else:
ext = "xlsx"
try:
engine = config.get_option(f"io.excel.{ext}.writer", silent=True)
if engine == "auto":
engine = get_default_engine(ext, mode="writer")
except KeyError as err:
raise ValueError(f"No engine for filetype: '{ext}'") from err
# for mypy
assert engine is not None
# error: Incompatible types in assignment (expression has type
# "type[ExcelWriter[Any]]", variable has type "type[Self]")
cls = get_writer(engine) # type: ignore[assignment]
return object.__new__(cls)
# declare external properties you can count on
_path = None
@property
def supported_extensions(self) -> tuple[str, ...]:
"""Extensions that writer engine supports."""
return self._supported_extensions
@property
def engine(self) -> str:
"""Name of engine."""
return self._engine
@property
def sheets(self) -> dict[str, Any]:
"""Mapping of sheet names to sheet objects."""
raise NotImplementedError
@property
def book(self) -> _WorkbookT:
"""
Book instance. Class type will depend on the engine used.
This attribute can be used to access engine-specific features.
"""
raise NotImplementedError
def _write_cells(
self,
cells,
sheet_name: str | None = None,
startrow: int = 0,
startcol: int = 0,
freeze_panes: tuple[int, int] | None = None,
) -> None:
"""
Write given formatted cells into an Excel sheet
Parameters
----------
cells : generator
cell of formatted data to save to Excel sheet
sheet_name : str, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
freeze_panes: int tuple of length 2
contains the bottom-most row and right-most column to freeze
"""
raise NotImplementedError
def _save(self) -> None:
"""
Save workbook to disk.
"""
raise NotImplementedError
def __init__(
self,
path: FilePath | WriteExcelBuffer | ExcelWriter,
engine: str | None = None,
date_format: str | None = None,
datetime_format: str | None = None,
mode: str = "w",
storage_options: StorageOptions | None = None,
if_sheet_exists: ExcelWriterIfSheetExists | None = None,
engine_kwargs: dict[str, Any] | None = None,
) -> None:
# validate that this engine can handle the extension
if isinstance(path, str):
ext = os.path.splitext(path)[-1]
self.check_extension(ext)
# use mode to open the file
if "b" not in mode:
mode += "b"
# use "a" for the user to append data to excel but internally use "r+" to let
# the excel backend first read the existing file and then write any data to it
mode = mode.replace("a", "r+")
if if_sheet_exists not in (None, "error", "new", "replace", "overlay"):
raise ValueError(
f"'{if_sheet_exists}' is not valid for if_sheet_exists. "
"Valid options are 'error', 'new', 'replace' and 'overlay'."
)
if if_sheet_exists and "r+" not in mode:
raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")
if if_sheet_exists is None:
if_sheet_exists = "error"
self._if_sheet_exists = if_sheet_exists
# cast ExcelWriter to avoid adding 'if self._handles is not None'
self._handles = IOHandles(
cast(IO[bytes], path), compression={"compression": None}
)
if not isinstance(path, ExcelWriter):
self._handles = get_handle(
path, mode, storage_options=storage_options, is_text=False
)
self._cur_sheet = None
if date_format is None:
self._date_format = "YYYY-MM-DD"
else:
self._date_format = date_format
if datetime_format is None:
self._datetime_format = "YYYY-MM-DD HH:MM:SS"
else:
self._datetime_format = datetime_format
self._mode = mode
@property
def date_format(self) -> str:
"""
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
"""
return self._date_format
@property
def datetime_format(self) -> str:
"""
Format string for datetimes written into Excel files (e.g. 'YYYY-MM-DD HH:MM:SS').
"""
return self._datetime_format
@property
def if_sheet_exists(self) -> str:
"""
How to behave when writing to a sheet that already exists in append mode.
"""
return self._if_sheet_exists
def __fspath__(self) -> str:
return getattr(self._handles.handle, "name", "")
def _get_sheet_name(self, sheet_name: str | None) -> str:
if sheet_name is None:
sheet_name = self._cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError("Must pass explicit sheet_name or set _cur_sheet property")
return sheet_name
def _value_with_fmt(
self, val
) -> tuple[
int | float | bool | str | datetime.datetime | datetime.date, str | None
]:
"""
Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
"""
fmt = None
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
elif isinstance(val, datetime.datetime):
fmt = self._datetime_format
elif isinstance(val, datetime.date):
fmt = self._date_format
elif isinstance(val, datetime.timedelta):
val = val.total_seconds() / 86400
fmt = "0"
else:
val = str(val)
return val, fmt
@classmethod
def check_extension(cls, ext: str) -> Literal[True]:
"""
checks the path's extension against the writer's supported
extensions. Raises ValueError if the extension is not supported.
"""
if ext.startswith("."):
ext = ext[1:]
if not any(ext in extension for extension in cls._supported_extensions):
raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'")
return True
# Allow use as a contextmanager
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.close()
def close(self) -> None:
"""synonym for save, to make it more file-like"""
self._save()
self._handles.close()
|
(path: 'FilePath | WriteExcelBuffer | ExcelWriter', engine: 'str | None' = None, date_format: 'str | None' = None, datetime_format: 'str | None' = None, mode: 'str' = 'w', storage_options: 'StorageOptions | None' = None, if_sheet_exists: 'ExcelWriterIfSheetExists | None' = None, engine_kwargs: 'dict | None' = None) -> 'Self'
|
66,274 |
pandas.io.excel._base
|
__fspath__
| null |
def __fspath__(self) -> str:
return getattr(self._handles.handle, "name", "")
|
(self) -> str
|
66,275 |
pandas.io.excel._base
|
__init__
| null |
def __init__(
self,
path: FilePath | WriteExcelBuffer | ExcelWriter,
engine: str | None = None,
date_format: str | None = None,
datetime_format: str | None = None,
mode: str = "w",
storage_options: StorageOptions | None = None,
if_sheet_exists: ExcelWriterIfSheetExists | None = None,
engine_kwargs: dict[str, Any] | None = None,
) -> None:
# validate that this engine can handle the extension
if isinstance(path, str):
ext = os.path.splitext(path)[-1]
self.check_extension(ext)
# use mode to open the file
if "b" not in mode:
mode += "b"
# use "a" for the user to append data to excel but internally use "r+" to let
# the excel backend first read the existing file and then write any data to it
mode = mode.replace("a", "r+")
if if_sheet_exists not in (None, "error", "new", "replace", "overlay"):
raise ValueError(
f"'{if_sheet_exists}' is not valid for if_sheet_exists. "
"Valid options are 'error', 'new', 'replace' and 'overlay'."
)
if if_sheet_exists and "r+" not in mode:
raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")
if if_sheet_exists is None:
if_sheet_exists = "error"
self._if_sheet_exists = if_sheet_exists
# cast ExcelWriter to avoid adding 'if self._handles is not None'
self._handles = IOHandles(
cast(IO[bytes], path), compression={"compression": None}
)
if not isinstance(path, ExcelWriter):
self._handles = get_handle(
path, mode, storage_options=storage_options, is_text=False
)
self._cur_sheet = None
if date_format is None:
self._date_format = "YYYY-MM-DD"
else:
self._date_format = date_format
if datetime_format is None:
self._datetime_format = "YYYY-MM-DD HH:MM:SS"
else:
self._datetime_format = datetime_format
self._mode = mode
|
(self, path: 'FilePath | WriteExcelBuffer | ExcelWriter', engine: 'str | None' = None, date_format: 'str | None' = None, datetime_format: 'str | None' = None, mode: 'str' = 'w', storage_options: 'StorageOptions | None' = None, if_sheet_exists: 'ExcelWriterIfSheetExists | None' = None, engine_kwargs: 'dict[str, Any] | None' = None) -> 'None'
|
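The mode handling in the constructor above is easy to miss: a binary flag is always added, and append mode is rewritten to ``r+`` so the engine can read the existing workbook before writing. A minimal standalone sketch of that logic (the helper name is invented for illustration):

```python
def _normalize_excel_mode(mode: str) -> str:
    # Hypothetical helper mirroring ExcelWriter.__init__: always open the
    # handle in binary mode, and turn append into read/write so the engine
    # can load the existing workbook before writing to it.
    if "b" not in mode:
        mode += "b"
    return mode.replace("a", "r+")

assert _normalize_excel_mode("w") == "wb"
assert _normalize_excel_mode("a") == "r+b"
```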
66,276 |
pandas.io.excel._base
|
__new__
| null |
def __new__(
cls,
path: FilePath | WriteExcelBuffer | ExcelWriter,
engine: str | None = None,
date_format: str | None = None,
datetime_format: str | None = None,
mode: str = "w",
storage_options: StorageOptions | None = None,
if_sheet_exists: ExcelWriterIfSheetExists | None = None,
engine_kwargs: dict | None = None,
) -> Self:
# only switch class if generic(ExcelWriter)
if cls is ExcelWriter:
if engine is None or (isinstance(engine, str) and engine == "auto"):
if isinstance(path, str):
ext = os.path.splitext(path)[-1][1:]
else:
ext = "xlsx"
try:
engine = config.get_option(f"io.excel.{ext}.writer", silent=True)
if engine == "auto":
engine = get_default_engine(ext, mode="writer")
except KeyError as err:
raise ValueError(f"No engine for filetype: '{ext}'") from err
# for mypy
assert engine is not None
# error: Incompatible types in assignment (expression has type
# "type[ExcelWriter[Any]]", variable has type "type[Self]")
cls = get_writer(engine) # type: ignore[assignment]
return object.__new__(cls)
|
(cls, path: 'FilePath | WriteExcelBuffer | ExcelWriter', engine: 'str | None' = None, date_format: 'str | None' = None, datetime_format: 'str | None' = None, mode: 'str' = 'w', storage_options: 'StorageOptions | None' = None, if_sheet_exists: 'ExcelWriterIfSheetExists | None' = None, engine_kwargs: 'dict | None' = None) -> 'Self'
|
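Because ``__new__`` swaps in an engine-specific subclass, the object returned is not a plain ``ExcelWriter``. A small check (placeholder file name; an xlsx engine such as openpyxl or xlsxwriter must be installed):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
# Placeholder path; the engine is resolved from the ".xlsx" extension.
with pd.ExcelWriter("demo.xlsx") as writer:
    print(type(writer).__name__)     # an engine-specific subclass, not ExcelWriter
    print(writer.engine)             # e.g. "openpyxl" or "xlsxwriter"
    df.to_excel(writer, sheet_name="Sheet1")
```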
66,277 |
pandas.io.excel._base
|
_get_sheet_name
| null |
def _get_sheet_name(self, sheet_name: str | None) -> str:
if sheet_name is None:
sheet_name = self._cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError("Must pass explicit sheet_name or set _cur_sheet property")
return sheet_name
|
(self, sheet_name: str | None) -> str
|
66,278 |
pandas.io.excel._base
|
_save
|
Save workbook to disk.
|
def _save(self) -> None:
"""
Save workbook to disk.
"""
raise NotImplementedError
|
(self) -> NoneType
|
66,279 |
pandas.io.excel._base
|
_value_with_fmt
|
Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
|
def _value_with_fmt(
self, val
) -> tuple[
int | float | bool | str | datetime.datetime | datetime.date, str | None
]:
"""
Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
"""
fmt = None
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
elif isinstance(val, datetime.datetime):
fmt = self._datetime_format
elif isinstance(val, datetime.date):
fmt = self._date_format
elif isinstance(val, datetime.timedelta):
val = val.total_seconds() / 86400
fmt = "0"
else:
val = str(val)
return val, fmt
|
(self, val) -> tuple[int | float | bool | str | datetime.datetime | datetime.date, str | None]
|
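The timedelta branch in ``_value_with_fmt`` stores durations as Excel serial days; a quick arithmetic check of that conversion:

```python
import datetime

td = datetime.timedelta(days=1, hours=12)
# Excel represents durations as fractional days: 36 hours -> 1.5
print(td.total_seconds() / 86400)    # 1.5
```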
66,280 |
pandas.io.excel._base
|
_write_cells
|
Write given formatted cells into an Excel sheet
Parameters
----------
cells : generator
cell of formatted data to save to Excel sheet
sheet_name : str, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
freeze_panes: int tuple of length 2
contains the bottom-most row and right-most column to freeze
|
def _write_cells(
self,
cells,
sheet_name: str | None = None,
startrow: int = 0,
startcol: int = 0,
freeze_panes: tuple[int, int] | None = None,
) -> None:
"""
Write given formatted cells into an Excel sheet
Parameters
----------
cells : generator
cell of formatted data to save to Excel sheet
sheet_name : str, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
freeze_panes: int tuple of length 2
contains the bottom-most row and right-most column to freeze
"""
raise NotImplementedError
|
(self, cells, sheet_name: Optional[str] = None, startrow: int = 0, startcol: int = 0, freeze_panes: Optional[tuple[int, int]] = None) -> NoneType
|
66,281 |
pandas.io.excel._base
|
close
|
synonym for save, to make it more file-like
|
def close(self) -> None:
"""synonym for save, to make it more file-like"""
self._save()
self._handles.close()
|
(self) -> NoneType
|
66,282 |
pandas.core.flags
|
Flags
|
Flags that apply to pandas objects.
Parameters
----------
obj : Series or DataFrame
The object these flags are associated with.
allows_duplicate_labels : bool, default True
Whether to allow duplicate labels in this object. By default,
duplicate labels are permitted. Setting this to ``False`` will
cause an :class:`errors.DuplicateLabelError` to be raised when
`index` (or columns for DataFrame) is not unique, or any
subsequent operation introduces duplicates.
See :ref:`duplicates.disallow` for more.
.. warning::
This is an experimental feature. Currently, many methods fail to
propagate the ``allows_duplicate_labels`` value. In future versions
it is expected that every method taking or returning one or more
DataFrame or Series objects will propagate ``allows_duplicate_labels``.
Examples
--------
Attributes can be set in two ways:
>>> df = pd.DataFrame()
>>> df.flags
<Flags(allows_duplicate_labels=True)>
>>> df.flags.allows_duplicate_labels = False
>>> df.flags
<Flags(allows_duplicate_labels=False)>
>>> df.flags['allows_duplicate_labels'] = True
>>> df.flags
<Flags(allows_duplicate_labels=True)>
|
class Flags:
"""
Flags that apply to pandas objects.
Parameters
----------
obj : Series or DataFrame
The object these flags are associated with.
allows_duplicate_labels : bool, default True
Whether to allow duplicate labels in this object. By default,
duplicate labels are permitted. Setting this to ``False`` will
cause an :class:`errors.DuplicateLabelError` to be raised when
`index` (or columns for DataFrame) is not unique, or any
subsequent operation introduces duplicates.
See :ref:`duplicates.disallow` for more.
.. warning::
This is an experimental feature. Currently, many methods fail to
propagate the ``allows_duplicate_labels`` value. In future versions
it is expected that every method taking or returning one or more
DataFrame or Series objects will propagate ``allows_duplicate_labels``.
Examples
--------
Attributes can be set in two ways:
>>> df = pd.DataFrame()
>>> df.flags
<Flags(allows_duplicate_labels=True)>
>>> df.flags.allows_duplicate_labels = False
>>> df.flags
<Flags(allows_duplicate_labels=False)>
>>> df.flags['allows_duplicate_labels'] = True
>>> df.flags
<Flags(allows_duplicate_labels=True)>
"""
_keys: set[str] = {"allows_duplicate_labels"}
def __init__(self, obj: NDFrame, *, allows_duplicate_labels: bool) -> None:
self._allows_duplicate_labels = allows_duplicate_labels
self._obj = weakref.ref(obj)
@property
def allows_duplicate_labels(self) -> bool:
"""
Whether this object allows duplicate labels.
Setting ``allows_duplicate_labels=False`` ensures that the
index (and columns of a DataFrame) are unique. Most methods
that accept and return a Series or DataFrame will propagate
the value of ``allows_duplicate_labels``.
See :ref:`duplicates` for more.
See Also
--------
DataFrame.attrs : Set global metadata on this object.
DataFrame.set_flags : Set global flags on this object.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]}, index=['a', 'a'])
>>> df.flags.allows_duplicate_labels
True
>>> df.flags.allows_duplicate_labels = False
Traceback (most recent call last):
...
pandas.errors.DuplicateLabelError: Index has duplicates.
positions
label
a [0, 1]
"""
return self._allows_duplicate_labels
@allows_duplicate_labels.setter
def allows_duplicate_labels(self, value: bool) -> None:
value = bool(value)
obj = self._obj()
if obj is None:
raise ValueError("This flag's object has been deleted.")
if not value:
for ax in obj.axes:
ax._maybe_check_unique()
self._allows_duplicate_labels = value
def __getitem__(self, key: str):
if key not in self._keys:
raise KeyError(key)
return getattr(self, key)
def __setitem__(self, key: str, value) -> None:
if key not in self._keys:
raise ValueError(f"Unknown flag {key}. Must be one of {self._keys}")
setattr(self, key, value)
def __repr__(self) -> str:
return f"<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>"
def __eq__(self, other) -> bool:
if isinstance(other, type(self)):
return self.allows_duplicate_labels == other.allows_duplicate_labels
return False
|
(obj: 'NDFrame', *, allows_duplicate_labels: 'bool') -> 'None'
|
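Flags are normally set through ``DataFrame.set_flags`` rather than by constructing ``Flags`` directly; a short sketch of that public route:

```python
import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
strict = df.set_flags(allows_duplicate_labels=False)   # returns a new object
print(strict.flags.allows_duplicate_labels)            # False
print(df.flags["allows_duplicate_labels"])             # True: original is unchanged
```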
66,283 |
pandas.core.flags
|
__eq__
| null |
def __eq__(self, other) -> bool:
if isinstance(other, type(self)):
return self.allows_duplicate_labels == other.allows_duplicate_labels
return False
|
(self, other) -> bool
|
66,284 |
pandas.core.flags
|
__getitem__
| null |
def __getitem__(self, key: str):
if key not in self._keys:
raise KeyError(key)
return getattr(self, key)
|
(self, key: str)
|
66,285 |
pandas.core.flags
|
__init__
| null |
def __init__(self, obj: NDFrame, *, allows_duplicate_labels: bool) -> None:
self._allows_duplicate_labels = allows_duplicate_labels
self._obj = weakref.ref(obj)
|
(self, obj: 'NDFrame', *, allows_duplicate_labels: 'bool') -> 'None'
|
66,286 |
pandas.core.flags
|
__repr__
| null |
def __repr__(self) -> str:
return f"<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>"
|
(self) -> str
|
66,287 |
pandas.core.flags
|
__setitem__
| null |
def __setitem__(self, key: str, value) -> None:
if key not in self._keys:
raise ValueError(f"Unknown flag {key}. Must be one of {self._keys}")
setattr(self, key, value)
|
(self, key: str, value) -> NoneType
|
66,288 |
pandas.core.arrays.floating
|
Float32Dtype
|
An ExtensionDtype for float32 data.
This dtype uses ``pd.NA`` as missing value indicator.
Attributes
----------
None
Methods
-------
None
Examples
--------
For Float32Dtype:
>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float32Dtype())
>>> ser.dtype
Float32Dtype()
For Float64Dtype:
>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float64Dtype())
>>> ser.dtype
Float64Dtype()
|
class Float32Dtype(FloatingDtype):
type = np.float32
name: ClassVar[str] = "Float32"
__doc__ = _dtype_docstring.format(dtype="float32")
|
()
|
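A brief sketch of creating data with this nullable dtype (the string alias and the dtype object are interchangeable):

```python
import pandas as pd

arr = pd.array([2.25, None], dtype="Float32")   # alias for pd.Float32Dtype()
print(arr.dtype)                                # Float32
print(arr[1] is pd.NA)                          # True: pd.NA marks missing values
```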
66,290 |
pandas.core.arrays.numeric
|
__from_arrow__
|
Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.
|
def __from_arrow__(
self, array: pyarrow.Array | pyarrow.ChunkedArray
) -> BaseMaskedArray:
"""
Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
from pandas.core.arrays.arrow._arrow_utils import (
pyarrow_array_to_numpy_and_mask,
)
array_class = self.construct_array_type()
pyarrow_type = pyarrow.from_numpy_dtype(self.type)
if not array.type.equals(pyarrow_type) and not pyarrow.types.is_null(
array.type
):
# test_from_arrow_type_error raise for string, but allow
# through itemsize conversion GH#31896
rt_dtype = pandas_dtype(array.type.to_pandas_dtype())
if rt_dtype.kind not in "iuf":
# Could allow "c" or potentially disallow float<->int conversion,
# but at the moment we specifically test that uint<->int works
raise TypeError(
f"Expected array of {self} type, got {array.type} instead"
)
array = array.cast(pyarrow_type)
if isinstance(array, pyarrow.ChunkedArray):
# TODO this "if" can be removed when requiring pyarrow >= 10.0, which fixed
# combine_chunks for empty arrays https://github.com/apache/arrow/pull/13757
if array.num_chunks == 0:
array = pyarrow.array([], type=array.type)
else:
array = array.combine_chunks()
data, mask = pyarrow_array_to_numpy_and_mask(array, dtype=self.numpy_dtype)
return array_class(data.copy(), ~mask, copy=False)
|
(self, array: 'pyarrow.Array | pyarrow.ChunkedArray') -> 'BaseMaskedArray'
|
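``__from_arrow__`` is what lets pyarrow-to-pandas conversion land on the masked arrays; in user code it is typically reached through ``types_mapper`` (sketch, assuming pyarrow is installed):

```python
import pyarrow as pa
import pandas as pd

table = pa.table({"x": pa.array([1.5, None, 3.0], type=pa.float64())})

# types_mapper routes float64 columns through Float64Dtype.__from_arrow__,
# producing a nullable masked array instead of plain float64 with NaN.
df = table.to_pandas(types_mapper={pa.float64(): pd.Float64Dtype()}.get)
print(df["x"].dtype)                 # Float64
print(df["x"].isna().tolist())       # [False, True, False]
```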
66,293 |
pandas.core.arrays.numeric
|
__repr__
| null |
def __repr__(self) -> str:
return f"{self.name}Dtype()"
|
(self) -> str
|
66,295 |
pandas.core.dtypes.common
|
is_float_dtype
|
Check whether the provided array or dtype is of a float dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a float dtype.
Examples
--------
>>> from pandas.api.types import is_float_dtype
>>> is_float_dtype(str)
False
>>> is_float_dtype(int)
False
>>> is_float_dtype(float)
True
>>> is_float_dtype(np.array(['a', 'b']))
False
>>> is_float_dtype(pd.Series([1, 2]))
False
>>> is_float_dtype(pd.Index([1, 2.]))
True
|
def is_float_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a float dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a float dtype.
Examples
--------
>>> from pandas.api.types import is_float_dtype
>>> is_float_dtype(str)
False
>>> is_float_dtype(int)
False
>>> is_float_dtype(float)
True
>>> is_float_dtype(np.array(['a', 'b']))
False
>>> is_float_dtype(pd.Series([1, 2]))
False
>>> is_float_dtype(pd.Index([1, 2.]))
True
"""
return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype(
arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f"
)
|
(arr_or_dtype) -> bool
|
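Note from the implementation above that the ``ExtensionDtype`` branch (``typ.kind in "f"``) means nullable float dtypes are also recognised; a quick check on a recent pandas:

```python
import pandas as pd
from pandas.api.types import is_float_dtype

print(is_float_dtype(pd.Series([1.5, None], dtype="Float64")))   # True (nullable float)
print(is_float_dtype(pd.Series([1, 2], dtype="Int64")))          # False
```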
66,298 |
pandas.core.arrays.floating
|
Float64Dtype
|
An ExtensionDtype for float64 data.
This dtype uses ``pd.NA`` as missing value indicator.
Attributes
----------
None
Methods
-------
None
Examples
--------
For Float32Dtype:
>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float32Dtype())
>>> ser.dtype
Float32Dtype()
For Float64Dtype:
>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float64Dtype())
>>> ser.dtype
Float64Dtype()
|
class Float64Dtype(FloatingDtype):
type = np.float64
name: ClassVar[str] = "Float64"
__doc__ = _dtype_docstring.format(dtype="float64")
|
()
|
66,308 |
pandas.core.groupby.grouper
|
Grouper
|
A Grouper allows the user to specify a groupby instruction for an object.
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
If `axis` and/or `level` are passed as keywords to both `Grouper` and
`groupby`, the values passed to `Grouper` take precedence.
Parameters
----------
key : str, defaults to None
Groupby key, which selects the grouping column of the target.
level : name/number, defaults to None
The level for the target index.
freq : str / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
axis : str, int, defaults to 0
Number/name of the axis.
sort : bool, default to False
Whether to sort the resulting labels.
closed : {'left' or 'right'}
Closed end of interval. Only when `freq` parameter is passed.
label : {'left' or 'right'}
Interval boundary to use for labeling.
Only when `freq` parameter is passed.
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex and `freq` parameter is passed.
origin : Timestamp or str, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin must
match the timezone of the index.
If string, must be one of the following:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
- 'end': `origin` is the last value of the timeseries
- 'end_day': `origin` is the ceiling midnight of the last day
.. versionadded:: 1.3.0
offset : Timedelta or str, default is None
An offset timedelta added to the origin.
dropna : bool, default True
If True, and if group keys contain NA values, NA values together with
row/column will be dropped. If False, NA values will also be treated as
the key in groups.
Returns
-------
Grouper or pandas.api.typing.TimeGrouper
A TimeGrouper is returned if ``freq`` is not ``None``. Otherwise, a Grouper
is returned.
Examples
--------
``df.groupby(pd.Grouper(key="Animal"))`` is equivalent to ``df.groupby('Animal')``
>>> df = pd.DataFrame(
... {
... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
... "Speed": [100, 5, 200, 300, 15],
... }
... )
>>> df
Animal Speed
0 Falcon 100
1 Parrot 5
2 Falcon 200
3 Falcon 300
4 Parrot 15
>>> df.groupby(pd.Grouper(key="Animal")).mean()
Speed
Animal
Falcon 200.0
Parrot 10.0
Specify a resample operation on the column 'Publish date'
>>> df = pd.DataFrame(
... {
... "Publish date": [
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-09"),
... pd.Timestamp("2000-01-16")
... ],
... "ID": [0, 1, 2, 3],
... "Price": [10, 20, 30, 40]
... }
... )
>>> df
Publish date ID Price
0 2000-01-02 0 10
1 2000-01-02 1 20
2 2000-01-09 2 30
3 2000-01-16 3 40
>>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()
ID Price
Publish date
2000-01-02 0.5 15.0
2000-01-09 2.0 30.0
2000-01-16 3.0 40.0
If you want to adjust the start of the bins based on a fixed timestamp:
>>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
>>> rng = pd.date_range(start, end, freq='7min')
>>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
>>> ts
2000-10-01 23:30:00 0
2000-10-01 23:37:00 3
2000-10-01 23:44:00 6
2000-10-01 23:51:00 9
2000-10-01 23:58:00 12
2000-10-02 00:05:00 15
2000-10-02 00:12:00 18
2000-10-02 00:19:00 21
2000-10-02 00:26:00 24
Freq: 7min, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min')).sum()
2000-10-01 23:14:00 0
2000-10-01 23:31:00 9
2000-10-01 23:48:00 21
2000-10-02 00:05:00 54
2000-10-02 00:22:00 24
Freq: 17min, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
2000-10-01 23:18:00 0
2000-10-01 23:35:00 18
2000-10-01 23:52:00 27
2000-10-02 00:09:00 39
2000-10-02 00:26:00 24
Freq: 17min, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
2000-10-01 23:24:00 3
2000-10-01 23:41:00 15
2000-10-01 23:58:00 45
2000-10-02 00:15:00 45
Freq: 17min, dtype: int64
If you want to adjust the start of the bins with an `offset` Timedelta, the two
following lines are equivalent:
>>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17min, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17min, dtype: int64
To replace the use of the deprecated `base` argument, you can now use `offset`,
in this example it is equivalent to have `base=2`:
>>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
2000-10-01 23:16:00 0
2000-10-01 23:33:00 9
2000-10-01 23:50:00 36
2000-10-02 00:07:00 39
2000-10-02 00:24:00 24
Freq: 17min, dtype: int64
|
class Grouper:
"""
A Grouper allows the user to specify a groupby instruction for an object.
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
If `axis` and/or `level` are passed as keywords to both `Grouper` and
`groupby`, the values passed to `Grouper` take precedence.
Parameters
----------
key : str, defaults to None
Groupby key, which selects the grouping column of the target.
level : name/number, defaults to None
The level for the target index.
freq : str / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
axis : str, int, defaults to 0
Number/name of the axis.
sort : bool, default to False
Whether to sort the resulting labels.
closed : {'left' or 'right'}
Closed end of interval. Only when `freq` parameter is passed.
label : {'left' or 'right'}
Interval boundary to use for labeling.
Only when `freq` parameter is passed.
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex and `freq` parameter is passed.
origin : Timestamp or str, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin must
match the timezone of the index.
If string, must be one of the following:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
- 'end': `origin` is the last value of the timeseries
- 'end_day': `origin` is the ceiling midnight of the last day
.. versionadded:: 1.3.0
offset : Timedelta or str, default is None
An offset timedelta added to the origin.
dropna : bool, default True
If True, and if group keys contain NA values, NA values together with
row/column will be dropped. If False, NA values will also be treated as
the key in groups.
Returns
-------
Grouper or pandas.api.typing.TimeGrouper
A TimeGrouper is returned if ``freq`` is not ``None``. Otherwise, a Grouper
is returned.
Examples
--------
``df.groupby(pd.Grouper(key="Animal"))`` is equivalent to ``df.groupby('Animal')``
>>> df = pd.DataFrame(
... {
... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
... "Speed": [100, 5, 200, 300, 15],
... }
... )
>>> df
Animal Speed
0 Falcon 100
1 Parrot 5
2 Falcon 200
3 Falcon 300
4 Parrot 15
>>> df.groupby(pd.Grouper(key="Animal")).mean()
Speed
Animal
Falcon 200.0
Parrot 10.0
Specify a resample operation on the column 'Publish date'
>>> df = pd.DataFrame(
... {
... "Publish date": [
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-09"),
... pd.Timestamp("2000-01-16")
... ],
... "ID": [0, 1, 2, 3],
... "Price": [10, 20, 30, 40]
... }
... )
>>> df
Publish date ID Price
0 2000-01-02 0 10
1 2000-01-02 1 20
2 2000-01-09 2 30
3 2000-01-16 3 40
>>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()
ID Price
Publish date
2000-01-02 0.5 15.0
2000-01-09 2.0 30.0
2000-01-16 3.0 40.0
If you want to adjust the start of the bins based on a fixed timestamp:
>>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
>>> rng = pd.date_range(start, end, freq='7min')
>>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
>>> ts
2000-10-01 23:30:00 0
2000-10-01 23:37:00 3
2000-10-01 23:44:00 6
2000-10-01 23:51:00 9
2000-10-01 23:58:00 12
2000-10-02 00:05:00 15
2000-10-02 00:12:00 18
2000-10-02 00:19:00 21
2000-10-02 00:26:00 24
Freq: 7min, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min')).sum()
2000-10-01 23:14:00 0
2000-10-01 23:31:00 9
2000-10-01 23:48:00 21
2000-10-02 00:05:00 54
2000-10-02 00:22:00 24
Freq: 17min, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
2000-10-01 23:18:00 0
2000-10-01 23:35:00 18
2000-10-01 23:52:00 27
2000-10-02 00:09:00 39
2000-10-02 00:26:00 24
Freq: 17min, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
2000-10-01 23:24:00 3
2000-10-01 23:41:00 15
2000-10-01 23:58:00 45
2000-10-02 00:15:00 45
Freq: 17min, dtype: int64
If you want to adjust the start of the bins with an `offset` Timedelta, the two
following lines are equivalent:
>>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17min, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17min, dtype: int64
To replace the use of the deprecated `base` argument, you can now use `offset`,
in this example it is equivalent to have `base=2`:
>>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
2000-10-01 23:16:00 0
2000-10-01 23:33:00 9
2000-10-01 23:50:00 36
2000-10-02 00:07:00 39
2000-10-02 00:24:00 24
Freq: 17min, dtype: int64
"""
sort: bool
dropna: bool
_gpr_index: Index | None
_grouper: Index | None
_attributes: tuple[str, ...] = ("key", "level", "freq", "axis", "sort", "dropna")
def __new__(cls, *args, **kwargs):
if kwargs.get("freq") is not None:
from pandas.core.resample import TimeGrouper
cls = TimeGrouper
return super().__new__(cls)
def __init__(
self,
key=None,
level=None,
freq=None,
axis: Axis | lib.NoDefault = lib.no_default,
sort: bool = False,
dropna: bool = True,
) -> None:
if type(self) is Grouper:
# i.e. not TimeGrouper
if axis is not lib.no_default:
warnings.warn(
"Grouper axis keyword is deprecated and will be removed in a "
"future version. To group on axis=1, use obj.T.groupby(...) "
"instead",
FutureWarning,
stacklevel=find_stack_level(),
)
else:
axis = 0
if axis is lib.no_default:
axis = 0
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.dropna = dropna
self._grouper_deprecated = None
self._indexer_deprecated: npt.NDArray[np.intp] | None = None
self._obj_deprecated = None
self._gpr_index = None
self.binner = None
self._grouper = None
self._indexer: npt.NDArray[np.intp] | None = None
def _get_grouper(
self, obj: NDFrameT, validate: bool = True
) -> tuple[ops.BaseGrouper, NDFrameT]:
"""
Parameters
----------
obj : Series or DataFrame
validate : bool, default True
if True, validate the grouper
Returns
-------
a tuple of grouper, obj (possibly sorted)
"""
obj, _, _ = self._set_grouper(obj)
grouper, _, obj = get_grouper(
obj,
[self.key],
axis=self.axis,
level=self.level,
sort=self.sort,
validate=validate,
dropna=self.dropna,
)
# Without setting this, subsequent lookups to .groups raise
# error: Incompatible types in assignment (expression has type "BaseGrouper",
# variable has type "None")
self._grouper_deprecated = grouper # type: ignore[assignment]
return grouper, obj
def _set_grouper(
self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None
) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]:
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : Series or DataFrame
sort : bool, default False
whether the resulting grouper should be sorted
gpr_index : Index or None, default None
Returns
-------
NDFrame
Index
np.ndarray[np.intp] | None
"""
assert obj is not None
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# Keep self._grouper value before overriding
if self._grouper is None:
# TODO: What are we assuming about subsequent calls?
self._grouper = gpr_index
self._indexer = self._indexer_deprecated
# the key must be a valid info item
if self.key is not None:
key = self.key
# The 'on' is already defined
if getattr(gpr_index, "name", None) == key and isinstance(obj, Series):
# Sometimes self._grouper will have been resorted while
# obj has not. In this case there is a mismatch when we
# call self._grouper.take(obj.index) so we need to undo the sorting
# before we call _grouper.take.
assert self._grouper is not None
if self._indexer is not None:
reverse_indexer = self._indexer.argsort()
unsorted_ax = self._grouper.take(reverse_indexer)
ax = unsorted_ax.take(obj.index)
else:
ax = self._grouper.take(obj.index)
else:
if key not in obj._info_axis:
raise KeyError(f"The grouper name {key} is not found")
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax._get_level_values(level), name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(f"The level {level} is not valid")
# possibly sort
indexer: npt.NDArray[np.intp] | None = None
if (self.sort or sort) and not ax.is_monotonic_increasing:
# use stable sort to support first, last, nth
# TODO: why does putting na_position="first" fix datetimelike cases?
indexer = self._indexer_deprecated = ax.array.argsort(
kind="mergesort", na_position="first"
)
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis)
# error: Incompatible types in assignment (expression has type
# "NDFrameT", variable has type "None")
self._obj_deprecated = obj # type: ignore[assignment]
self._gpr_index = ax
return obj, ax, indexer
@final
@property
def ax(self) -> Index:
warnings.warn(
f"{type(self).__name__}.ax is deprecated and will be removed in a "
"future version. Use Resampler.ax instead",
FutureWarning,
stacklevel=find_stack_level(),
)
index = self._gpr_index
if index is None:
raise ValueError("_set_grouper must be called before ax is accessed")
return index
@final
@property
def indexer(self):
warnings.warn(
f"{type(self).__name__}.indexer is deprecated and will be removed "
"in a future version. Use Resampler.indexer instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self._indexer_deprecated
@final
@property
def obj(self):
# TODO(3.0): enforcing these deprecations on Grouper should close
# GH#25564, GH#41930
warnings.warn(
f"{type(self).__name__}.obj is deprecated and will be removed "
"in a future version. Use GroupBy.indexer instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self._obj_deprecated
@final
@property
def grouper(self):
warnings.warn(
f"{type(self).__name__}.grouper is deprecated and will be removed "
"in a future version. Use GroupBy.grouper instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self._grouper_deprecated
@final
@property
def groups(self):
warnings.warn(
f"{type(self).__name__}.groups is deprecated and will be removed "
"in a future version. Use GroupBy.groups instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
# error: "None" has no attribute "groups"
return self._grouper_deprecated.groups # type: ignore[attr-defined]
@final
def __repr__(self) -> str:
attrs_list = (
f"{attr_name}={repr(getattr(self, attr_name))}"
for attr_name in self._attributes
if getattr(self, attr_name) is not None
)
attrs = ", ".join(attrs_list)
cls_name = type(self).__name__
return f"{cls_name}({attrs})"
|
(*args, **kwargs)
|
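A minimal usage sketch, not part of the dumped pandas source: the Grouper docstring above describes the `level` parameter but only demonstrates `key` and `freq`, so this shows level-based grouping on a named index level (data reuses the docstring example).
import pandas as pd

df = pd.DataFrame(
    {
        "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
        "Speed": [100, 5, 200, 300, 15],
    }
).set_index("Animal")

# Grouping on the named index level; equivalent to df.groupby(level="Animal")
print(df.groupby(pd.Grouper(level="Animal")).mean())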
66,309 |
pandas.core.groupby.grouper
|
__init__
| null |
def __init__(
self,
key=None,
level=None,
freq=None,
axis: Axis | lib.NoDefault = lib.no_default,
sort: bool = False,
dropna: bool = True,
) -> None:
if type(self) is Grouper:
# i.e. not TimeGrouper
if axis is not lib.no_default:
warnings.warn(
"Grouper axis keyword is deprecated and will be removed in a "
"future version. To group on axis=1, use obj.T.groupby(...) "
"instead",
FutureWarning,
stacklevel=find_stack_level(),
)
else:
axis = 0
if axis is lib.no_default:
axis = 0
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.dropna = dropna
self._grouper_deprecated = None
self._indexer_deprecated: npt.NDArray[np.intp] | None = None
self._obj_deprecated = None
self._gpr_index = None
self.binner = None
self._grouper = None
self._indexer: npt.NDArray[np.intp] | None = None
|
(self, key=None, level=None, freq=None, axis: 'Axis | lib.NoDefault' = <no_default>, sort: 'bool' = False, dropna: 'bool' = True) -> 'None'
|
66,310 |
pandas.core.groupby.grouper
|
__new__
| null |
def __new__(cls, *args, **kwargs):
if kwargs.get("freq") is not None:
from pandas.core.resample import TimeGrouper
cls = TimeGrouper
return super().__new__(cls)
|
(cls, *args, **kwargs)
|
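A minimal sketch, not part of the dumped source, illustrating the __new__ dispatch above: passing `freq` makes the constructor hand back a TimeGrouper, otherwise a plain Grouper (assumes a pandas version where `pandas.api.typing` exposes TimeGrouper, as the Grouper docstring implies).
import pandas as pd
from pandas.api.typing import TimeGrouper

plain = pd.Grouper(key="Animal")                    # no freq -> plain Grouper
timed = pd.Grouper(key="Publish date", freq="1W")   # freq -> __new__ swaps cls to TimeGrouper

print(type(plain).__name__)            # Grouper
print(isinstance(timed, TimeGrouper))  # True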
66,311 |
pandas.core.groupby.grouper
|
__repr__
| null |
@final
def __repr__(self) -> str:
attrs_list = (
f"{attr_name}={repr(getattr(self, attr_name))}"
for attr_name in self._attributes
if getattr(self, attr_name) is not None
)
attrs = ", ".join(attrs_list)
cls_name = type(self).__name__
return f"{cls_name}({attrs})"
|
(self) -> str
|
66,312 |
pandas.core.groupby.grouper
|
_get_grouper
|
Parameters
----------
obj : Series or DataFrame
validate : bool, default True
if True, validate the grouper
Returns
-------
a tuple of grouper, obj (possibly sorted)
|
def _get_grouper(
self, obj: NDFrameT, validate: bool = True
) -> tuple[ops.BaseGrouper, NDFrameT]:
"""
Parameters
----------
obj : Series or DataFrame
validate : bool, default True
if True, validate the grouper
Returns
-------
a tuple of grouper, obj (possibly sorted)
"""
obj, _, _ = self._set_grouper(obj)
grouper, _, obj = get_grouper(
obj,
[self.key],
axis=self.axis,
level=self.level,
sort=self.sort,
validate=validate,
dropna=self.dropna,
)
# Without setting this, subsequent lookups to .groups raise
# error: Incompatible types in assignment (expression has type "BaseGrouper",
# variable has type "None")
self._grouper_deprecated = grouper # type: ignore[assignment]
return grouper, obj
|
(self, obj: 'NDFrameT', validate: 'bool' = True) -> 'tuple[ops.BaseGrouper, NDFrameT]'
|
66,313 |
pandas.core.groupby.grouper
|
_set_grouper
|
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : Series or DataFrame
sort : bool, default False
whether the resulting grouper should be sorted
gpr_index : Index or None, default None
Returns
-------
NDFrame
Index
np.ndarray[np.intp] | None
|
def _set_grouper(
self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None
) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]:
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : Series or DataFrame
sort : bool, default False
whether the resulting grouper should be sorted
gpr_index : Index or None, default None
Returns
-------
NDFrame
Index
np.ndarray[np.intp] | None
"""
assert obj is not None
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# Keep self._grouper value before overriding
if self._grouper is None:
# TODO: What are we assuming about subsequent calls?
self._grouper = gpr_index
self._indexer = self._indexer_deprecated
# the key must be a valid info item
if self.key is not None:
key = self.key
# The 'on' is already defined
if getattr(gpr_index, "name", None) == key and isinstance(obj, Series):
# Sometimes self._grouper will have been resorted while
# obj has not. In this case there is a mismatch when we
# call self._grouper.take(obj.index) so we need to undo the sorting
# before we call _grouper.take.
assert self._grouper is not None
if self._indexer is not None:
reverse_indexer = self._indexer.argsort()
unsorted_ax = self._grouper.take(reverse_indexer)
ax = unsorted_ax.take(obj.index)
else:
ax = self._grouper.take(obj.index)
else:
if key not in obj._info_axis:
raise KeyError(f"The grouper name {key} is not found")
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax._get_level_values(level), name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(f"The level {level} is not valid")
# possibly sort
indexer: npt.NDArray[np.intp] | None = None
if (self.sort or sort) and not ax.is_monotonic_increasing:
# use stable sort to support first, last, nth
# TODO: why does putting na_position="first" fix datetimelike cases?
indexer = self._indexer_deprecated = ax.array.argsort(
kind="mergesort", na_position="first"
)
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis)
# error: Incompatible types in assignment (expression has type
# "NDFrameT", variable has type "None")
self._obj_deprecated = obj # type: ignore[assignment]
self._gpr_index = ax
return obj, ax, indexer
|
(self, obj: 'NDFrameT', sort: 'bool' = False, *, gpr_index: 'Index | None' = None) -> 'tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]'
|
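A minimal sketch, not part of the dumped source, of the key/level validation performed in _set_grouper above; the error surfaces when the Grouper is actually used in a groupby call.
import pandas as pd

df = pd.DataFrame({"Animal": ["Falcon", "Parrot"], "Speed": [100, 5]})

try:
    # _set_grouper rejects a Grouper that carries both a key and a level
    df.groupby(pd.Grouper(key="Animal", level=0)).mean()
except ValueError as err:
    print(err)  # The Grouper cannot specify both a key and a level!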
66,314 |
pandas.io.pytables
|
HDFStore
|
Dict-like IO interface for storing pandas objects in PyTables.
Either Fixed or Table format.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
path : str
File path to HDF5 file.
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
These additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available raises
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
**kwargs
These parameters will be passed to the PyTables open_file method.
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
**Create or load HDF5 file in-memory**
When passing the `driver` option to the PyTables open_file method through
**kwargs, the HDF5 file is loaded or created in-memory and will only be
written when closed:
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5', driver='H5FD_CORE')
>>> store['foo'] = bar
>>> store.close() # only now, data is written to disk
|
class HDFStore:
"""
Dict-like IO interface for storing pandas objects in PyTables.
Either Fixed or Table format.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
path : str
File path to HDF5 file.
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
These additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available raises
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
**kwargs
These parameters will be passed to the PyTables open_file method.
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
**Create or load HDF5 file in-memory**
When passing the `driver` option to the PyTables open_file method through
**kwargs, the HDF5 file is loaded or created in-memory and will only be
written when closed:
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5', driver='H5FD_CORE')
>>> store['foo'] = bar
>>> store.close() # only now, data is written to disk
"""
_handle: File | None
_mode: str
def __init__(
self,
path,
mode: str = "a",
complevel: int | None = None,
complib=None,
fletcher32: bool = False,
**kwargs,
) -> None:
if "format" in kwargs:
raise ValueError("format is not a defined argument for HDFStore")
tables = import_optional_dependency("tables")
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
f"complib only supports {tables.filters.all_complibs} compression."
)
if complib is None and complevel is not None:
complib = tables.filters.default_complib
self._path = stringify_path(path)
if mode is None:
mode = "a"
self._mode = mode
self._handle = None
self._complevel = complevel if complevel else 0
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
def __fspath__(self) -> str:
return self._path
@property
def root(self):
"""return the root node"""
self._check_if_open()
assert self._handle is not None # for mypy
return self._handle.root
@property
def filename(self) -> str:
return self._path
def __getitem__(self, key: str):
return self.get(key)
def __setitem__(self, key: str, value) -> None:
self.put(key, value)
def __delitem__(self, key: str) -> None:
return self.remove(key)
def __getattr__(self, name: str):
"""allow attribute access to get stores"""
try:
return self.get(name)
except (KeyError, ClosedFileError):
pass
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
def __contains__(self, key: str) -> bool:
"""
check for existence of this key
can match the exact pathname or the pathname without the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if key in (name, name[1:]):
return True
return False
def __len__(self) -> int:
return len(self.groups())
def __repr__(self) -> str:
pstr = pprint_thing(self._path)
return f"{type(self)}\nFile path: {pstr}\n"
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.close()
def keys(self, include: str = "pandas") -> list[str]:
"""
Return a list of keys corresponding to objects stored in HDFStore.
Parameters
----------
include : str, default 'pandas'
When include equals 'pandas', return pandas objects.
When include equals 'native', return native HDF5 Table objects.
Returns
-------
list
List of ABSOLUTE path-names (i.e. they have the leading '/').
Raises
------
raises ValueError if include has an illegal value
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> store.get('data') # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.close() # doctest: +SKIP
"""
if include == "pandas":
return [n._v_pathname for n in self.groups()]
elif include == "native":
assert self._handle is not None # mypy
return [
n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")
]
raise ValueError(
f"`include` should be either 'pandas' or 'native' but is '{include}'"
)
def __iter__(self) -> Iterator[str]:
return iter(self.keys())
def items(self) -> Iterator[tuple[str, list]]:
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
def open(self, mode: str = "a", **kwargs) -> None:
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
**kwargs
These parameters will be passed to the PyTables open_file method.
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ["a", "w"] and mode in ["r", "r+"]:
pass
elif mode in ["w"]:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
f"Re-opening the file [{self._path}] with mode [{self._mode}] "
"will delete the current file!"
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complevel and self._complevel > 0:
self._filters = _tables().Filters(
self._complevel, self._complib, fletcher32=self._fletcher32
)
if _table_file_open_policy_is_strict and self.is_open:
msg = (
"Cannot open HDF5 file, which is already opened, "
"even in read-only mode."
)
raise ValueError(msg)
self._handle = tables.open_file(self._path, self._mode, **kwargs)
def close(self) -> None:
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self) -> bool:
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync: bool = False) -> None:
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
with suppress(OSError):
os.fsync(self._handle.fileno())
def get(self, key: str):
"""
Retrieve pandas object stored in file.
Parameters
----------
key : str
Returns
-------
object
Same type as object stored in file.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> store.get('data') # doctest: +SKIP
>>> store.close() # doctest: +SKIP
"""
with patch_pickle():
# GH#31167 Without this patch, pickle doesn't know how to unpickle
# old DateOffset objects now that they are cdef classes.
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
return self._read_group(group)
def select(
self,
key: str,
where=None,
start=None,
stop=None,
columns=None,
iterator: bool = False,
chunksize: int | None = None,
auto_close: bool = False,
):
"""
Retrieve pandas object stored in file, optionally based on where criteria.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
Object being retrieved from file.
where : list or None
List of Term (or convertible) objects, optional.
start : int or None
Row number to start selection.
stop : int, default None
Row number to stop selection.
columns : list or None
A list of columns that if not None, will limit the return columns.
iterator : bool, default False
Return an iterator.
chunksize : int or None
Number of rows to include in each iteration; returns an iterator.
auto_close : bool, default False
Should automatically close the store when finished.
Returns
-------
object
Retrieved object from file.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> store.get('data') # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.select('/data1') # doctest: +SKIP
A B
0 1 2
1 3 4
>>> store.select('/data1', where='columns == A') # doctest: +SKIP
A
0 1
1 3
>>> store.close() # doctest: +SKIP
"""
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop, where=_where, columns=columns)
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=s.nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result()
def select_as_coordinates(
self,
key: str,
where=None,
start: int | None = None,
stop: int | None = None,
):
"""
return the selection as an Index
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_coordinates with a table")
return tbl.read_coordinates(where=where, start=start, stop=stop)
def select_column(
self,
key: str,
column: str,
start: int | None = None,
stop: int | None = None,
):
"""
return a single column from the table. This is generally only useful to
select an indexable
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
column : str
The column of interest.
start : int or None, default None
stop : int or None, default None
Raises
------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_column with a table")
return tbl.read_column(column=column, start=start, stop=stop)
def select_as_multiple(
self,
keys,
where=None,
selector=None,
columns=None,
start=None,
stop=None,
iterator: bool = False,
chunksize: int | None = None,
auto_close: bool = False,
):
"""
Retrieve pandas objects from multiple tables.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : bool, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : bool, default False
Should automatically close the store when finished.
Raises
------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, str):
return self.select(
key=keys,
where=where,
columns=columns,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
if t is None:
raise KeyError(f"Invalid table [{k}]")
if not t.is_table:
raise TypeError(
f"object [{t.pathname}] is not a table, and cannot be used in all "
"select as multiple"
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError("all tables must have exactly the same nrows!")
# The isinstance checks here are redundant with the check above,
# but necessary for mypy; see GH#29757
_tbls = [x for x in tbls if isinstance(x, Table)]
# axis is the concatenation axis
axis = {t.non_index_axes[0][0] for t in _tbls}.pop()
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of
# coordinates here
objs = [
t.read(where=_where, columns=columns, start=_start, stop=_stop)
for t in tbls
]
# concat and return
return concat(objs, axis=axis, verify_integrity=False)._consolidate()
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result(coordinates=True)
def put(
self,
key: str,
value: DataFrame | Series,
format=None,
index: bool = True,
append: bool = False,
complib=None,
complevel: int | None = None,
min_itemsize: int | dict[str, int] | None = None,
nan_rep=None,
data_columns: Literal[True] | list[str] | None = None,
encoding=None,
errors: str = "strict",
track_times: bool = True,
dropna: bool = False,
) -> None:
"""
Store object in HDFStore.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'fixed(f)|table(t)', default is 'fixed'
Format to use when storing object in HDFStore. Value can be one of:
``'fixed'``
Fixed format. Fast writing/reading. Not-appendable, nor searchable.
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
index : bool, default True
Write DataFrame index as a column.
append : bool, default False
This will force Table format, append the input data to the existing.
data_columns : list of columns or True, default None
List of columns to create as data columns, or True to use all columns.
See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : str, default None
Provide an encoding for strings.
track_times : bool, default True
Parameter is propagated to 'create_table' method of 'PyTables'.
If set to False it makes it possible to have the same h5 files (same hashes)
independent of creation time.
dropna : bool, default False, optional
Remove missing values.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
"""
if format is None:
format = get_option("io.hdf.default_format") or "fixed"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
encoding=encoding,
errors=errors,
track_times=track_times,
dropna=dropna,
)
def remove(self, key: str, where=None, start=None, stop=None) -> None:
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : str
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Raises
------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except KeyError:
# the key is not a valid store, re-raising KeyError
raise
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as err:
# In tests we get here with ClosedFileError, TypeError, and
# _table_mod.NoSuchNodeError. TODO: Catch only these?
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!"
) from err
# we are actually trying to remove a node (with children)
node = self.get_node(key)
if node is not None:
node._f_remove(recursive=True)
return None
# remove the node
if com.all_none(where, start, stop):
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
"can only remove with where on objects written as tables"
)
return s.delete(where=where, start=start, stop=stop)
def append(
self,
key: str,
value: DataFrame | Series,
format=None,
axes=None,
index: bool | list[str] = True,
append: bool = True,
complib=None,
complevel: int | None = None,
columns=None,
min_itemsize: int | dict[str, int] | None = None,
nan_rep=None,
chunksize: int | None = None,
expectedrows=None,
dropna: bool | None = None,
data_columns: Literal[True] | list[str] | None = None,
encoding=None,
errors: str = "strict",
) -> None:
"""
Append to Table in file.
Node must already exist and be Table format.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'table' is the default
Format to use when storing object in HDFStore. Value can be one of:
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
index : bool, default True
Write DataFrame index as a column.
append : bool, default True
Append the input data to the existing.
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum str sizes
nan_rep : str to use as str nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for str
dropna : bool, default False, optional
Do not write an all-NaN row to the store, settable
by the option 'io.hdf.dropna_table'.
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
Examples
--------
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df1, format='table') # doctest: +SKIP
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
>>> store.append('data', df2) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
A B
0 1 2
1 3 4
0 5 6
1 7 8
"""
if columns is not None:
raise TypeError(
"columns is not a supported keyword in append, try data_columns"
)
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or "table"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
axes=axes,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
data_columns=data_columns,
encoding=encoding,
errors=errors,
)
def append_to_multiple(
self,
d: dict,
value,
selector,
data_columns=None,
axes=None,
dropna: bool = False,
**kwargs,
) -> None:
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted
"""
if axes is not None:
raise TypeError(
"axes is currently not accepted as a parameter to append_to_multiple; "
"you can create the tables independently instead"
)
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = next(iter(set(range(value.ndim)) - set(_AXES_MAP[type(value)])))
# figure out how to split the value
remain_key = None
remain_values: list = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how="all").index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
min_itemsize = kwargs.pop("min_itemsize", None)
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex(v, axis=axis)
filtered = (
{key: value for (key, value) in min_itemsize.items() if key in v}
if min_itemsize is not None
else None
)
self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs)
def create_table_index(
self,
key: str,
columns=None,
optlevel: int | None = None,
kind: str | None = None,
) -> None:
"""
Create a pytables index on the table.
Parameters
----------
key : str
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium".
Raises
------
TypeError: raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not isinstance(s, Table):
raise TypeError("cannot create table index on a Fixed format store")
s.create_index(columns=columns, optlevel=optlevel, kind=kind)
def groups(self) -> list:
"""
Return a list of all the top-level nodes.
Each node returned is not a pandas storage object.
Returns
-------
list
List of objects.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> print(store.groups()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
[/data (Group) ''
children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array),
'block0_items' (Array)]]
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
return [
g
for g in self._handle.walk_groups()
if (
not isinstance(g, _table_mod.link.Link)
and (
getattr(g._v_attrs, "pandas_type", None)
or getattr(g, "table", None)
or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
)
)
]
def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]:
"""
Walk the pytables group hierarchy for pandas objects.
This generator will yield the group path, subgroups and pandas object
names for each group.
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure.
Parameters
----------
where : str, default "/"
Group where to start walking.
Yields
------
path : str
Full path to a group (without trailing '/').
groups : list
Names (strings) of the groups contained in `path`.
leaves : list
Names (strings) of the pandas objects contained in `path`.
Examples
--------
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df1, format='table') # doctest: +SKIP
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
>>> store.append('data', df2) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
>>> for group in store.walk(): # doctest: +SKIP
... print(group) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
for g in self._handle.walk_groups(where):
if getattr(g._v_attrs, "pandas_type", None) is not None:
continue
groups = []
leaves = []
for child in g._v_children.values():
pandas_type = getattr(child._v_attrs, "pandas_type", None)
if pandas_type is None:
if isinstance(child, _table_mod.group.Group):
groups.append(child._v_name)
else:
leaves.append(child._v_name)
yield (g._v_pathname.rstrip("/"), groups, leaves)
def get_node(self, key: str) -> Node | None:
"""return the node with the key or None if it does not exist"""
self._check_if_open()
if not key.startswith("/"):
key = "/" + key
assert self._handle is not None
assert _table_mod is not None # for mypy
try:
node = self._handle.get_node(self.root, key)
except _table_mod.exceptions.NoSuchNodeError:
return None
assert isinstance(node, _table_mod.Node), type(node)
return node
def get_storer(self, key: str) -> GenericFixed | Table:
"""return the storer object for a key, raise if not in the file"""
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
s = self._create_storer(group)
s.infer_axes()
return s
def copy(
self,
file,
mode: str = "w",
propindexes: bool = True,
keys=None,
complib=None,
complevel: int | None = None,
fletcher32: bool = False,
overwrite: bool = True,
) -> HDFStore:
"""
Copy the existing store to a new file, updating in place.
Parameters
----------
propindexes : bool, default True
Restore indexes in copied file.
keys : list, optional
List of keys to include in the copy (defaults to all).
overwrite : bool, default True
Whether to overwrite (remove and replace) existing nodes in the new store.
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
"""
new_store = HDFStore(
file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32
)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if isinstance(s, Table):
index: bool | list[str] = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k,
data,
index=index,
data_columns=getattr(s, "data_columns", None),
encoding=s.encoding,
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
def info(self) -> str:
"""
Print detailed information on the store.
Returns
-------
str
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> print(store.info()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
<class 'pandas.io.pytables.HDFStore'>
File path: store.h5
/data frame (shape->[2,2])
"""
path = pprint_thing(self._path)
output = f"{type(self)}\nFile path: {path}\n"
if self.is_open:
lkeys = sorted(self.keys())
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(pprint_thing(s or "invalid_HDFStore node"))
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as detail:
keys.append(k)
dstr = pprint_thing(detail)
values.append(f"[invalid_HDFStore node: {dstr}]")
output += adjoin(12, keys, values)
else:
output += "Empty"
else:
output += "File is CLOSED"
return output
# ------------------------------------------------------------------------
# private methods
def _check_if_open(self) -> None:
if not self.is_open:
raise ClosedFileError(f"{self._path} file is not open!")
def _validate_format(self, format: str) -> str:
"""validate / deprecate formats"""
# validate
try:
format = _FORMAT_MAP[format.lower()]
except KeyError as err:
raise TypeError(f"invalid HDFStore format specified [{format}]") from err
return format
def _create_storer(
self,
group,
format=None,
value: DataFrame | Series | None = None,
encoding: str = "UTF-8",
errors: str = "strict",
) -> GenericFixed | Table:
"""return a suitable class to operate"""
cls: type[GenericFixed | Table]
if value is not None and not isinstance(value, (Series, DataFrame)):
raise TypeError("value must be None, Series, or DataFrame")
pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None))
tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
assert _table_mod is not None # for mypy
if getattr(group, "table", None) or isinstance(
group, _table_mod.table.Table
):
pt = "frame_table"
tt = "generic_table"
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed"
)
else:
if isinstance(value, Series):
pt = "series"
else:
pt = "frame"
# we are actually a table
if format == "table":
pt += "_table"
# a storer node
if "table" not in pt:
_STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed}
try:
cls = _STORER_MAP[pt]
except KeyError as err:
raise TypeError(
f"cannot properly create the storer for: [_STORER_MAP] [group->"
f"{group},value->{type(value)},format->{format}"
) from err
return cls(self, group, encoding=encoding, errors=errors)
# existing node (and must be a table)
if tt is None:
# if we are a writer, determine the tt
if value is not None:
if pt == "series_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_series"
elif index.nlevels > 1:
tt = "appendable_multiseries"
elif pt == "frame_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_frame"
elif index.nlevels > 1:
tt = "appendable_multiframe"
_TABLE_MAP = {
"generic_table": GenericTable,
"appendable_series": AppendableSeriesTable,
"appendable_multiseries": AppendableMultiSeriesTable,
"appendable_frame": AppendableFrameTable,
"appendable_multiframe": AppendableMultiFrameTable,
"worm": WORMTable,
}
try:
cls = _TABLE_MAP[tt]
except KeyError as err:
raise TypeError(
f"cannot properly create the storer for: [_TABLE_MAP] [group->"
f"{group},value->{type(value)},format->{format}"
) from err
return cls(self, group, encoding=encoding, errors=errors)
def _write_to_group(
self,
key: str,
value: DataFrame | Series,
format,
axes=None,
index: bool | list[str] = True,
append: bool = False,
complib=None,
complevel: int | None = None,
fletcher32=None,
min_itemsize: int | dict[str, int] | None = None,
chunksize: int | None = None,
expectedrows=None,
dropna: bool = False,
nan_rep=None,
data_columns=None,
encoding=None,
errors: str = "strict",
track_times: bool = True,
) -> None:
# we don't want to store a table node at all if our object is 0-len
# as there are not dtypes
if getattr(value, "empty", None) and (format == "table" or append):
return
group = self._identify_group(key, append)
s = self._create_storer(group, format, value, encoding=encoding, errors=errors)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if not s.is_table or (s.is_table and format == "fixed" and s.is_exists):
raise ValueError("Can only append to Tables")
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError("Compression not supported on Fixed format stores")
# write the object
s.write(
obj=value,
axes=axes,
append=append,
complib=complib,
complevel=complevel,
fletcher32=fletcher32,
min_itemsize=min_itemsize,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
nan_rep=nan_rep,
data_columns=data_columns,
track_times=track_times,
)
if isinstance(s, Table) and index:
s.create_index(columns=index)
def _read_group(self, group: Node):
s = self._create_storer(group)
s.infer_axes()
return s.read()
def _identify_group(self, key: str, append: bool) -> Node:
"""Identify HDF5 group based on key, delete/create group if needed."""
group = self.get_node(key)
# we make this assertion for mypy; the get_node call will already
# have raised if this is incorrect
assert self._handle is not None
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
if group is None:
group = self._create_nodes_and_group(key)
return group
def _create_nodes_and_group(self, key: str) -> Node:
"""Create nodes from key and return group name."""
# assertion for mypy
assert self._handle is not None
paths = key.split("/")
# recursively create the groups
path = "/"
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith("/"):
new_path += "/"
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
return group
|
(path, mode: 'str' = 'a', complevel: 'int | None' = None, complib=None, fletcher32: 'bool' = False, **kwargs) -> 'None'
|
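A hedged end-to-end sketch, not part of the dumped source, of the public HDFStore workflow shown above (context manager, put/append, keys, select); it assumes the optional PyTables dependency is installed and uses an illustrative file name.
import numpy as np
import pandas as pd

df = pd.DataFrame({"A": range(5), "B": np.arange(5.0)})

with pd.HDFStore("example.h5", mode="w") as store:   # requires PyTables
    store.put("fixed_df", df)                         # default 'fixed' format
    store.append("table_df", df, data_columns=["A"])  # 'table' format, queryable
    print(store.keys())                               # ['/fixed_df', '/table_df']
    print(store.select("table_df", where="A > 2"))    # on-disk query via data column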
66,315 |
pandas.io.pytables
|
__contains__
|
check for existence of this key
can match the exact pathname or the pathname without the leading '/'
|
def __contains__(self, key: str) -> bool:
"""
check for existence of this key
can match the exact pathname or the pathname without the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if key in (name, name[1:]):
return True
return False
|
(self, key: str) -> bool
|
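A small sketch, not part of the dumped source, of the membership check above: both the absolute pathname and the key without the leading '/' match (requires PyTables; file name is illustrative).
import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
with pd.HDFStore("contains_example.h5", mode="w") as store:
    store.put("data", df)
    print("data" in store)     # True
    print("/data" in store)    # True
    print("missing" in store)  # False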
66,316 |
pandas.io.pytables
|
__delitem__
| null |
def __delitem__(self, key: str) -> None:
return self.remove(key)
|
(self, key: str) -> NoneType
|
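A sketch, not part of the dumped source, of the dict-style interface: __setitem__ maps to put, __getitem__ to get, and __delitem__ to remove (requires PyTables; names are illustrative).
import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
with pd.HDFStore("dictlike_example.h5", mode="w") as store:
    store["data"] = df           # __setitem__ -> put
    round_trip = store["data"]   # __getitem__ -> get
    del store["data"]            # __delitem__ -> remove
    print(len(store))            # 0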
66,319 |
pandas.io.pytables
|
__fspath__
| null |
def __fspath__(self) -> str:
return self._path
|
(self) -> str
|
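A sketch, not part of the dumped source: because __fspath__ returns the underlying path, an open HDFStore can be passed to os.fspath and other path-accepting APIs (requires PyTables; file name is illustrative).
import os
import pandas as pd

with pd.HDFStore("fspath_example.h5", mode="w") as store:
    print(os.fspath(store))  # 'fspath_example.h5'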
66,320 |
pandas.io.pytables
|
__getattr__
|
allow attribute access to get stores
|
def __getattr__(self, name: str):
"""allow attribute access to get stores"""
try:
return self.get(name)
except (KeyError, ClosedFileError):
pass
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
|
(self, name: str)
|
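A sketch, not part of the dumped source, of attribute access via __getattr__ above: store.data falls back to store.get('data'), and unknown names raise AttributeError (requires PyTables; names are illustrative).
import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
with pd.HDFStore("getattr_example.h5", mode="w") as store:
    store.put("data", df)
    print(store.data.equals(df))   # True; resolved via __getattr__ -> get("data")
    try:
        store.missing_key
    except AttributeError as err:
        print(err)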
66,321 |
pandas.io.pytables
|
__getitem__
| null |
def __getitem__(self, key: str):
return self.get(key)
|
(self, key: str)
|
66,322 |
pandas.io.pytables
|
__init__
| null |
def __init__(
self,
path,
mode: str = "a",
complevel: int | None = None,
complib=None,
fletcher32: bool = False,
**kwargs,
) -> None:
if "format" in kwargs:
raise ValueError("format is not a defined argument for HDFStore")
tables = import_optional_dependency("tables")
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
f"complib only supports {tables.filters.all_complibs} compression."
)
if complib is None and complevel is not None:
complib = tables.filters.default_complib
self._path = stringify_path(path)
if mode is None:
mode = "a"
self._mode = mode
self._handle = None
self._complevel = complevel if complevel else 0
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
|
(self, path, mode: str = 'a', complevel: Optional[int] = None, complib=None, fletcher32: bool = False, **kwargs) -> NoneType
|
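A hedged sketch, not part of the dumped source, of the constructor options validated above: an explicit complevel/complib enables compression, and an unknown complib raises ValueError (assumes PyTables with its bundled 'blosc' compressor; file names are illustrative).
import pandas as pd

store = pd.HDFStore("compressed_example.h5", mode="w", complevel=9, complib="blosc")
print(store.is_open)  # True
store.close()
try:
    pd.HDFStore("bad_example.h5", mode="w", complib="not-a-compressor")
except ValueError as err:
    print(err)  # complib only supports [...] compression.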
66,323 |
pandas.io.pytables
|
__iter__
| null |
def __iter__(self) -> Iterator[str]:
return iter(self.keys())
|
(self) -> 'Iterator[str]'
|
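A sketch, not part of the dumped source: iterating a store yields the same pandas keys returned by keys() (requires PyTables; names are illustrative).
import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
with pd.HDFStore("iter_example.h5", mode="w") as store:
    store.put("first", df)
    store.put("second", df)
    for key in store:   # __iter__ delegates to keys()
        print(key)      # e.g. '/first', '/second'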
66,324 |
pandas.io.pytables
|
__len__
| null |
def __len__(self) -> int:
return len(self.groups())
|
(self) -> int
|
66,325 |
pandas.io.pytables
|
__repr__
| null |
def __repr__(self) -> str:
pstr = pprint_thing(self._path)
return f"{type(self)}\nFile path: {pstr}\n"
|
(self) -> str
|
66,326 |
pandas.io.pytables
|
__setitem__
| null |
def __setitem__(self, key: str, value) -> None:
self.put(key, value)
|
(self, key: str, value) -> NoneType
|
66,327 |
pandas.io.pytables
|
_check_if_open
| null |
def _check_if_open(self) -> None:
if not self.is_open:
raise ClosedFileError(f"{self._path} file is not open!")
|
(self) -> NoneType
|
66,328 |
pandas.io.pytables
|
_create_nodes_and_group
|
Create nodes from key and return group name.
|
def _create_nodes_and_group(self, key: str) -> Node:
"""Create nodes from key and return group name."""
# assertion for mypy
assert self._handle is not None
paths = key.split("/")
# recursively create the groups
path = "/"
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith("/"):
new_path += "/"
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
return group
|
(self, key: 'str') -> 'Node'
|
66,329 |
pandas.io.pytables
|
_create_storer
|
return a suitable class to operate
|
def _create_storer(
self,
group,
format=None,
value: DataFrame | Series | None = None,
encoding: str = "UTF-8",
errors: str = "strict",
) -> GenericFixed | Table:
"""return a suitable class to operate"""
cls: type[GenericFixed | Table]
if value is not None and not isinstance(value, (Series, DataFrame)):
raise TypeError("value must be None, Series, or DataFrame")
pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None))
tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
assert _table_mod is not None # for mypy
if getattr(group, "table", None) or isinstance(
group, _table_mod.table.Table
):
pt = "frame_table"
tt = "generic_table"
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed"
)
else:
if isinstance(value, Series):
pt = "series"
else:
pt = "frame"
# we are actually a table
if format == "table":
pt += "_table"
# a storer node
if "table" not in pt:
_STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed}
try:
cls = _STORER_MAP[pt]
except KeyError as err:
raise TypeError(
f"cannot properly create the storer for: [_STORER_MAP] [group->"
f"{group},value->{type(value)},format->{format}"
) from err
return cls(self, group, encoding=encoding, errors=errors)
# existing node (and must be a table)
if tt is None:
# if we are a writer, determine the tt
if value is not None:
if pt == "series_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_series"
elif index.nlevels > 1:
tt = "appendable_multiseries"
elif pt == "frame_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_frame"
elif index.nlevels > 1:
tt = "appendable_multiframe"
_TABLE_MAP = {
"generic_table": GenericTable,
"appendable_series": AppendableSeriesTable,
"appendable_multiseries": AppendableMultiSeriesTable,
"appendable_frame": AppendableFrameTable,
"appendable_multiframe": AppendableMultiFrameTable,
"worm": WORMTable,
}
try:
cls = _TABLE_MAP[tt]
except KeyError as err:
raise TypeError(
f"cannot properly create the storer for: [_TABLE_MAP] [group->"
f"{group},value->{type(value)},format->{format}"
) from err
return cls(self, group, encoding=encoding, errors=errors)
|
(self, group, format=None, value: Union[pandas.core.frame.DataFrame, pandas.core.series.Series, NoneType] = None, encoding: str = 'UTF-8', errors: str = 'strict') -> pandas.io.pytables.GenericFixed | pandas.io.pytables.Table
|
66,330 |
pandas.io.pytables
|
_identify_group
|
Identify HDF5 group based on key, delete/create group if needed.
|
def _identify_group(self, key: str, append: bool) -> Node:
"""Identify HDF5 group based on key, delete/create group if needed."""
group = self.get_node(key)
# we make this assertion for mypy; the get_node call will already
# have raised if this is incorrect
assert self._handle is not None
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
if group is None:
group = self._create_nodes_and_group(key)
return group
|
(self, key: 'str', append: 'bool') -> 'Node'
|
66,331 |
pandas.io.pytables
|
_read_group
| null |
def _read_group(self, group: Node):
s = self._create_storer(group)
s.infer_axes()
return s.read()
|
(self, group: 'Node')
|
66,332 |
pandas.io.pytables
|
_validate_format
|
validate / deprecate formats
|
def _validate_format(self, format: str) -> str:
"""validate / deprecate formats"""
# validate
try:
format = _FORMAT_MAP[format.lower()]
except KeyError as err:
raise TypeError(f"invalid HDFStore format specified [{format}]") from err
return format
|
(self, format: str) -> str
|
66,333 |
pandas.io.pytables
|
_write_to_group
| null |
def _write_to_group(
self,
key: str,
value: DataFrame | Series,
format,
axes=None,
index: bool | list[str] = True,
append: bool = False,
complib=None,
complevel: int | None = None,
fletcher32=None,
min_itemsize: int | dict[str, int] | None = None,
chunksize: int | None = None,
expectedrows=None,
dropna: bool = False,
nan_rep=None,
data_columns=None,
encoding=None,
errors: str = "strict",
track_times: bool = True,
) -> None:
# we don't want to store a table node at all if our object is 0-len
# as there are no dtypes
if getattr(value, "empty", None) and (format == "table" or append):
return
group = self._identify_group(key, append)
s = self._create_storer(group, format, value, encoding=encoding, errors=errors)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if not s.is_table or (s.is_table and format == "fixed" and s.is_exists):
raise ValueError("Can only append to Tables")
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError("Compression not supported on Fixed format stores")
# write the object
s.write(
obj=value,
axes=axes,
append=append,
complib=complib,
complevel=complevel,
fletcher32=fletcher32,
min_itemsize=min_itemsize,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
nan_rep=nan_rep,
data_columns=data_columns,
track_times=track_times,
)
if isinstance(s, Table) and index:
s.create_index(columns=index)
|
(self, key: str, value: pandas.core.frame.DataFrame | pandas.core.series.Series, format, axes=None, index: bool | list[str] = True, append: bool = False, complib=None, complevel: Optional[int] = None, fletcher32=None, min_itemsize: Union[int, dict[str, int], NoneType] = None, chunksize: Optional[int] = None, expectedrows=None, dropna: bool = False, nan_rep=None, data_columns=None, encoding=None, errors: str = 'strict', track_times: bool = True) -> NoneType
|
66,334 |
pandas.io.pytables
|
append
|
Append to Table in file.
Node must already exist and be Table format.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'table' is the default
Format to use when storing object in HDFStore. Value can be one of:
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
index : bool, default True
Write DataFrame index as a column.
append : bool, default True
Append the input data to the existing.
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum str sizes
nan_rep : str to use as str nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for str
dropna : bool, default False, optional
Do not write an all-NaN row to the store; settable
by the option 'io.hdf.dropna_table'.
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
Examples
--------
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df1, format='table') # doctest: +SKIP
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
>>> store.append('data', df2) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
A B
0 1 2
1 3 4
0 5 6
1 7 8
|
def append(
self,
key: str,
value: DataFrame | Series,
format=None,
axes=None,
index: bool | list[str] = True,
append: bool = True,
complib=None,
complevel: int | None = None,
columns=None,
min_itemsize: int | dict[str, int] | None = None,
nan_rep=None,
chunksize: int | None = None,
expectedrows=None,
dropna: bool | None = None,
data_columns: Literal[True] | list[str] | None = None,
encoding=None,
errors: str = "strict",
) -> None:
"""
Append to Table in file.
Node must already exist and be Table format.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'table' is the default
Format to use when storing object in HDFStore. Value can be one of:
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
index : bool, default True
Write DataFrame index as a column.
append : bool, default True
Append the input data to the existing.
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum str sizes
nan_rep : str to use as str nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for str
dropna : bool, default False, optional
Do not write an all-NaN row to the store; settable
by the option 'io.hdf.dropna_table'.
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
Examples
--------
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df1, format='table') # doctest: +SKIP
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
>>> store.append('data', df2) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
A B
0 1 2
1 3 4
0 5 6
1 7 8
"""
if columns is not None:
raise TypeError(
"columns is not a supported keyword in append, try data_columns"
)
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or "table"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
axes=axes,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
data_columns=data_columns,
encoding=encoding,
errors=errors,
)
|
(self, key: str, value: pandas.core.frame.DataFrame | pandas.core.series.Series, format=None, axes=None, index: bool | list[str] = True, append: bool = True, complib=None, complevel: Optional[int] = None, columns=None, min_itemsize: Union[int, dict[str, int], NoneType] = None, nan_rep=None, chunksize: Optional[int] = None, expectedrows=None, dropna: Optional[bool] = None, data_columns: Union[Literal[True], list[str], NoneType] = None, encoding=None, errors: str = 'strict') -> NoneType
|
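A hedged usage sketch for the `append` record above, going one step beyond its doctest: writing with `data_columns` so that the appended table can later be queried on disk. The key, column, and file names are illustrative, and the optional PyTables dependency is assumed to be installed.
import pandas as pd

df1 = pd.DataFrame({"A": [1, 2], "B": [10.0, 20.0]})
df2 = pd.DataFrame({"A": [3, 4], "B": [30.0, 40.0]})

with pd.HDFStore("append_demo.h5", mode="w") as store:
    # the first write creates the table; data_columns makes 'A' queryable on disk
    store.put("data", df1, format="table", data_columns=["A"])
    # append adds rows to the existing table node
    store.append("data", df2)
    subset = store.select("data", where="A > 2")  # only rows with A > 2 are read
print(subset)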
66,335 |
pandas.io.pytables
|
append_to_multiple
|
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designated as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop a row from all tables when that
row is entirely NaN in any one of the tables. Default False.
Notes
-----
axes parameter is currently not accepted
|
def append_to_multiple(
self,
d: dict,
value,
selector,
data_columns=None,
axes=None,
dropna: bool = False,
**kwargs,
) -> None:
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designated as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop a row from all tables when that
row is entirely NaN in any one of the tables. Default False.
Notes
-----
axes parameter is currently not accepted
"""
if axes is not None:
raise TypeError(
"axes is currently not accepted as a parameter to append_to_multiple; "
"you can create the tables independently instead"
)
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = next(iter(set(range(value.ndim)) - set(_AXES_MAP[type(value)])))
# figure out how to split the value
remain_key = None
remain_values: list = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how="all").index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
min_itemsize = kwargs.pop("min_itemsize", None)
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex(v, axis=axis)
filtered = (
{key: value for (key, value) in min_itemsize.items() if key in v}
if min_itemsize is not None
else None
)
self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs)
|
(self, d: dict, value, selector, data_columns=None, axes=None, dropna: bool = False, **kwargs) -> NoneType
|
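An illustrative sketch of the `append_to_multiple` record above: one frame's columns are split across two tables, with `None` marking the table that receives the remaining columns, and the pieces are then recombined with `select_as_multiple`. All names here are assumptions, not taken from the source.
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(8, 4), columns=["a", "b", "c", "d"])

with pd.HDFStore("split_demo.h5", mode="w") as store:
    # 'dfq' holds columns a/b and acts as the selector; None routes the rest to 'dfv'
    store.append_to_multiple({"dfq": ["a", "b"], "dfv": None}, df, selector="dfq")
    # recombine the pieces, filtering on a column of the selector table
    joined = store.select_as_multiple(["dfq", "dfv"], where="a > 0", selector="dfq")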
66,336 |
pandas.io.pytables
|
close
|
Close the PyTables file handle
|
def close(self) -> None:
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
|
(self) -> NoneType
|
66,337 |
pandas.io.pytables
|
copy
|
Copy the existing store to a new file, updating in place.
Parameters
----------
propindexes : bool, default True
Restore indexes in copied file.
keys : list, optional
List of keys to include in the copy (defaults to all).
overwrite : bool, default True
Whether to overwrite (remove and replace) existing nodes in the new store.
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
|
def copy(
self,
file,
mode: str = "w",
propindexes: bool = True,
keys=None,
complib=None,
complevel: int | None = None,
fletcher32: bool = False,
overwrite: bool = True,
) -> HDFStore:
"""
Copy the existing store to a new file, updating in place.
Parameters
----------
propindexes : bool, default True
Restore indexes in copied file.
keys : list, optional
List of keys to include in the copy (defaults to all).
overwrite : bool, default True
Whether to overwrite (remove and replace) existing nodes in the new store.
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
"""
new_store = HDFStore(
file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32
)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if isinstance(s, Table):
index: bool | list[str] = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k,
data,
index=index,
data_columns=getattr(s, "data_columns", None),
encoding=s.encoding,
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
|
(self, file, mode: str = 'w', propindexes: bool = True, keys=None, complib=None, complevel: Optional[int] = None, fletcher32: bool = False, overwrite: bool = True) -> pandas.io.pytables.HDFStore
|
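A rough sketch for the `copy` record above: duplicating a store into a new compressed file while re-creating indexes via `propindexes=True`. The file names and compression settings are illustrative choices.
import pandas as pd

with pd.HDFStore("copy_source.h5", mode="w") as src:
    src.append("df", pd.DataFrame({"a": range(5)}), data_columns=["a"])
    dst = src.copy(
        "copy_compressed.h5", mode="w", complib="blosc", complevel=9, propindexes=True
    )
    dst.close()  # copy() returns the already-open handle of the new store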
66,338 |
pandas.io.pytables
|
create_table_index
|
Create a pytables index on the table.
Parameters
----------
key : str
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium".
Raises
------
TypeError: raises if the node is not a table
|
def create_table_index(
self,
key: str,
columns=None,
optlevel: int | None = None,
kind: str | None = None,
) -> None:
"""
Create a pytables index on the table.
Parameters
----------
key : str
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium".
Raises
------
TypeError: raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not isinstance(s, Table):
raise TypeError("cannot create table index on a Fixed format store")
s.create_index(columns=columns, optlevel=optlevel, kind=kind)
|
(self, key: str, columns=None, optlevel: Optional[int] = None, kind: Optional[str] = None) -> NoneType
|
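A hedged sketch for the `create_table_index` record above: automatic index creation is skipped at write time (`index=False`) and a full, sorted index is built afterwards on a data column. The key/column names and the optlevel are assumptions.
import pandas as pd

df = pd.DataFrame(
    {"ts": pd.date_range("2024-01-01", periods=100, freq="h"), "value": range(100)}
)

with pd.HDFStore("index_demo.h5", mode="w") as store:
    # skip automatic index creation during the write ...
    store.append("events", df, data_columns=["ts"], index=False)
    # ... then build a fully sorted index on the 'ts' data column
    store.create_table_index("events", columns=["ts"], optlevel=9, kind="full")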
66,339 |
pandas.io.pytables
|
flush
|
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
|
def flush(self, fsync: bool = False) -> None:
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
with suppress(OSError):
os.fsync(self._handle.fileno())
|
(self, fsync: bool = False) -> NoneType
|
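A minimal sketch for the `flush` record above, pairing the PyTables flush with an OS-level fsync; as the docstring notes, whether this is enough for durability still depends on the storage stack. The file and key names are illustrative.
import pandas as pd

store = pd.HDFStore("flush_demo.h5", mode="a")
store.put("snapshot", pd.DataFrame({"x": [1, 2, 3]}))
store.flush(fsync=True)  # block until the OS claims the data has been written
store.close()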
66,340 |
pandas.io.pytables
|
get
|
Retrieve pandas object stored in file.
Parameters
----------
key : str
Returns
-------
object
Same type as object stored in file.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> store.get('data') # doctest: +SKIP
>>> store.close() # doctest: +SKIP
|
def get(self, key: str):
"""
Retrieve pandas object stored in file.
Parameters
----------
key : str
Returns
-------
object
Same type as object stored in file.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> store.get('data') # doctest: +SKIP
>>> store.close() # doctest: +SKIP
"""
with patch_pickle():
# GH#31167 Without this patch, pickle doesn't know how to unpickle
# old DateOffset objects now that they are cdef classes.
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
return self._read_group(group)
|
(self, key: str)
|
66,341 |
pandas.io.pytables
|
get_node
|
return the node with the key or None if it does not exist
|
def get_node(self, key: str) -> Node | None:
"""return the node with the key or None if it does not exist"""
self._check_if_open()
if not key.startswith("/"):
key = "/" + key
assert self._handle is not None
assert _table_mod is not None # for mypy
try:
node = self._handle.get_node(self.root, key)
except _table_mod.exceptions.NoSuchNodeError:
return None
assert isinstance(node, _table_mod.Node), type(node)
return node
|
(self, key: 'str') -> 'Node | None'
|
66,342 |
pandas.io.pytables
|
get_storer
|
return the storer object for a key, raise if not in the file
|
def get_storer(self, key: str) -> GenericFixed | Table:
"""return the storer object for a key, raise if not in the file"""
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
s = self._create_storer(group)
s.infer_axes()
return s
|
(self, key: str) -> pandas.io.pytables.GenericFixed | pandas.io.pytables.Table
|
66,343 |
pandas.io.pytables
|
groups
|
Return a list of all the top-level nodes.
Each node returned is not a pandas storage object.
Returns
-------
list
List of objects.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> print(store.groups()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
[/data (Group) ''
children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array),
'block0_items' (Array)]]
|
def groups(self) -> list:
"""
Return a list of all the top-level nodes.
Each node returned is not a pandas storage object.
Returns
-------
list
List of objects.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> print(store.groups()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
[/data (Group) ''
children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array),
'block0_items' (Array)]]
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
return [
g
for g in self._handle.walk_groups()
if (
not isinstance(g, _table_mod.link.Link)
and (
getattr(g._v_attrs, "pandas_type", None)
or getattr(g, "table", None)
or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
)
)
]
|
(self) -> list
|
66,344 |
pandas.io.pytables
|
info
|
Print detailed information on the store.
Returns
-------
str
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> print(store.info()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
<class 'pandas.io.pytables.HDFStore'>
File path: store.h5
/data frame (shape->[2,2])
|
def info(self) -> str:
"""
Print detailed information on the store.
Returns
-------
str
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> print(store.info()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
<class 'pandas.io.pytables.HDFStore'>
File path: store.h5
/data frame (shape->[2,2])
"""
path = pprint_thing(self._path)
output = f"{type(self)}\nFile path: {path}\n"
if self.is_open:
lkeys = sorted(self.keys())
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(pprint_thing(s or "invalid_HDFStore node"))
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as detail:
keys.append(k)
dstr = pprint_thing(detail)
values.append(f"[invalid_HDFStore node: {dstr}]")
output += adjoin(12, keys, values)
else:
output += "Empty"
else:
output += "File is CLOSED"
return output
|
(self) -> str
|
66,345 |
pandas.io.pytables
|
items
|
iterate on key->group
|
def items(self) -> Iterator[tuple[str, list]]:
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
|
(self) -> 'Iterator[tuple[str, list]]'
|
66,346 |
pandas.io.pytables
|
keys
|
Return a list of keys corresponding to objects stored in HDFStore.
Parameters
----------
include : str, default 'pandas'
When include equals 'pandas' return pandas objects.
When include equals 'native' return native HDF5 Table objects.
Returns
-------
list
List of ABSOLUTE path-names (e.g. have the leading '/').
Raises
------
raises ValueError if include has an illegal value
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> store.get('data') # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.close() # doctest: +SKIP
|
def keys(self, include: str = "pandas") -> list[str]:
"""
Return a list of keys corresponding to objects stored in HDFStore.
Parameters
----------
include : str, default 'pandas'
When include equals 'pandas' return pandas objects.
When include equals 'native' return native HDF5 Table objects.
Returns
-------
list
List of ABSOLUTE path-names (e.g. have the leading '/').
Raises
------
raises ValueError if include has an illegal value
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> store.get('data') # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.close() # doctest: +SKIP
"""
if include == "pandas":
return [n._v_pathname for n in self.groups()]
elif include == "native":
assert self._handle is not None # mypy
return [
n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")
]
raise ValueError(
f"`include` should be either 'pandas' or 'native' but is '{include}'"
)
|
(self, include: str = 'pandas') -> list[str]
|
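A short sketch contrasting the two `include` modes of the `keys` record above. The file and key names are made up; 'native' only surfaces nodes that are PyTables Table objects, which is why a table-format write is included.
import pandas as pd

with pd.HDFStore("keys_demo.h5", mode="w") as store:
    store.put("fixed_df", pd.DataFrame({"a": [1]}))                  # fixed format
    store.put("table_df", pd.DataFrame({"a": [1]}), format="table")  # PyTables Table
    print(store.keys())                  # pandas objects, e.g. ['/fixed_df', '/table_df']
    print(store.keys(include="native"))  # paths of native Table nodes only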
66,347 |
pandas.io.pytables
|
open
|
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
**kwargs
These parameters will be passed to the PyTables open_file method.
|
def open(self, mode: str = "a", **kwargs) -> None:
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
**kwargs
These parameters will be passed to the PyTables open_file method.
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ["a", "w"] and mode in ["r", "r+"]:
pass
elif mode in ["w"]:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
f"Re-opening the file [{self._path}] with mode [{self._mode}] "
"will delete the current file!"
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complevel and self._complevel > 0:
self._filters = _tables().Filters(
self._complevel, self._complib, fletcher32=self._fletcher32
)
if _table_file_open_policy_is_strict and self.is_open:
msg = (
"Cannot open HDF5 file, which is already opened, "
"even in read-only mode."
)
raise ValueError(msg)
self._handle = tables.open_file(self._path, self._mode, **kwargs)
|
(self, mode: str = 'a', **kwargs) -> NoneType
|
66,348 |
pandas.io.pytables
|
put
|
Store object in HDFStore.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'fixed(f)|table(t)', default is 'fixed'
Format to use when storing object in HDFStore. Value can be one of:
``'fixed'``
Fixed format. Fast writing/reading. Not-appendable, nor searchable.
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
index : bool, default True
Write DataFrame index as a column.
append : bool, default False
This will force Table format, append the input data to the existing.
data_columns : list of columns or True, default None
List of columns to create as data columns, or True to use all columns.
See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : str, default None
Provide an encoding for strings.
track_times : bool, default True
Parameter is propagated to 'create_table' method of 'PyTables'.
If set to False it enables having the same h5 files (same hashes)
independent of creation time.
dropna : bool, default False, optional
Remove missing values.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
|
def put(
self,
key: str,
value: DataFrame | Series,
format=None,
index: bool = True,
append: bool = False,
complib=None,
complevel: int | None = None,
min_itemsize: int | dict[str, int] | None = None,
nan_rep=None,
data_columns: Literal[True] | list[str] | None = None,
encoding=None,
errors: str = "strict",
track_times: bool = True,
dropna: bool = False,
) -> None:
"""
Store object in HDFStore.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'fixed(f)|table(t)', default is 'fixed'
Format to use when storing object in HDFStore. Value can be one of:
``'fixed'``
Fixed format. Fast writing/reading. Not-appendable, nor searchable.
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
index : bool, default True
Write DataFrame index as a column.
append : bool, default False
This will force Table format, append the input data to the existing.
data_columns : list of columns or True, default None
List of columns to create as data columns, or True to use all columns.
See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : str, default None
Provide an encoding for strings.
track_times : bool, default True
Parameter is propagated to 'create_table' method of 'PyTables'.
If set to False it enables having the same h5 files (same hashes)
independent of creation time.
dropna : bool, default False, optional
Remove missing values.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
"""
if format is None:
format = get_option("io.hdf.default_format") or "fixed"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
encoding=encoding,
errors=errors,
track_times=track_times,
dropna=dropna,
)
|
(self, key: str, value: pandas.core.frame.DataFrame | pandas.core.series.Series, format=None, index: bool = True, append: bool = False, complib=None, complevel: Optional[int] = None, min_itemsize: Union[int, dict[str, int], NoneType] = None, nan_rep=None, data_columns: Union[Literal[True], list[str], NoneType] = None, encoding=None, errors: str = 'strict', track_times: bool = True, dropna: bool = False) -> NoneType
|
66,349 |
pandas.io.pytables
|
remove
|
Remove pandas object partially by specifying the where condition
Parameters
----------
key : str
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Raises
------
raises KeyError if key is not a valid store
|
def remove(self, key: str, where=None, start=None, stop=None) -> None:
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : str
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Raises
------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except KeyError:
# the key is not a valid store, re-raising KeyError
raise
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as err:
# In tests we get here with ClosedFileError, TypeError, and
# _table_mod.NoSuchNodeError. TODO: Catch only these?
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!"
) from err
# we are actually trying to remove a node (with children)
node = self.get_node(key)
if node is not None:
node._f_remove(recursive=True)
return None
# remove the node
if com.all_none(where, start, stop):
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
"can only remove with where on objects written as tables"
)
return s.delete(where=where, start=start, stop=stop)
|
(self, key: str, where=None, start=None, stop=None) -> NoneType
|
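A hedged sketch for the `remove` record above, showing both behaviours: deleting only the rows matching a where clause from a table-format node, versus dropping a whole node. The names are illustrative.
import pandas as pd

df = pd.DataFrame({"x": range(10)})

with pd.HDFStore("remove_demo.h5", mode="w") as store:
    store.put("tbl", df, format="table", data_columns=True)
    store.remove("tbl", where="x >= 5")  # delete matching rows, keep the node
    store.put("scratch", df)
    store.remove("scratch")              # remove the whole node (works for any format)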
66,350 |
pandas.io.pytables
|
select
|
Retrieve pandas object stored in file, optionally based on where criteria.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
Object being retrieved from file.
where : list or None
List of Term (or convertible) objects, optional.
start : int or None
Row number to start selection.
stop : int, default None
Row number to stop selection.
columns : list or None
A list of columns that if not None, will limit the return columns.
iterator : bool, default False
Returns an iterator.
chunksize : int or None
Number of rows to include in each iteration, return an iterator.
auto_close : bool, default False
Should automatically close the store when finished.
Returns
-------
object
Retrieved object from file.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> store.get('data') # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.select('/data1') # doctest: +SKIP
A B
0 1 2
1 3 4
>>> store.select('/data1', where='columns == A') # doctest: +SKIP
A
0 1
1 3
>>> store.close() # doctest: +SKIP
|
def select(
self,
key: str,
where=None,
start=None,
stop=None,
columns=None,
iterator: bool = False,
chunksize: int | None = None,
auto_close: bool = False,
):
"""
Retrieve pandas object stored in file, optionally based on where criteria.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
Object being retrieved from file.
where : list or None
List of Term (or convertible) objects, optional.
start : int or None
Row number to start selection.
stop : int, default None
Row number to stop selection.
columns : list or None
A list of columns that if not None, will limit the return columns.
iterator : bool, default False
Returns an iterator.
chunksize : int or None
Number of rows to include in each iteration, return an iterator.
auto_close : bool, default False
Should automatically close the store when finished.
Returns
-------
object
Retrieved object from file.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df) # doctest: +SKIP
>>> store.get('data') # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.select('/data1') # doctest: +SKIP
A B
0 1 2
1 3 4
>>> store.select('/data1', where='columns == A') # doctest: +SKIP
A
0 1
1 3
>>> store.close() # doctest: +SKIP
"""
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop, where=_where, columns=columns)
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=s.nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result()
|
(self, key: str, where=None, start=None, stop=None, columns=None, iterator: bool = False, chunksize: Optional[int] = None, auto_close: bool = False)
|
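A sketch of chunked reading with the `select` record above: passing `chunksize` returns an iterator of DataFrames instead of materialising the whole table at once. Sizes and names are arbitrary.
import pandas as pd

df = pd.DataFrame({"x": range(1000)})

with pd.HDFStore("select_demo.h5", mode="w") as store:
    store.put("big", df, format="table")
    total = 0
    for chunk in store.select("big", chunksize=250):
        total += len(chunk)  # process 250-row pieces at a time
    assert total == len(df)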
66,351 |
pandas.io.pytables
|
select_as_coordinates
|
return the selection as an Index
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
|
def select_as_coordinates(
self,
key: str,
where=None,
start: int | None = None,
stop: int | None = None,
):
"""
return the selection as an Index
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_coordinates with a table")
return tbl.read_coordinates(where=where, start=start, stop=stop)
|
(self, key: str, where=None, start: Optional[int] = None, stop: Optional[int] = None)
|
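An illustrative sketch for the `select_as_coordinates` record above: a where clause is resolved to row coordinates once, and those coordinates are reused in a later `select` on the same table. Names are assumptions.
import pandas as pd

df = pd.DataFrame({"x": range(10), "y": range(10)})

with pd.HDFStore("coords_demo.h5", mode="w") as store:
    store.append("tbl", df, data_columns=["x"])
    coords = store.select_as_coordinates("tbl", where="x > 6")  # Index of row numbers
    subset = store.select("tbl", where=coords)                  # reuse the coordinates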
66,352 |
pandas.io.pytables
|
select_as_multiple
|
Retrieve pandas objects from multiple tables.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : bool, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : bool, default False
Should automatically close the store when finished.
Raises
------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
|
def select_as_multiple(
self,
keys,
where=None,
selector=None,
columns=None,
start=None,
stop=None,
iterator: bool = False,
chunksize: int | None = None,
auto_close: bool = False,
):
"""
Retrieve pandas objects from multiple tables.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : bool, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : bool, default False
Should automatically close the store when finished.
Raises
------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, str):
return self.select(
key=keys,
where=where,
columns=columns,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
if t is None:
raise KeyError(f"Invalid table [{k}]")
if not t.is_table:
raise TypeError(
f"object [{t.pathname}] is not a table, and cannot be used in all "
"select as multiple"
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError("all tables must have exactly the same nrows!")
# The isinstance checks here are redundant with the check above,
# but necessary for mypy; see GH#29757
_tbls = [x for x in tbls if isinstance(x, Table)]
# axis is the concatenation axis
axis = {t.non_index_axes[0][0] for t in _tbls}.pop()
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of
# coordinates here
objs = [
t.read(where=_where, columns=columns, start=_start, stop=_stop)
for t in tbls
]
# concat and return
return concat(objs, axis=axis, verify_integrity=False)._consolidate()
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result(coordinates=True)
|
(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator: bool = False, chunksize: Optional[int] = None, auto_close: bool = False)
|
66,353 |
pandas.io.pytables
|
select_column
|
return a single column from the table. This is generally only useful to
select an indexable
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
column : str
The column of interest.
start : int or None, default None
stop : int or None, default None
Raises
------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
|
def select_column(
self,
key: str,
column: str,
start: int | None = None,
stop: int | None = None,
):
"""
return a single column from the table. This is generally only useful to
select an indexable
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
column : str
The column of interest.
start : int or None, default None
stop : int or None, default None
Raises
------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_column with a table")
return tbl.read_column(column=column, start=start, stop=stop)
|
(self, key: str, column: str, start: Optional[int] = None, stop: Optional[int] = None)
|
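A hedged sketch for the `select_column` record above: a single data column is pulled back to build a row mask, and the resulting row numbers are fed into `select`. The names are illustrative.
import pandas as pd

df = pd.DataFrame({"x": range(5), "y": list("abcde")})

with pd.HDFStore("column_demo.h5", mode="w") as store:
    store.append("tbl", df, data_columns=["y"])
    ycol = store.select_column("tbl", "y")      # returned as a Series of row values
    rows = ycol[ycol.isin(["b", "d"])].index    # row numbers of interest
    picked = store.select("tbl", where=rows)    # read only those rows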
66,354 |
pandas.io.pytables
|
walk
|
Walk the pytables group hierarchy for pandas objects.
This generator will yield the group path, subgroups and pandas object
names for each group.
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure.
Parameters
----------
where : str, default "/"
Group where to start walking.
Yields
------
path : str
Full path to a group (without trailing '/').
groups : list
Names (strings) of the groups contained in `path`.
leaves : list
Names (strings) of the pandas objects contained in `path`.
Examples
--------
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df1, format='table') # doctest: +SKIP
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
>>> store.append('data', df2) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
>>> for group in store.walk(): # doctest: +SKIP
... print(group) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
|
def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]:
"""
Walk the pytables group hierarchy for pandas objects.
This generator will yield the group path, subgroups and pandas object
names for each group.
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure.
Parameters
----------
where : str, default "/"
Group where to start walking.
Yields
------
path : str
Full path to a group (without trailing '/').
groups : list
Names (strings) of the groups contained in `path`.
leaves : list
Names (strings) of the pandas objects contained in `path`.
Examples
--------
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
>>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP
>>> store.put('data', df1, format='table') # doctest: +SKIP
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B'])
>>> store.append('data', df2) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
>>> for group in store.walk(): # doctest: +SKIP
... print(group) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
for g in self._handle.walk_groups(where):
if getattr(g._v_attrs, "pandas_type", None) is not None:
continue
groups = []
leaves = []
for child in g._v_children.values():
pandas_type = getattr(child._v_attrs, "pandas_type", None)
if pandas_type is None:
if isinstance(child, _table_mod.group.Group):
groups.append(child._v_name)
else:
leaves.append(child._v_name)
yield (g._v_pathname.rstrip("/"), groups, leaves)
|
(self, where: 'str' = '/') -> 'Iterator[tuple[str, list[str], list[str]]]'
|
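A small sketch for the `walk` record above, with a nested key so the (path, groups, leaves) triples actually show a hierarchy. The key names are made up.
import pandas as pd

with pd.HDFStore("walk_demo.h5", mode="w") as store:
    store.put("top", pd.DataFrame({"a": [1]}))
    store.put("nested/child", pd.DataFrame({"b": [2]}))
    for path, groups, leaves in store.walk():
        print(path, groups, leaves)  # first the root, then '/nested'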
66,355 |
pandas.core.indexes.base
|
Index
|
Immutable sequence used for indexing and alignment.
The basic object storing axis labels for all pandas objects.
.. versionchanged:: 2.0.0
Index can hold all numpy numeric dtypes (except float16). Previously only
int64/uint64/float64 dtypes were accepted.
Parameters
----------
data : array-like (1-dimensional)
dtype : str, numpy.dtype, or ExtensionDtype, optional
Data type for the output Index. If not specified, this will be
inferred from `data`.
See the :ref:`user guide <basics.dtypes>` for more usages.
copy : bool, default False
Copy input data.
name : object
Name to be stored in the index.
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible.
See Also
--------
RangeIndex : Index implementing a monotonic integer range.
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical Index.
IntervalIndex : An Index of :class:`Interval` s.
DatetimeIndex : Index of datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
PeriodIndex : Index of Period data.
Notes
-----
An Index instance can **only** contain hashable objects.
An Index instance *can not* hold numpy float16 dtype.
Examples
--------
>>> pd.Index([1, 2, 3])
Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
>>> pd.Index([1, 2, 3], dtype="uint8")
Index([1, 2, 3], dtype='uint8')
|
class Index(IndexOpsMixin, PandasObject):
"""
Immutable sequence used for indexing and alignment.
The basic object storing axis labels for all pandas objects.
.. versionchanged:: 2.0.0
Index can hold all numpy numeric dtypes (except float16). Previously only
int64/uint64/float64 dtypes were accepted.
Parameters
----------
data : array-like (1-dimensional)
dtype : str, numpy.dtype, or ExtensionDtype, optional
Data type for the output Index. If not specified, this will be
inferred from `data`.
See the :ref:`user guide <basics.dtypes>` for more usages.
copy : bool, default False
Copy input data.
name : object
Name to be stored in the index.
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible.
See Also
--------
RangeIndex : Index implementing a monotonic integer range.
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical Index.
IntervalIndex : An Index of :class:`Interval` s.
DatetimeIndex : Index of datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
PeriodIndex : Index of Period data.
Notes
-----
An Index instance can **only** contain hashable objects.
An Index instance *can not* hold numpy float16 dtype.
Examples
--------
>>> pd.Index([1, 2, 3])
Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
>>> pd.Index([1, 2, 3], dtype="uint8")
Index([1, 2, 3], dtype='uint8')
"""
# similar to __array_priority__, positions Index after Series and DataFrame
# but before ExtensionArray. Should NOT be overridden by subclasses.
__pandas_priority__ = 2000
# Cython methods; see github.com/cython/cython/issues/2647
# for why we need to wrap these instead of making them class attributes
# Moreover, cython will choose the appropriate-dtyped sub-function
# given the dtypes of the passed arguments
@final
def _left_indexer_unique(self, other: Self) -> npt.NDArray[np.intp]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
# similar but not identical to ov.searchsorted(sv)
return libjoin.left_join_indexer_unique(sv, ov)
@final
def _left_indexer(
self, other: Self
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov)
joined = self._from_join_target(joined_ndarray)
return joined, lidx, ridx
@final
def _inner_indexer(
self, other: Self
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov)
joined = self._from_join_target(joined_ndarray)
return joined, lidx, ridx
@final
def _outer_indexer(
self, other: Self
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov)
joined = self._from_join_target(joined_ndarray)
return joined, lidx, ridx
_typ: str = "index"
_data: ExtensionArray | np.ndarray
_data_cls: type[ExtensionArray] | tuple[type[np.ndarray], type[ExtensionArray]] = (
np.ndarray,
ExtensionArray,
)
_id: object | None = None
_name: Hashable = None
# MultiIndex.levels previously allowed setting the index name. We
# don't allow this anymore, and raise if it happens rather than
# failing silently.
_no_setting_name: bool = False
_comparables: list[str] = ["name"]
_attributes: list[str] = ["name"]
@cache_readonly
def _can_hold_strings(self) -> bool:
return not is_numeric_dtype(self.dtype)
_engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = {
np.dtype(np.int8): libindex.Int8Engine,
np.dtype(np.int16): libindex.Int16Engine,
np.dtype(np.int32): libindex.Int32Engine,
np.dtype(np.int64): libindex.Int64Engine,
np.dtype(np.uint8): libindex.UInt8Engine,
np.dtype(np.uint16): libindex.UInt16Engine,
np.dtype(np.uint32): libindex.UInt32Engine,
np.dtype(np.uint64): libindex.UInt64Engine,
np.dtype(np.float32): libindex.Float32Engine,
np.dtype(np.float64): libindex.Float64Engine,
np.dtype(np.complex64): libindex.Complex64Engine,
np.dtype(np.complex128): libindex.Complex128Engine,
}
@property
def _engine_type(
self,
) -> type[libindex.IndexEngine | libindex.ExtensionEngine]:
return self._engine_types.get(self.dtype, libindex.ObjectEngine)
# whether we support partial string indexing. Overridden
# in DatetimeIndex and PeriodIndex
_supports_partial_string_indexing = False
_accessors = {"str"}
str = CachedAccessor("str", StringMethods)
_references = None
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
data=None,
dtype=None,
copy: bool = False,
name=None,
tupleize_cols: bool = True,
) -> Self:
from pandas.core.indexes.range import RangeIndex
name = maybe_extract_name(name, data, cls)
if dtype is not None:
dtype = pandas_dtype(dtype)
data_dtype = getattr(data, "dtype", None)
refs = None
if not copy and isinstance(data, (ABCSeries, Index)):
refs = data._references
is_pandas_object = isinstance(data, (ABCSeries, Index, ExtensionArray))
# range
if isinstance(data, (range, RangeIndex)):
result = RangeIndex(start=data, copy=copy, name=name)
if dtype is not None:
return result.astype(dtype, copy=False)
# error: Incompatible return value type (got "MultiIndex",
# expected "Self")
return result # type: ignore[return-value]
elif is_ea_or_datetimelike_dtype(dtype):
# non-EA dtype indexes have special casting logic, so we punt here
pass
elif is_ea_or_datetimelike_dtype(data_dtype):
pass
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if isinstance(data, ABCMultiIndex):
data = data._values
if data.dtype.kind not in "iufcbmM":
# GH#11836 we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
data = com.asarray_tuplesafe(data, dtype=_dtype_obj)
elif is_scalar(data):
raise cls._raise_scalar_data_error(data)
elif hasattr(data, "__array__"):
return cls(np.asarray(data), dtype=dtype, copy=copy, name=name)
elif not is_list_like(data) and not isinstance(data, memoryview):
# 2022-11-16 the memoryview check is only necessary on some CI
# builds, not clear why
raise cls._raise_scalar_data_error(data)
else:
if tupleize_cols:
# GH21470: convert iterable to list before determining if empty
if is_iterator(data):
data = list(data)
if data and all(isinstance(e, tuple) for e in data):
# we must be all tuples, otherwise don't construct
# 10697
from pandas.core.indexes.multi import MultiIndex
# error: Incompatible return value type (got "MultiIndex",
# expected "Self")
return MultiIndex.from_tuples( # type: ignore[return-value]
data, names=name
)
# other iterable of some kind
if not isinstance(data, (list, tuple)):
# we allow set/frozenset, which Series/sanitize_array does not, so
# cast to list here
data = list(data)
if len(data) == 0:
# unlike Series, we default to object dtype:
data = np.array(data, dtype=object)
if len(data) and isinstance(data[0], tuple):
# Ensure we get 1-D array of tuples instead of 2D array.
data = com.asarray_tuplesafe(data, dtype=_dtype_obj)
try:
arr = sanitize_array(data, None, dtype=dtype, copy=copy)
except ValueError as err:
if "index must be specified when data is not list-like" in str(err):
raise cls._raise_scalar_data_error(data) from err
if "Data must be 1-dimensional" in str(err):
raise ValueError("Index data must be 1-dimensional") from err
raise
arr = ensure_wrapped_if_datetimelike(arr)
klass = cls._dtype_to_subclass(arr.dtype)
arr = klass._ensure_array(arr, arr.dtype, copy=False)
result = klass._simple_new(arr, name, refs=refs)
if dtype is None and is_pandas_object and data_dtype == np.object_:
if result.dtype != data_dtype:
warnings.warn(
"Dtype inference on a pandas object "
"(Series, Index, ExtensionArray) is deprecated. The Index "
"constructor will keep the original dtype in the future. "
"Call `infer_objects` on the result to get the old "
"behavior.",
FutureWarning,
stacklevel=2,
)
return result # type: ignore[return-value]
@classmethod
def _ensure_array(cls, data, dtype, copy: bool):
"""
Ensure we have a valid array to pass to _simple_new.
"""
if data.ndim > 1:
# GH#13601, GH#20285, GH#27125
raise ValueError("Index data must be 1-dimensional")
elif dtype == np.float16:
# float16 not supported (no indexing engine)
raise NotImplementedError("float16 indexes are not supported")
if copy:
# asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
data = data.copy()
return data
@final
@classmethod
def _dtype_to_subclass(cls, dtype: DtypeObj):
# Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
if isinstance(dtype, ExtensionDtype):
return dtype.index_class
if dtype.kind == "M":
from pandas import DatetimeIndex
return DatetimeIndex
elif dtype.kind == "m":
from pandas import TimedeltaIndex
return TimedeltaIndex
elif dtype.kind == "O":
# NB: assuming away MultiIndex
return Index
elif issubclass(dtype.type, str) or is_numeric_dtype(dtype):
return Index
raise NotImplementedError(dtype)
# NOTE for new Index creation:
# - _simple_new: It returns new Index with the same type as the caller.
# All metadata (such as name) must be provided by caller's responsibility.
# Using _shallow_copy is recommended because it fills these metadata
# otherwise specified.
# - _shallow_copy: It returns new Index with the same type (using
# _simple_new), but fills caller's metadata otherwise specified. Passed
# kwargs will overwrite corresponding metadata.
# See each method's docstring.
@classmethod
def _simple_new(
cls, values: ArrayLike, name: Hashable | None = None, refs=None
) -> Self:
"""
We require that the values have a compatible dtype. If we are passed
values with an incompatible dtype, coerce using the constructor.
Must be careful not to recurse.
"""
assert isinstance(values, cls._data_cls), type(values)
result = object.__new__(cls)
result._data = values
result._name = name
result._cache = {}
result._reset_identity()
if refs is not None:
result._references = refs
else:
result._references = BlockValuesRefs()
result._references.add_index_reference(result)
return result
@classmethod
def _with_infer(cls, *args, **kwargs):
"""
Constructor that uses the 1.0.x behavior inferring numeric dtypes
for ndarray[object] inputs.
"""
result = cls(*args, **kwargs)
if result.dtype == _dtype_obj and not result._is_multi:
# error: Argument 1 to "maybe_convert_objects" has incompatible type
# "Union[ExtensionArray, ndarray[Any, Any]]"; expected
# "ndarray[Any, Any]"
values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type]
if values.dtype.kind in "iufb":
return Index(values, name=result.name)
return result
@cache_readonly
def _constructor(self) -> type[Self]:
return type(self)
@final
def _maybe_check_unique(self) -> None:
"""
Check that an Index has no duplicates.
This is typically only called via
`NDFrame.flags.allows_duplicate_labels.setter` when it's set to
True (duplicates aren't allowed).
Raises
------
DuplicateLabelError
When the index is not unique.
"""
if not self.is_unique:
msg = """Index has duplicates."""
duplicates = self._format_duplicate_message()
msg += f"\n{duplicates}"
raise DuplicateLabelError(msg)
@final
def _format_duplicate_message(self) -> DataFrame:
"""
Construct the DataFrame for a DuplicateLabelError.
This returns a DataFrame indicating the labels and positions
of duplicates in an index. This should only be called when it's
already known that duplicates are present.
Examples
--------
>>> idx = pd.Index(['a', 'b', 'a'])
>>> idx._format_duplicate_message()
positions
label
a [0, 2]
"""
from pandas import Series
duplicates = self[self.duplicated(keep="first")].unique()
assert len(duplicates)
out = (
Series(np.arange(len(self)), copy=False)
.groupby(self, observed=False)
.agg(list)[duplicates]
)
if self._is_multi:
# test_format_duplicate_labels_message_multi
# error: "Type[Index]" has no attribute "from_tuples" [attr-defined]
out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined]
if self.nlevels == 1:
out = out.rename_axis("label")
return out.to_frame(name="positions")
# --------------------------------------------------------------------
# Index Internals Methods
def _shallow_copy(self, values, name: Hashable = no_default) -> Self:
"""
Create a new Index with the same class as the caller, without copying the
data; reuse the caller's object attributes, with passed-in attributes taking
precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
name : Label, defaults to self.name
"""
name = self._name if name is no_default else name
return self._simple_new(values, name=name, refs=self._references)
def _view(self) -> Self:
"""
fastpath to make a shallow copy, i.e. new object with same data.
"""
result = self._simple_new(self._values, name=self._name, refs=self._references)
result._cache = self._cache
return result
@final
def _rename(self, name: Hashable) -> Self:
"""
fastpath for rename if new name is already validated.
"""
result = self._view()
result._name = name
return result
@final
def is_(self, other) -> bool:
"""
More flexible, faster check like ``is`` but that works through views.
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
Other object to compare against.
Returns
-------
bool
True if both have same underlying data, False otherwise.
See Also
--------
Index.identical : Works like ``Index.is_`` but also checks metadata.
Examples
--------
>>> idx1 = pd.Index(['1', '2', '3'])
>>> idx1.is_(idx1.view())
True
>>> idx1.is_(idx1.copy())
False
"""
if self is other:
return True
elif not hasattr(other, "_id"):
return False
elif self._id is None or other._id is None:
return False
else:
return self._id is other._id
@final
def _reset_identity(self) -> None:
"""
Initializes or resets ``_id`` attribute with new object.
"""
self._id = object()
@final
def _cleanup(self) -> None:
self._engine.clear_mapping()
@cache_readonly
def _engine(
self,
) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine:
# For base class (object dtype) we get ObjectEngine
target_values = self._get_engine_target()
if isinstance(self._values, ArrowExtensionArray) and self.dtype.kind in "Mm":
import pyarrow as pa
pa_type = self._values._pa_array.type
if pa.types.is_timestamp(pa_type):
target_values = self._values._to_datetimearray()
return libindex.DatetimeEngine(target_values._ndarray)
elif pa.types.is_duration(pa_type):
target_values = self._values._to_timedeltaarray()
return libindex.TimedeltaEngine(target_values._ndarray)
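# For extension arrays, prefer a dedicated masked engine (masked/Arrow-backed
# dtypes) when one is registered; otherwise fall back to ExtensionEngine or
# the dtype-specific engine selected below.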
if isinstance(target_values, ExtensionArray):
if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)):
try:
return _masked_engines[target_values.dtype.name](target_values)
except KeyError:
# Not supported yet e.g. decimal
pass
elif self._engine_type is libindex.ObjectEngine:
return libindex.ExtensionEngine(target_values)
target_values = cast(np.ndarray, target_values)
# to avoid a reference cycle, bind `target_values` to a local variable, so
# `self` is not passed into the lambda.
if target_values.dtype == bool:
return libindex.BoolEngine(target_values)
elif target_values.dtype == np.complex64:
return libindex.Complex64Engine(target_values)
elif target_values.dtype == np.complex128:
return libindex.Complex128Engine(target_values)
elif needs_i8_conversion(self.dtype):
# We need to keep M8/m8 dtype when initializing the Engine,
# but don't want to change _get_engine_target bc it is used
# elsewhere
# error: Item "ExtensionArray" of "Union[ExtensionArray,
# ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr]
target_values = self._data._ndarray # type: ignore[union-attr]
# error: Argument 1 to "ExtensionEngine" has incompatible type
# "ndarray[Any, Any]"; expected "ExtensionArray"
return self._engine_type(target_values) # type: ignore[arg-type]
@final
@cache_readonly
def _dir_additions_for_owner(self) -> set[str_t]:
"""
Add the string-like labels to the owner dataframe/series dir output.
If this is a MultiIndex, its first-level values are used.
"""
return {
c
for c in self.unique(level=0)[: get_option("display.max_dir_items")]
if isinstance(c, str) and c.isidentifier()
}
# --------------------------------------------------------------------
# Array-Like Methods
# ndarray compat
def __len__(self) -> int:
"""
Return the length of the Index.
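Examples
--------
A minimal example:
>>> len(pd.Index([1, 2, 3]))
3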
"""
return len(self._data)
def __array__(self, dtype=None, copy=None) -> np.ndarray:
"""
The array interface, return my values.
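Examples
--------
A minimal example:
>>> np.asarray(pd.Index(['a', 'b']))
array(['a', 'b'], dtype=object)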
"""
return np.asarray(self._data, dtype=dtype)
def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs):
if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs):
return NotImplemented
result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
if "out" in kwargs:
# e.g. test_dti_isub_tdi
return arraylike.dispatch_ufunc_with_out(
self, ufunc, method, *inputs, **kwargs
)
if method == "reduce":
result = arraylike.dispatch_reduction_ufunc(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
new_inputs = [x if x is not self else x._values for x in inputs]
result = getattr(ufunc, method)(*new_inputs, **kwargs)
if ufunc.nout == 2:
# i.e. np.divmod, np.modf, np.frexp
return tuple(self.__array_wrap__(x) for x in result)
elif method == "reduce":
result = lib.item_from_zerodim(result)
return result
if result.dtype == np.float16:
result = result.astype(np.float32)
return self.__array_wrap__(result)
@final
def __array_wrap__(self, result, context=None, return_scalar=False):
"""
Gets called after a ufunc and other functions e.g. np.split.
"""
result = lib.item_from_zerodim(result)
if (not isinstance(result, Index) and is_bool_dtype(result.dtype)) or np.ndim(
result
) > 1:
# exclude Index to avoid warning from is_bool_dtype deprecation;
# in the Index case it doesn't matter which path we go down.
# reached in plotting tests with e.g. np.nonzero(index)
return result
return Index(result, name=self.name)
@cache_readonly
def dtype(self) -> DtypeObj:
"""
Return the dtype object of the underlying data.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.dtype
dtype('int64')
"""
return self._data.dtype
@final
def ravel(self, order: str_t = "C") -> Self:
"""
Return a view on self.
Returns
-------
Index
See Also
--------
numpy.ndarray.ravel : Return a flattened array.
Examples
--------
>>> s = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
>>> s.index.ravel()
Index(['a', 'b', 'c'], dtype='object')
"""
return self[:]
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, "_typ"):
dtype = cls
if isinstance(cls, str):
dtype = pandas_dtype(cls)
if needs_i8_conversion(dtype):
idx_cls = self._dtype_to_subclass(dtype)
arr = self.array.view(dtype)
if isinstance(arr, ExtensionArray):
# here we exclude non-supported dt64/td64 dtypes
return idx_cls._simple_new(
arr, name=self.name, refs=self._references
)
return arr
result = self._data.view(cls)
else:
if cls is not None:
warnings.warn(
# GH#55709
f"Passing a type in {type(self).__name__}.view is deprecated "
"and will raise in a future version. "
"Call view without any argument to retain the old behavior.",
FutureWarning,
stacklevel=find_stack_level(),
)
result = self._view()
if isinstance(result, Index):
result._id = self._id
return result
def astype(self, dtype, copy: bool = True):
"""
Create an Index with values cast to dtypes.
The class of a new Index is determined by dtype. When conversion is
impossible, a TypeError exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
Note that any signed integer `dtype` is treated as ``'int64'``,
and any unsigned integer `dtype` is treated as ``'uint64'``,
regardless of the size.
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
Returns
-------
Index
Index with values cast to specified dtype.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.astype('float')
Index([1.0, 2.0, 3.0], dtype='float64')
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if self.dtype == dtype:
# Ensure that self.astype(self.dtype) is self
return self.copy() if copy else self
values = self._data
if isinstance(values, ExtensionArray):
with rewrite_exception(type(values).__name__, type(self).__name__):
new_values = values.astype(dtype, copy=copy)
elif isinstance(dtype, ExtensionDtype):
cls = dtype.construct_array_type()
# Note: for RangeIndex and CategoricalDtype self vs self._values
# behaves differently here.
new_values = cls._from_sequence(self, dtype=dtype, copy=copy)
else:
# GH#13149 specifically use astype_array instead of astype
new_values = astype_array(values, dtype=dtype, copy=copy)
# pass copy=False because any copying will be done in the astype above
result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False)
if (
not copy
and self._references is not None
and astype_is_view(self.dtype, dtype)
):
result._references = self._references
result._references.add_index_reference(result)
return result
_index_shared_docs[
"take"
] = """
Return a new %(klass)s of the values selected by the indices.
For internal compatibility with numpy arrays.
Parameters
----------
indices : array-like
Indices to be taken.
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : scalar, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 are regarded as NA. If Index doesn't hold NA, raise ValueError.
Returns
-------
Index
An index formed of elements at the given indices. Will be the same
type as self, except for RangeIndex.
See Also
--------
numpy.ndarray.take: Return an array formed from the
elements of a at the given indices.
Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.take([2, 2, 1, 2])
Index(['c', 'c', 'b', 'c'], dtype='object')
"""
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(
self,
indices,
axis: Axis = 0,
allow_fill: bool = True,
fill_value=None,
**kwargs,
) -> Self:
if kwargs:
nv.validate_take((), kwargs)
if is_scalar(indices):
raise TypeError("Expected indices to be array-like")
indices = ensure_platform_int(indices)
allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)
# Note: we discard fill_value and use self._na_value, only relevant
# in the case where allow_fill is True and fill_value is not None
values = self._values
if isinstance(values, np.ndarray):
taken = algos.take(
values, indices, allow_fill=allow_fill, fill_value=self._na_value
)
else:
# algos.take passes 'axis' keyword which not all EAs accept
taken = values.take(
indices, allow_fill=allow_fill, fill_value=self._na_value
)
return self._constructor._simple_new(taken, name=self.name)
@final
def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool:
"""
We only use pandas-style take when allow_fill is True _and_
fill_value is not None.
"""
if allow_fill and fill_value is not None:
# only fill if we are passing a non-None fill_value
if self._can_hold_na:
if (indices < -1).any():
raise ValueError(
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
else:
cls_name = type(self).__name__
raise ValueError(
f"Unable to fill values because {cls_name} cannot contain NA"
)
else:
allow_fill = False
return allow_fill
_index_shared_docs[
"repeat"
] = """
Repeat elements of a %(klass)s.
Returns a new %(klass)s where each element of the current %(klass)s
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
%(klass)s.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
%(klass)s
Newly created %(klass)s with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx
Index(['a', 'b', 'c'], dtype='object')
>>> idx.repeat(2)
Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object')
>>> idx.repeat([1, 2, 3])
Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object')
"""
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats, axis: None = None) -> Self:
repeats = ensure_platform_int(repeats)
nv.validate_repeat((), {"axis": axis})
res_values = self._values.repeat(repeats)
# _constructor so RangeIndex-> Index with an int64 dtype
return self._constructor._simple_new(res_values, name=self.name)
# --------------------------------------------------------------------
# Copying Methods
def copy(
self,
name: Hashable | None = None,
deep: bool = False,
) -> Self:
"""
Make a copy of this object.
Name is set on the new object.
Parameters
----------
name : Label, optional
Set name for new object.
deep : bool, default False
Returns
-------
Index
Index refer to new object which is a copy of this object.
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> new_idx = idx.copy()
>>> idx is new_idx
False
"""
name = self._validate_names(name=name, deep=deep)[0]
if deep:
new_data = self._data.copy()
new_index = type(self)._simple_new(new_data, name=name)
else:
new_index = self._rename(name=name)
return new_index
@final
def __copy__(self, **kwargs) -> Self:
return self.copy(**kwargs)
@final
def __deepcopy__(self, memo=None) -> Self:
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
return self.copy(deep=True)
# --------------------------------------------------------------------
# Rendering Methods
@final
def __repr__(self) -> str_t:
"""
Return a string representation for this object.
"""
klass_name = type(self).__name__
data = self._format_data()
attrs = self._format_attrs()
attrs_str = [f"{k}={v}" for k, v in attrs]
prepr = ", ".join(attrs_str)
return f"{klass_name}({data}{prepr})"
@property
def _formatter_func(self):
"""
Return the formatter function.
"""
return default_pprint
@final
def _format_data(self, name=None) -> str_t:
"""
Return the formatted data as a unicode string.
"""
# do we want to justify (only do so for non-objects)
is_justify = True
if self.inferred_type == "string":
is_justify = False
elif isinstance(self.dtype, CategoricalDtype):
self = cast("CategoricalIndex", self)
if is_object_dtype(self.categories.dtype):
is_justify = False
elif isinstance(self, ABCRangeIndex):
# We will do the relevant formatting via attrs
return ""
return format_object_summary(
self,
self._formatter_func,
is_justify=is_justify,
name=name,
line_break_each_value=self._is_multi,
)
def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]:
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs: list[tuple[str_t, str_t | int | bool | None]] = []
if not self._is_multi:
attrs.append(("dtype", f"'{self.dtype}'"))
if self.name is not None:
attrs.append(("name", default_pprint(self.name)))
elif self._is_multi and any(x is not None for x in self.names):
attrs.append(("names", default_pprint(self.names)))
max_seq_items = get_option("display.max_seq_items") or len(self)
if len(self) > max_seq_items:
attrs.append(("length", len(self)))
return attrs
@final
def _get_level_names(self) -> Hashable | Sequence[Hashable]:
"""
Return a name or list of names with None replaced by the level number.
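For a flat Index this is the name, or 0 if the Index is unnamed:
>>> pd.Index([1, 2], name="x")._get_level_names()
'x'
>>> pd.Index([1, 2])._get_level_names()
0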
"""
if self._is_multi:
return [
level if name is None else name for level, name in enumerate(self.names)
]
else:
return 0 if self.name is None else self.name
@final
def _mpl_repr(self) -> np.ndarray:
# how to represent ourselves to matplotlib
if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M":
return cast(np.ndarray, self.values)
return self.astype(object, copy=False)._values
def format(
self,
name: bool = False,
formatter: Callable | None = None,
na_rep: str_t = "NaN",
) -> list[str_t]:
"""
Render a string representation of the Index.
"""
warnings.warn(
# GH#55413
f"{type(self).__name__}.format is deprecated and will be removed "
"in a future version. Convert using index.astype(str) or "
"index.map(formatter) instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
header = []
if name:
header.append(
pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
if self.name is not None
else ""
)
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header=header, na_rep=na_rep)
_default_na_rep = "NaN"
@final
def _format_flat(
self,
*,
include_name: bool,
formatter: Callable | None = None,
) -> list[str_t]:
"""
Render a string representation of the Index.
"""
header = []
if include_name:
header.append(
pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
if self.name is not None
else ""
)
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header=header, na_rep=self._default_na_rep)
def _format_with_header(self, *, header: list[str_t], na_rep: str_t) -> list[str_t]:
from pandas.io.formats.format import format_array
values = self._values
if (
is_object_dtype(values.dtype)
or is_string_dtype(values.dtype)
or isinstance(self.dtype, (IntervalDtype, CategoricalDtype))
):
# TODO: why do we need different justify for these cases?
justify = "all"
else:
justify = "left"
# passing leading_space=False breaks test_format_missing,
# test_index_repr_in_frame_with_nan, but would otherwise make
# trim_front unnecessary
formatted = format_array(values, None, justify=justify)
result = trim_front(formatted)
return header + result
def _get_values_for_csv(
self,
*,
na_rep: str_t = "",
decimal: str_t = ".",
float_format=None,
date_format=None,
quoting=None,
) -> npt.NDArray[np.object_]:
return get_values_for_csv(
self._values,
na_rep=na_rep,
decimal=decimal,
float_format=float_format,
date_format=date_format,
quoting=quoting,
)
def _summary(self, name=None) -> str_t:
"""
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
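Examples
--------
A minimal illustration:
>>> pd.Index([1, 2, 3])._summary()
'Index: 3 entries, 1 to 3'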
"""
if len(self) > 0:
head = self[0]
if hasattr(head, "format") and not isinstance(head, str):
head = head.format()
elif needs_i8_conversion(self.dtype):
# e.g. Timedelta, display as values, not quoted
head = self._formatter_func(head).replace("'", "")
tail = self[-1]
if hasattr(tail, "format") and not isinstance(tail, str):
tail = tail.format()
elif needs_i8_conversion(self.dtype):
# e.g. Timedelta, display as values, not quoted
tail = self._formatter_func(tail).replace("'", "")
index_summary = f", {head} to {tail}"
else:
index_summary = ""
if name is None:
name = type(self).__name__
return f"{name}: {len(self)} entries{index_summary}"
# --------------------------------------------------------------------
# Conversion Methods
def to_flat_index(self) -> Self:
"""
Identity method.
This is implemented for compatibility with subclass implementations
when chaining.
Returns
-------
pd.Index
Caller.
See Also
--------
MultiIndex.to_flat_index : Subclass implementation.
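Examples
--------
For a flat Index this is a no-op and the caller is returned unchanged:
>>> idx = pd.Index([1, 2, 3])
>>> idx.to_flat_index() is idx
True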
"""
return self
@final
def to_series(self, index=None, name: Hashable | None = None) -> Series:
"""
Create a Series with both index and values equal to the index keys.
Useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
Index of resulting Series. If None, defaults to original index.
name : str, optional
Name of resulting Series. If None, defaults to name of original
index.
Returns
-------
Series
The dtype will be based on the type of the Index values.
See Also
--------
Index.to_frame : Convert an Index to a DataFrame.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
By default, the original index and original name are reused.
>>> idx.to_series()
animal
Ant Ant
Bear Bear
Cow Cow
Name: animal, dtype: object
To enforce a new index, specify new labels to ``index``:
>>> idx.to_series(index=[0, 1, 2])
0 Ant
1 Bear
2 Cow
Name: animal, dtype: object
To override the name of the resulting column, specify ``name``:
>>> idx.to_series(name='zoo')
animal
Ant Ant
Bear Bear
Cow Cow
Name: zoo, dtype: object
"""
from pandas import Series
if index is None:
index = self._view()
if name is None:
name = self.name
return Series(self._values.copy(), index=index, name=name)
def to_frame(
self, index: bool = True, name: Hashable = lib.no_default
) -> DataFrame:
"""
Create a DataFrame with a column containing the Index.
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original Index.
name : object, defaults to index.name
The passed name should substitute for the index name (if it has
one).
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
To override the name of the resulting column, specify `name`:
>>> idx.to_frame(index=False, name='zoo')
zoo
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
if name is lib.no_default:
name = self._get_level_names()
result = DataFrame({name: self}, copy=not using_copy_on_write())
if index:
result.index = self
return result
# --------------------------------------------------------------------
# Name-Centric Methods
@property
def name(self) -> Hashable:
"""
Return Index or MultiIndex name.
Examples
--------
>>> idx = pd.Index([1, 2, 3], name='x')
>>> idx
Index([1, 2, 3], dtype='int64', name='x')
>>> idx.name
'x'
"""
return self._name
@name.setter
def name(self, value: Hashable) -> None:
if self._no_setting_name:
# Used in MultiIndex.levels to avoid silently ignoring name updates.
raise RuntimeError(
"Cannot set name on a level of a MultiIndex. Use "
"'MultiIndex.set_names' instead."
)
maybe_extract_name(value, None, type(self))
self._name = value
@final
def _validate_names(
self, name=None, names=None, deep: bool = False
) -> list[Hashable]:
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
if names is None and name is None:
new_names = deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
new_names = names
elif not is_list_like(name):
new_names = [name]
else:
new_names = name
if len(new_names) != len(self.names):
raise ValueError(
f"Length of new names must be {len(self.names)}, got {len(new_names)}"
)
# All items in 'new_names' need to be hashable
validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name")
return new_names
def _get_default_index_names(
self, names: Hashable | Sequence[Hashable] | None = None, default=None
) -> list[Hashable]:
"""
Get names of index.
Parameters
----------
names : int, str or 1-dimensional list, default None
Index names to set.
default : str
Default name of index.
Raises
------
ValueError
if names is not a str or 1-dimensional list
"""
from pandas.core.indexes.multi import MultiIndex
if names is not None:
if isinstance(names, (int, str)):
names = [names]
if not isinstance(names, list) and names is not None:
raise ValueError("Index names must be str or 1-dimensional list")
if not names:
if isinstance(self, MultiIndex):
names = com.fill_missing_names(self.names)
else:
names = [default] if self.name is None else [self.name]
return names
def _get_names(self) -> FrozenList:
return FrozenList((self.name,))
def _set_names(self, values, *, level=None) -> None:
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError("Names must be a list-like")
if len(values) != 1:
raise ValueError(f"Length of new names must be 1, got {len(values)}")
# GH 20527
# All items in 'name' need to be hashable:
validate_all_hashable(*values, error_name=f"{type(self).__name__}.name")
self._name = values[0]
names = property(fset=_set_names, fget=_get_names)
@overload
def set_names(self, names, *, level=..., inplace: Literal[False] = ...) -> Self:
...
@overload
def set_names(self, names, *, level=..., inplace: Literal[True]) -> None:
...
@overload
def set_names(self, names, *, level=..., inplace: bool = ...) -> Self | None:
...
def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None:
"""
Set Index or MultiIndex name.
Able to set new names partially and by level.
Parameters
----------
names : label or list of label or dict-like for MultiIndex
Name(s) to set.
.. versionchanged:: 1.3.0
level : int, label or list of int or label, optional
If the index is a MultiIndex and names is not dict-like, level(s) to set
(None for all levels). Otherwise level must be None.
.. versionchanged:: 1.3.0
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Index.rename : Able to set new names without level.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> idx.set_names('quarter')
Index([1, 2, 3, 4], dtype='int64', name='quarter')
>>> idx = pd.MultiIndex.from_product([['python', 'cobra'],
... [2018, 2019]])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
)
>>> idx = idx.set_names(['kind', 'year'])
>>> idx.set_names('species', level=0)
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
When renaming levels with a dict, levels cannot be passed.
>>> idx.set_names({'kind': 'snake'})
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['snake', 'year'])
"""
if level is not None and not isinstance(self, ABCMultiIndex):
raise ValueError("Level must be None for non-MultiIndex")
if level is not None and not is_list_like(level) and is_list_like(names):
raise TypeError("Names must be a string when a single level is provided.")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if is_dict_like(names) and not isinstance(self, ABCMultiIndex):
raise TypeError("Can only pass dict-like as `names` for MultiIndex.")
if is_dict_like(names) and level is not None:
raise TypeError("Can not pass level for dictlike `names`.")
if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None:
# Transform dict to list of new names and corresponding levels
level, names_adjusted = [], []
for i, name in enumerate(self.names):
if name in names.keys():
level.append(i)
names_adjusted.append(names[name])
names = names_adjusted
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._view()
idx._set_names(names, level=level)
if not inplace:
return idx
return None
@overload
def rename(self, name, *, inplace: Literal[False] = ...) -> Self:
...
@overload
def rename(self, name, *, inplace: Literal[True]) -> None:
...
@deprecate_nonkeyword_arguments(
version="3.0", allowed_args=["self", "name"], name="rename"
)
def rename(self, name, inplace: bool = False) -> Self | None:
"""
Alter Index or MultiIndex name.
Able to set new names without level. Defaults to returning new index.
Length of names must match number of levels in MultiIndex.
Parameters
----------
name : label or list of labels
Name(s) to set.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Index.set_names : Able to set new names partially and by level.
Examples
--------
>>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score')
>>> idx.rename('grade')
Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')
>>> idx = pd.MultiIndex.from_product([['python', 'cobra'],
... [2018, 2019]],
... names=['kind', 'year'])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['kind', 'year'])
>>> idx.rename(['species', 'year'])
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
>>> idx.rename('species')
Traceback (most recent call last):
TypeError: Must pass list-like as `names`.
"""
return self.set_names([name], inplace=inplace)
# --------------------------------------------------------------------
# Level-Centric Methods
@property
def nlevels(self) -> int:
"""
Number of levels.
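Examples
--------
A flat (non-Multi) Index always has a single level:
>>> pd.Index([1, 2, 3]).nlevels
1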
"""
return 1
def _sort_levels_monotonic(self) -> Self:
"""
Compat with MultiIndex.
"""
return self
@final
def _validate_index_level(self, level) -> None:
"""
Validate index level.
For a single-level Index, getting the level number is a no-op, but the
same kind of verification as for MultiIndex must still be done.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError(
"Too many levels: Index has only 1 level, "
f"{level} is not a valid level number"
)
if level > 0:
raise IndexError(
f"Too many levels: Index has only 1 level, not {level + 1}"
)
elif level != self.name:
raise KeyError(
f"Requested level ({level}) does not match index name ({self.name})"
)
def _get_level_number(self, level) -> int:
self._validate_index_level(level)
return 0
def sortlevel(
self,
level=None,
ascending: bool | list[bool] = True,
sort_remaining=None,
na_position: NaPosition = "first",
):
"""
For internal compatibility with the Index API.
Sort the Index. This is for compat with MultiIndex.
Parameters
----------
ascending : bool, default True
False to sort in descending order
na_position : {'first', 'last'}, default 'first'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
.. versionadded:: 2.1.0
level, sort_remaining are compat parameters
Returns
-------
Index
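Examples
--------
A minimal illustration on a flat Index, where ``level`` and
``sort_remaining`` are compatibility no-ops:
>>> idx = pd.Index([1, 3, 2])
>>> sorted_idx, indexer = idx.sortlevel()
>>> sorted_idx
Index([1, 2, 3], dtype='int64')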
"""
if not isinstance(ascending, (list, bool)):
raise TypeError(
"ascending must be a single bool value or"
"a list of bool values of length 1"
)
if isinstance(ascending, list):
if len(ascending) != 1:
raise TypeError("ascending must be a list of bool values of length 1")
ascending = ascending[0]
if not isinstance(ascending, bool):
raise TypeError("ascending must be a bool value")
return self.sort_values(
return_indexer=True, ascending=ascending, na_position=na_position
)
def _get_level_values(self, level) -> Index:
"""
Return an Index of values for requested level.
This is primarily useful to get an individual level of values from a
MultiIndex, but is provided on Index as well for compatibility.
Parameters
----------
level : int or str
It is either the integer position or the name of the level.
Returns
-------
Index
Calling object, as there is only one level in the Index.
See Also
--------
MultiIndex.get_level_values : Get values for a level of a MultiIndex.
Notes
-----
For Index, level should be 0, since there are no multiple levels.
Examples
--------
>>> idx = pd.Index(list('abc'))
>>> idx
Index(['a', 'b', 'c'], dtype='object')
Get level values by supplying `level` as integer:
>>> idx.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object')
"""
self._validate_index_level(level)
return self
get_level_values = _get_level_values
@final
def droplevel(self, level: IndexLabel = 0):
"""
Return index with requested level(s) removed.
If resulting index has only 1 level left, the result will be
of Index type, not MultiIndex. The original index is not modified inplace.
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
Index or MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
names=['x', 'y', 'z'])
>>> mi.droplevel()
MultiIndex([(3, 5),
(4, 6)],
names=['y', 'z'])
>>> mi.droplevel(2)
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.droplevel('z')
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.droplevel(['x', 'y'])
Index([5, 6], dtype='int64', name='z')
"""
if not isinstance(level, (tuple, list)):
level = [level]
levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]
return self._drop_level_numbers(levnums)
@final
def _drop_level_numbers(self, levnums: list[int]):
"""
Drop MultiIndex levels by level _number_, not name.
"""
if not levnums and not isinstance(self, ABCMultiIndex):
return self
if len(levnums) >= self.nlevels:
raise ValueError(
f"Cannot remove {len(levnums)} levels from an index with "
f"{self.nlevels} levels: at least one level must be left."
)
# The two checks above guarantee that here self is a MultiIndex
self = cast("MultiIndex", self)
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
for i in levnums:
new_levels.pop(i)
new_codes.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
lev = new_levels[0]
if len(lev) == 0:
# If lev is empty, lev.take will fail GH#42055
if len(new_codes[0]) == 0:
# GH#45230 preserve RangeIndex here
# see test_reset_index_empty_rangeindex
result = lev[:0]
else:
res_values = algos.take(lev._values, new_codes[0], allow_fill=True)
# _constructor instead of type(lev) for RangeIndex compat GH#35230
result = lev._constructor._simple_new(res_values, name=new_names[0])
else:
# set nan if needed
mask = new_codes[0] == -1
result = new_levels[0].take(new_codes[0])
if mask.any():
result = result.putmask(mask, np.nan)
result._name = new_names[0]
return result
else:
from pandas.core.indexes.multi import MultiIndex
return MultiIndex(
levels=new_levels,
codes=new_codes,
names=new_names,
verify_integrity=False,
)
# --------------------------------------------------------------------
# Introspection Methods
@cache_readonly
@final
def _can_hold_na(self) -> bool:
if isinstance(self.dtype, ExtensionDtype):
return self.dtype._can_hold_na
if self.dtype.kind in "iub":
return False
return True
@property
def is_monotonic_increasing(self) -> bool:
"""
Return a boolean indicating whether the values are equal or increasing.
Returns
-------
bool
See Also
--------
Index.is_monotonic_decreasing : Check if the values are equal or decreasing.
Examples
--------
>>> pd.Index([1, 2, 3]).is_monotonic_increasing
True
>>> pd.Index([1, 2, 2]).is_monotonic_increasing
True
>>> pd.Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self) -> bool:
"""
Return a boolean indicating whether the values are equal or decreasing.
Returns
-------
bool
See Also
--------
Index.is_monotonic_increasing : Check if the values are equal or increasing.
Examples
--------
>>> pd.Index([3, 2, 1]).is_monotonic_decreasing
True
>>> pd.Index([3, 2, 2]).is_monotonic_decreasing
True
>>> pd.Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@final
@property
def _is_strictly_monotonic_increasing(self) -> bool:
"""
Return whether the index has strictly monotonic increasing
(only increasing) values.
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@final
@property
def _is_strictly_monotonic_decreasing(self) -> bool:
"""
Return whether the index has strictly monotonic decreasing
(only decreasing) values.
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
@cache_readonly
def is_unique(self) -> bool:
"""
Return if the index has unique values.
Returns
-------
bool
See Also
--------
Index.has_duplicates : Inverse method that checks if it has duplicate values.
Examples
--------
>>> idx = pd.Index([1, 5, 7, 7])
>>> idx.is_unique
False
>>> idx = pd.Index([1, 5, 7])
>>> idx.is_unique
True
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_unique
False
>>> idx = pd.Index(["Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_unique
True
"""
return self._engine.is_unique
@final
@property
def has_duplicates(self) -> bool:
"""
Check if the Index has duplicate values.
Returns
-------
bool
Whether or not the Index has duplicate values.
See Also
--------
Index.is_unique : Inverse method that checks if it has unique values.
Examples
--------
>>> idx = pd.Index([1, 5, 7, 7])
>>> idx.has_duplicates
True
>>> idx = pd.Index([1, 5, 7])
>>> idx.has_duplicates
False
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.has_duplicates
True
>>> idx = pd.Index(["Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.has_duplicates
False
"""
return not self.is_unique
@final
def is_boolean(self) -> bool:
"""
Check if the Index only consists of booleans.
.. deprecated:: 2.0.0
Use `pandas.api.types.is_bool_dtype` instead.
Returns
-------
bool
Whether or not the Index only consists of booleans.
See Also
--------
is_integer : Check if the Index only consists of integers (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_object : Check if the Index is of the object dtype (deprecated).
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index([True, False, True])
>>> idx.is_boolean() # doctest: +SKIP
True
>>> idx = pd.Index(["True", "False", "True"])
>>> idx.is_boolean() # doctest: +SKIP
False
>>> idx = pd.Index([True, False, "True"])
>>> idx.is_boolean() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_boolean is deprecated. "
"Use pandas.api.types.is_bool_type instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["boolean"]
@final
def is_integer(self) -> bool:
"""
Check if the Index only consists of integers.
.. deprecated:: 2.0.0
Use `pandas.api.types.is_integer_dtype` instead.
Returns
-------
bool
Whether or not the Index only consists of integers.
See Also
--------
is_boolean : Check if the Index only consists of booleans (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_object : Check if the Index is of the object dtype. (deprecated).
is_categorical : Check if the Index holds categorical data (deprecated).
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.is_integer() # doctest: +SKIP
True
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_integer() # doctest: +SKIP
False
>>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_integer() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_integer is deprecated. "
"Use pandas.api.types.is_integer_dtype instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["integer"]
@final
def is_floating(self) -> bool:
"""
Check if the Index is a floating type.
.. deprecated:: 2.0.0
Use `pandas.api.types.is_float_dtype` instead
The Index may consist of only floats, NaNs, or a mix of floats,
integers, or NaNs.
Returns
-------
bool
Whether or not the Index only consists of floats, NaNs, or a mix of
floats, integers, or NaNs.
See Also
--------
is_boolean : Check if the Index only consists of booleans (deprecated).
is_integer : Check if the Index only consists of integers (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_object : Check if the Index is of the object dtype. (deprecated).
is_categorical : Check if the Index holds categorical data (deprecated).
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_floating() # doctest: +SKIP
True
>>> idx = pd.Index([1.0, 2.0, np.nan, 4.0])
>>> idx.is_floating() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4, np.nan])
>>> idx.is_floating() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.is_floating() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_floating is deprecated. "
"Use pandas.api.types.is_float_dtype instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"]
@final
def is_numeric(self) -> bool:
"""
Check if the Index only consists of numeric data.
.. deprecated:: 2.0.0
Use `pandas.api.types.is_numeric_dtype` instead.
Returns
-------
bool
Whether or not the Index only consists of numeric data.
See Also
--------
is_boolean : Check if the Index only consists of booleans (deprecated).
is_integer : Check if the Index only consists of integers (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_object : Check if the Index is of the object dtype. (deprecated).
is_categorical : Check if the Index holds categorical data (deprecated).
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4.0])
>>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4.0, np.nan])
>>> idx.is_numeric() # doctest: +SKIP
True
>>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"])
>>> idx.is_numeric() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_numeric is deprecated. "
"Use pandas.api.types.is_any_real_numeric_dtype instead",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["integer", "floating"]
@final
def is_object(self) -> bool:
"""
Check if the Index is of the object dtype.
.. deprecated:: 2.0.0
Use `pandas.api.types.is_object_dtype` instead.
Returns
-------
bool
Whether or not the Index is of the object dtype.
See Also
--------
is_boolean : Check if the Index only consists of booleans (deprecated).
is_integer : Check if the Index only consists of integers (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_categorical : Check if the Index holds categorical data (deprecated).
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_object() # doctest: +SKIP
True
>>> idx = pd.Index(["Apple", "Mango", 2.0])
>>> idx.is_object() # doctest: +SKIP
True
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_object() # doctest: +SKIP
False
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_object() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_object is deprecated."
"Use pandas.api.types.is_object_dtype instead",
FutureWarning,
stacklevel=find_stack_level(),
)
return is_object_dtype(self.dtype)
@final
def is_categorical(self) -> bool:
"""
Check if the Index holds categorical data.
.. deprecated:: 2.0.0
Use `isinstance(index.dtype, pd.CategoricalDtype)` instead.
Returns
-------
bool
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
is_boolean : Check if the Index only consists of booleans (deprecated).
is_integer : Check if the Index only consists of integers (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_object : Check if the Index is of the object dtype. (deprecated).
is_interval : Check if the Index holds Interval objects (deprecated).
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical() # doctest: +SKIP
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical() # doctest: +SKIP
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_categorical is deprecated."
"Use pandas.api.types.is_categorical_dtype instead",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["categorical"]
@final
def is_interval(self) -> bool:
"""
Check if the Index holds Interval objects.
.. deprecated:: 2.0.0
Use `isinstance(index.dtype, pd.IntervalDtype)` instead.
Returns
-------
bool
Whether or not the Index holds Interval objects.
See Also
--------
IntervalIndex : Index for Interval objects.
is_boolean : Check if the Index only consists of booleans (deprecated).
is_integer : Check if the Index only consists of integers (deprecated).
is_floating : Check if the Index is a floating type (deprecated).
is_numeric : Check if the Index only consists of numeric data (deprecated).
is_object : Check if the Index is of the object dtype. (deprecated).
is_categorical : Check if the Index holds categorical data (deprecated).
Examples
--------
>>> idx = pd.Index([pd.Interval(left=0, right=5),
... pd.Interval(left=5, right=10)])
>>> idx.is_interval() # doctest: +SKIP
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_interval() # doctest: +SKIP
False
"""
warnings.warn(
f"{type(self).__name__}.is_interval is deprecated."
"Use pandas.api.types.is_interval_dtype instead",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.inferred_type in ["interval"]
@final
def _holds_integer(self) -> bool:
"""
Whether the type is an integer type.
"""
return self.inferred_type in ["integer", "mixed-integer"]
@final
def holds_integer(self) -> bool:
"""
Whether the type is an integer type.
.. deprecated:: 2.0.0
Use `pandas.api.types.infer_dtype` instead
"""
warnings.warn(
f"{type(self).__name__}.holds_integer is deprecated. "
"Use pandas.api.types.infer_dtype instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self._holds_integer()
@cache_readonly
def inferred_type(self) -> str_t:
"""
Return a string of the type inferred from the values.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.inferred_type
'integer'
"""
return lib.infer_dtype(self._values, skipna=False)
@cache_readonly
@final
def _is_all_dates(self) -> bool:
"""
Whether or not the index values only consist of dates.
"""
if needs_i8_conversion(self.dtype):
return True
elif self.dtype != _dtype_obj:
# TODO(ExtensionIndex): 3rd party EA might override?
# Note: this includes IntervalIndex, even when the left/right
# contain datetime-like objects.
return False
elif self._is_multi:
return False
return is_datetime_array(ensure_object(self._values))
@final
@cache_readonly
def _is_multi(self) -> bool:
"""
Cached check equivalent to isinstance(self, MultiIndex)
"""
return isinstance(self, ABCMultiIndex)
# --------------------------------------------------------------------
# Pickle Methods
def __reduce__(self):
d = {"data": self._data, "name": self.name}
return _new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Null Handling Methods
@cache_readonly
def _na_value(self):
"""The expected NA value to use with this index."""
dtype = self.dtype
if isinstance(dtype, np.dtype):
if dtype.kind in "mM":
return NaT
return np.nan
return dtype.na_value
@cache_readonly
def _isnan(self) -> npt.NDArray[np.bool_]:
"""
Return if each value is NaN.
"""
if self._can_hold_na:
return isna(self)
else:
# shouldn't reach to this condition by checking hasnans beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def hasnans(self) -> bool:
"""
Return True if there are any NaNs.
Enables various performance speedups.
Returns
-------
bool
Examples
--------
>>> s = pd.Series([1, 2, 3], index=['a', 'b', None])
>>> s
a 1
b 2
None 3
dtype: int64
>>> s.index.hasnans
True
"""
if self._can_hold_na:
return bool(self._isnan.any())
else:
return False
@final
def isna(self) -> npt.NDArray[np.bool_]:
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get
mapped to ``True`` values.
Everything else gets mapped to ``False`` values. Characters such as
empty strings `''` or :attr:`numpy.inf` are not considered NA values.
Returns
-------
numpy.ndarray[bool]
A boolean array of whether my values are NA.
See Also
--------
Index.notna : Boolean inverse of isna.
Index.dropna : Omit entries with missing values.
isna : Top-level isna.
Series.isna : Detect missing values in Series object.
Examples
--------
Show which entries in a pandas.Index are NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.nan])
>>> idx
Index([5.2, 6.0, nan], dtype='float64')
>>> idx.isna()
array([False, False, True])
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
array([False, False, False, True])
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),
... pd.Timestamp(''), None, pd.NaT])
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
>>> idx.isna()
array([False, True, True, True])
"""
return self._isnan
isnull = isna
@final
def notna(self) -> npt.NDArray[np.bool_]:
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to ``True``. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values.
NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``
values.
Returns
-------
numpy.ndarray[bool]
Boolean array to indicate which entries are not NA.
See Also
--------
Index.notnull : Alias of notna.
Index.isna: Inverse of notna.
notna : Top-level notna.
Examples
--------
Show which entries in an Index are not NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.nan])
>>> idx
Index([5.2, 6.0, nan], dtype='float64')
>>> idx.notna()
array([ True, True, False])
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
array([ True, True, True, False])
"""
return ~self.isna()
notnull = notna
def fillna(self, value=None, downcast=lib.no_default):
"""
Fill NA/NaN values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-like.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
.. deprecated:: 2.1.0
Returns
-------
Index
See Also
--------
DataFrame.fillna : Fill NaN values of a DataFrame.
Series.fillna : Fill NaN Values of a Series.
Examples
--------
>>> idx = pd.Index([np.nan, np.nan, 3])
>>> idx.fillna(0)
Index([0.0, 0.0, 3.0], dtype='float64')
"""
if not is_scalar(value):
raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}")
if downcast is not lib.no_default:
warnings.warn(
f"The 'downcast' keyword in {type(self).__name__}.fillna is "
"deprecated and will be removed in a future version. "
"It was previously silently ignored.",
FutureWarning,
stacklevel=find_stack_level(),
)
else:
downcast = None
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
# no need to care metadata other than name
# because it can't have freq if it has NaTs
# _with_infer needed for test_fillna_categorical
return Index._with_infer(result, name=self.name)
raise NotImplementedError(
f"{type(self).__name__}.fillna does not support 'downcast' "
"argument values other than 'None'."
)
return self._view()
def dropna(self, how: AnyAll = "any") -> Self:
"""
Return Index without NA/NaN values.
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
Index
Examples
--------
>>> idx = pd.Index([1, np.nan, 3])
>>> idx.dropna()
Index([1.0, 3.0], dtype='float64')
"""
if how not in ("any", "all"):
raise ValueError(f"invalid how option: {how}")
if self.hasnans:
res_values = self._values[~self._isnan]
return type(self)._simple_new(res_values, name=self.name)
return self._view()
# --------------------------------------------------------------------
# Uniqueness Methods
def unique(self, level: Hashable | None = None) -> Self:
"""
Return unique values in the index.
Unique values are returned in order of appearance; this does NOT sort.
Parameters
----------
level : int or hashable, optional
Only return values from specified level (for MultiIndex).
If int, gets the level by integer position, else by level name.
Returns
-------
Index
See Also
--------
unique : Numpy array of unique values in that column.
Series.unique : Return unique values of Series object.
Examples
--------
>>> idx = pd.Index([1, 1, 2, 3, 3])
>>> idx.unique()
Index([1, 2, 3], dtype='int64')
"""
if level is not None:
self._validate_index_level(level)
if self.is_unique:
return self._view()
result = super().unique()
return self._shallow_copy(result)
def drop_duplicates(self, *, keep: DropKeep = "first") -> Self:
"""
Return Index with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
Returns
-------
Index
See Also
--------
Series.drop_duplicates : Equivalent method on Series.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Index.duplicated : Related method on Index, indicating duplicate
Index values.
Examples
--------
Generate a pandas.Index with duplicate values.
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> idx.drop_duplicates(keep='first')
Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')
The value 'last' keeps the last occurrence for each set of duplicated
entries.
>>> idx.drop_duplicates(keep='last')
Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')
The value ``False`` discards all sets of duplicated entries.
>>> idx.drop_duplicates(keep=False)
Index(['cow', 'beetle', 'hippo'], dtype='object')
"""
if self.is_unique:
return self._view()
return super().drop_duplicates(keep=keep)
def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
"""
Indicate duplicate index values.
Duplicated values are indicated as ``True`` values in the resulting
array. Either all duplicates, all except the first, or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
The value or values in a set of duplicates to mark as missing.
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
np.ndarray[bool]
See Also
--------
Series.duplicated : Equivalent method on pandas.Series.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Index.drop_duplicates : Remove duplicate values from Index.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set to False and all others to True:
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> idx.duplicated()
array([False, False, True, False, True])
which is equivalent to
>>> idx.duplicated(keep='first')
array([False, False, True, False, True])
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True:
>>> idx.duplicated(keep='last')
array([ True, False, True, False, False])
By setting keep to ``False``, all duplicates are True:
>>> idx.duplicated(keep=False)
array([ True, False, True, False, True])
"""
if self.is_unique:
# fastpath available bc we are immutable
return np.zeros(len(self), dtype=bool)
return self._duplicated(keep=keep)
# --------------------------------------------------------------------
# Arithmetic & Logical Methods
def __iadd__(self, other):
# alias for __add__
return self + other
@final
def __nonzero__(self) -> NoReturn:
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
# --------------------------------------------------------------------
# Set Operation Methods
def _get_reconciled_name_object(self, other):
"""
If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self.
"""
name = get_op_result_name(self, other)
if self.name is not name:
return self.rename(name)
return self
@final
def _validate_sort_keyword(self, sort):
if sort not in [None, False, True]:
raise ValueError(
"The 'sort' keyword only takes the values of "
f"None, True, or False; {sort} was passed."
)
@final
def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]:
"""
With mismatched timezones, cast both to UTC.
"""
# Caller is responsible for checking
# `self.dtype != other.dtype`
if (
isinstance(self, ABCDatetimeIndex)
and isinstance(other, ABCDatetimeIndex)
and self.tz is not None
and other.tz is not None
):
# GH#39328, GH#45357
left = self.tz_convert("UTC")
right = other.tz_convert("UTC")
return left, right
return self, other
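# Illustrative example (a sketch, assuming pandas imported as pd): a union of
# two tz-aware DatetimeIndexes with different zones is computed in UTC,
#   left = pd.date_range("2021-01-01", periods=2, tz="US/Eastern")
#   right = pd.date_range("2021-01-01", periods=2, tz="Asia/Tokyo")
#   left.union(right)   # both sides are cast to UTC first (GH#39328)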
@final
def union(self, other, sort=None):
"""
Form the union of two Index objects.
If the Index objects are incompatible, both Index objects will be
cast to dtype('object') first.
Parameters
----------
other : Index or array-like
sort : bool or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `self` or `other` has length 0.
3. Some values in `self` or `other` cannot be compared.
A RuntimeWarning is issued in this case.
* False : do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Examples
--------
Union matching dtypes
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Index([1, 2, 3, 4, 5, 6], dtype='int64')
Union mismatched dtypes
>>> idx1 = pd.Index(['a', 'b', 'c', 'd'])
>>> idx2 = pd.Index([1, 2, 3, 4])
>>> idx1.union(idx2)
Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object')
MultiIndex case
>>> idx1 = pd.MultiIndex.from_arrays(
... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
... )
>>> idx1
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue')],
)
>>> idx2 = pd.MultiIndex.from_arrays(
... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]]
... )
>>> idx2
MultiIndex([(3, 'Red'),
(3, 'Green'),
(2, 'Red'),
(2, 'Green')],
)
>>> idx1.union(idx2)
MultiIndex([(1, 'Blue'),
(1, 'Red'),
(2, 'Blue'),
(2, 'Green'),
(2, 'Red'),
(3, 'Green'),
(3, 'Red')],
)
>>> idx1.union(idx2, sort=False)
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue'),
(3, 'Red'),
(3, 'Green'),
(2, 'Green')],
)
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if self.dtype != other.dtype:
if (
isinstance(self, ABCMultiIndex)
and not is_object_dtype(_unpack_nested_dtype(other))
and len(other) > 0
):
raise NotImplementedError(
"Can only union MultiIndex with MultiIndex or Index of tuples, "
"try mi.to_flat_index().union(other) instead."
)
self, other = self._dti_setop_align_tzs(other, "union")
dtype = self._find_common_type_compat(other)
left = self.astype(dtype, copy=False)
right = other.astype(dtype, copy=False)
return left.union(right, sort=sort)
elif not len(other) or self.equals(other):
# NB: whether this (and the `if not len(self)` check below) come before
# or after the dtype equality check above affects the returned dtype
result = self._get_reconciled_name_object(other)
if sort is True:
return result.sort_values()
return result
elif not len(self):
result = other._get_reconciled_name_object(self)
if sort is True:
return result.sort_values()
return result
result = self._union(other, sort=sort)
return self._wrap_setop_result(other, result)
def _union(self, other: Index, sort: bool | None):
"""
Specific union logic should go here. In subclasses, union behavior
should be overwritten here rather than in `self.union`.
Parameters
----------
other : Index or array-like
sort : bool or None, default False
Whether to sort the resulting index.
* True : sort the result
* False : do not sort the result.
* None : sort the result, except when `self` and `other` are equal
or when the values cannot be compared.
Returns
-------
Index
"""
lvals = self._values
rvals = other._values
if (
sort in (None, True)
and self.is_monotonic_increasing
and other.is_monotonic_increasing
and not (self.has_duplicates and other.has_duplicates)
and self._can_use_libjoin
and other._can_use_libjoin
):
# Both are monotonic and at least one is unique, so can use outer join
# (actually don't need either unique, but without this restriction
# test_union_same_value_duplicated_in_both fails)
try:
return self._outer_indexer(other)[0]
except (TypeError, IncompatibleFrequency):
# incomparable objects; should only be for object dtype
value_list = list(lvals)
# worth making this faster? a very unusual case
value_set = set(lvals)
value_list.extend([x for x in rvals if x not in value_set])
# If objects are unorderable, we must have object dtype.
return np.array(value_list, dtype=object)
elif not other.is_unique:
# other has duplicates
result_dups = algos.union_with_duplicates(self, other)
return _maybe_try_sort(result_dups, sort)
# The rest of this method is analogous to Index._intersection_via_get_indexer
# Self may have duplicates; other already checked as unique
# find indexes of things in "other" that are not in "self"
if self._index_as_unique:
indexer = self.get_indexer(other)
missing = (indexer == -1).nonzero()[0]
else:
missing = algos.unique1d(self.get_indexer_non_unique(other)[1])
result: Index | MultiIndex | ArrayLike
if self._is_multi:
# Preserve MultiIndex to avoid losing dtypes
result = self.append(other.take(missing))
else:
if len(missing) > 0:
other_diff = rvals.take(missing)
result = concat_compat((lvals, other_diff))
else:
result = lvals
if not self.is_monotonic_increasing or not other.is_monotonic_increasing:
# if both are monotonic then result should already be sorted
result = _maybe_try_sort(result, sort)
return result
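# Illustrative example (a sketch of the union_with_duplicates path above, not
# asserted by this source): when `other` has duplicates, each value is kept as
# many times as its larger count on either side,
#   pd.Index([1, 1, 2]).union(pd.Index([1, 1, 3, 3]))
#   # expected: Index([1, 1, 2, 3, 3], dtype='int64')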
@final
def _wrap_setop_result(self, other: Index, result) -> Index:
name = get_op_result_name(self, other)
if isinstance(result, Index):
if result.name != name:
result = result.rename(name)
else:
result = self._shallow_copy(result, name=name)
return result
@final
def intersection(self, other, sort: bool = False):
# default sort keyword is different here from other setops intentionally
# done in GH#25063
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Parameters
----------
other : Index or array-like
sort : True, False or None, default False
Whether to sort the resulting index.
* None : sort the result, except when `self` and `other` are equal
or when the values cannot be compared.
* False : do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Index([3, 4], dtype='int64')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if self.dtype != other.dtype:
self, other = self._dti_setop_align_tzs(other, "intersection")
if self.equals(other):
if not self.is_unique:
result = self.unique()._get_reconciled_name_object(other)
else:
result = self._get_reconciled_name_object(other)
if sort is True:
result = result.sort_values()
return result
if len(self) == 0 or len(other) == 0:
# fastpath; we need to be careful about having commutativity
if self._is_multi or other._is_multi:
# _convert_can_do_setop ensures that we have both or neither
# We retain self.levels
return self[:0].rename(result_name)
dtype = self._find_common_type_compat(other)
if self.dtype == dtype:
# Slicing allows us to retain DTI/TDI.freq, RangeIndex
# Note: self[:0] vs other[:0] affects
# 1) which index's `freq` we get in DTI/TDI cases
# This may be a historical artifact, i.e. no documented
# reason for this choice.
# 2) The `step` we get in RangeIndex cases
if len(self) == 0:
return self[:0].rename(result_name)
else:
return other[:0].rename(result_name)
return Index([], dtype=dtype, name=result_name)
elif not self._should_compare(other):
# We can infer that the intersection is empty.
if isinstance(self, ABCMultiIndex):
return self[:0].rename(result_name)
return Index([], name=result_name)
elif self.dtype != other.dtype:
dtype = self._find_common_type_compat(other)
this = self.astype(dtype, copy=False)
other = other.astype(dtype, copy=False)
return this.intersection(other, sort=sort)
result = self._intersection(other, sort=sort)
return self._wrap_intersection_result(other, result)
def _intersection(self, other: Index, sort: bool = False):
"""
intersection specialized to the case with matching dtypes.
"""
if (
self.is_monotonic_increasing
and other.is_monotonic_increasing
and self._can_use_libjoin
and other._can_use_libjoin
):
try:
res_indexer, indexer, _ = self._inner_indexer(other)
except TypeError:
# non-comparable; should only be for object dtype
pass
else:
# TODO: algos.unique1d should preserve DTA/TDA
if is_numeric_dtype(self.dtype):
# This is faster, because Index.unique() checks for uniqueness
# before calculating the unique values.
res = algos.unique1d(res_indexer)
else:
result = self.take(indexer)
res = result.drop_duplicates()
return ensure_wrapped_if_datetimelike(res)
res_values = self._intersection_via_get_indexer(other, sort=sort)
res_values = _maybe_try_sort(res_values, sort)
return res_values
def _wrap_intersection_result(self, other, result):
# We will override for MultiIndex to handle empty results
return self._wrap_setop_result(other, result)
@final
def _intersection_via_get_indexer(
self, other: Index | MultiIndex, sort
) -> ArrayLike | MultiIndex:
"""
Find the intersection of two Indexes using get_indexer.
Returns
-------
np.ndarray or ExtensionArray or MultiIndex
The returned array will be unique.
"""
left_unique = self.unique()
right_unique = other.unique()
# even though we are unique, we need get_indexer_for for IntervalIndex
indexer = left_unique.get_indexer_for(right_unique)
mask = indexer != -1
taker = indexer.take(mask.nonzero()[0])
if sort is False:
# sort bc we want the elements in the same order they are in self
# unnecessary in the case with sort=None bc we will sort later
taker = np.sort(taker)
result: MultiIndex | ExtensionArray | np.ndarray
if isinstance(left_unique, ABCMultiIndex):
result = left_unique.take(taker)
else:
result = left_unique.take(taker)._values
return result
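# Illustrative example (a sketch of the sort=False path above): the result
# keeps the common elements in the order they appear in `self`, not in `other`,
#   pd.Index([3, 1, 2]).intersection(pd.Index([2, 3, 5]))
#   # expected: Index([3, 2], dtype='int64')   (default sort=False)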
@final
def difference(self, other, sort=None):
"""
Return a new Index with elements of index not in `other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
sort : bool or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by pandas.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Examples
--------
>>> idx1 = pd.Index([2, 1, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Index([1, 2], dtype='int64')
>>> idx1.difference(idx2, sort=False)
Index([2, 1], dtype='int64')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
# Note: we do NOT call _dti_setop_align_tzs here, as there
# is no requirement that .difference be commutative, so it does
# not cast to object.
if self.equals(other):
# Note: we do not (yet) sort even if sort=None GH#24959
return self[:0].rename(result_name)
if len(other) == 0:
# Note: we do not (yet) sort even if sort=None GH#24959
result = self.unique().rename(result_name)
if sort is True:
return result.sort_values()
return result
if not self._should_compare(other):
# Nothing matches -> difference is everything
result = self.unique().rename(result_name)
if sort is True:
return result.sort_values()
return result
result = self._difference(other, sort=sort)
return self._wrap_difference_result(other, result)
def _difference(self, other, sort):
# overridden by RangeIndex
this = self
if isinstance(self, ABCCategoricalIndex) and self.hasnans and other.hasnans:
this = this.dropna()
other = other.unique()
the_diff = this[other.get_indexer_for(this) == -1]
the_diff = the_diff if this.is_unique else the_diff.unique()
the_diff = _maybe_try_sort(the_diff, sort)
return the_diff
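# Illustrative example (a sketch of the method above): duplicates in `self`
# are dropped from the result because `the_diff.unique()` is taken,
#   pd.Index([1, 1, 2, 3]).difference(pd.Index([2]))
#   # expected: Index([1, 3], dtype='int64')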
def _wrap_difference_result(self, other, result):
# We will override for MultiIndex to handle empty results
return self._wrap_setop_result(other, result)
def symmetric_difference(self, other, result_name=None, sort=None):
"""
Compute the symmetric difference of two Index objects.
Parameters
----------
other : Index or array-like
result_name : str
sort : bool or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by pandas.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Index([1, 5], dtype='int64')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
if self.dtype != other.dtype:
self, other = self._dti_setop_align_tzs(other, "symmetric_difference")
if not self._should_compare(other):
return self.union(other, sort=sort).rename(result_name)
elif self.dtype != other.dtype:
dtype = self._find_common_type_compat(other)
this = self.astype(dtype, copy=False)
that = other.astype(dtype, copy=False)
return this.symmetric_difference(that, sort=sort).rename(result_name)
this = self.unique()
other = other.unique()
indexer = this.get_indexer_for(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(
np.arange(this.size), common_indexer, assume_unique=True
)
left_diff = this.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.take(right_indexer)
res_values = left_diff.append(right_diff)
result = _maybe_try_sort(res_values, sort)
if not self._is_multi:
return Index(result, name=result_name, dtype=res_values.dtype)
else:
left_diff = cast("MultiIndex", left_diff)
if len(result) == 0:
# result might be an Index, if other was an Index
return left_diff.remove_unused_levels().set_names(result_name)
return result.set_names(result_name)
@final
def _assert_can_do_setop(self, other) -> bool:
if not is_list_like(other):
raise TypeError("Input must be Index or array-like")
return True
def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]:
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = get_op_result_name(self, other)
return other, result_name
# --------------------------------------------------------------------
# Indexing Methods
def get_loc(self, key):
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
Returns
-------
int if unique index, slice if monotonic index, else mask
Examples
--------
>>> unique_index = pd.Index(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.Index(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.Index(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True])
"""
casted_key = self._maybe_cast_indexer(key)
try:
return self._engine.get_loc(casted_key)
except KeyError as err:
if isinstance(casted_key, slice) or (
isinstance(casted_key, abc.Iterable)
and any(isinstance(x, slice) for x in casted_key)
):
raise InvalidIndexError(key)
raise KeyError(key) from err
except TypeError:
# If we have a listlike key, _check_indexing_error will raise
# InvalidIndexError. Otherwise we fall through and re-raise
# the TypeError.
self._check_indexing_error(key)
raise
@final
def get_indexer(
self,
target,
method: ReindexMethod | None = None,
limit: int | None = None,
tolerance=None,
) -> npt.NDArray[np.intp]:
"""
Compute indexer and mask for new index given the current index.
The indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : Index
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
np.ndarray[np.intp]
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
Notes
-----
Returns -1 for unmatched values, for further explanation see the
example below.
Examples
--------
>>> index = pd.Index(['c', 'a', 'b'])
>>> index.get_indexer(['a', 'b', 'x'])
array([ 1, 2, -1])
Notice that the return value is an array of locations in ``index``
and ``x`` is marked by -1, as it is not in ``index``.
"""
method = clean_reindex_fill_method(method)
orig_target = target
target = self._maybe_cast_listlike_indexer(target)
self._check_indexing_method(method, limit, tolerance)
if not self._index_as_unique:
raise InvalidIndexError(self._requires_unique_msg)
if len(target) == 0:
return np.array([], dtype=np.intp)
if not self._should_compare(target) and not self._should_partial_index(target):
# IntervalIndex get special treatment bc numeric scalars can be
# matched to Interval scalars
return self._get_indexer_non_comparable(target, method=method, unique=True)
if isinstance(self.dtype, CategoricalDtype):
# _maybe_cast_listlike_indexer ensures target has our dtype
# (could improve perf by doing _should_compare check earlier?)
assert self.dtype == target.dtype
indexer = self._engine.get_indexer(target.codes)
if self.hasnans and target.hasnans:
# After _maybe_cast_listlike_indexer, target elements which do not
# belong to some category are changed to NaNs
# Mask to track actual NaN values compared to inserted NaN values
# GH#45361
target_nans = isna(orig_target)
loc = self.get_loc(np.nan)
mask = target.isna()
indexer[target_nans] = loc
indexer[mask & ~target_nans] = -1
return indexer
if isinstance(target.dtype, CategoricalDtype):
# potential fastpath
# get an indexer for unique categories then propagate to codes via take_nd
# get_indexer instead of _get_indexer needed for MultiIndex cases
# e.g. test_append_different_columns_types
categories_indexer = self.get_indexer(target.categories)
indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1)
if (not self._is_multi and self.hasnans) and target.hasnans:
# Exclude MultiIndex because hasnans raises NotImplementedError
# we should only get here if we are unique, so loc is an integer
# GH#41934
loc = self.get_loc(np.nan)
mask = target.isna()
indexer[mask] = loc
return ensure_platform_int(indexer)
pself, ptarget = self._maybe_downcast_for_indexing(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(
ptarget, method=method, limit=limit, tolerance=tolerance
)
if self.dtype == target.dtype and self.equals(target):
# Only call equals if we have same dtype to avoid inference/casting
return np.arange(len(target), dtype=np.intp)
if self.dtype != target.dtype and not self._should_partial_index(target):
# _should_partial_index e.g. IntervalIndex with numeric scalars
# that can be matched to Interval scalars.
dtype = self._find_common_type_compat(target)
this = self.astype(dtype, copy=False)
target = target.astype(dtype, copy=False)
return this._get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
return self._get_indexer(target, method, limit, tolerance)
def _get_indexer(
self,
target: Index,
method: str_t | None = None,
limit: int | None = None,
tolerance=None,
) -> npt.NDArray[np.intp]:
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
if method in ["pad", "backfill"]:
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == "nearest":
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if target._is_multi and self._is_multi:
engine = self._engine
# error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]"
# has no attribute "_extract_level_codes"
tgt_values = engine._extract_level_codes( # type: ignore[union-attr]
target
)
else:
tgt_values = target._get_engine_target()
indexer = self._engine.get_indexer(tgt_values)
return ensure_platform_int(indexer)
@final
def _should_partial_index(self, target: Index) -> bool:
"""
Should we attempt partial-matching indexing?
"""
if isinstance(self.dtype, IntervalDtype):
if isinstance(target.dtype, IntervalDtype):
return False
# "Index" has no attribute "left"
return self.left._should_compare(target) # type: ignore[attr-defined]
return False
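# Illustrative example (a sketch, assuming pandas imported as pd): an
# IntervalIndex can partially index plain numeric targets by locating the
# interval that contains each scalar,
#   ii = pd.IntervalIndex.from_breaks([0, 1, 2])
#   ii.get_indexer([0.5, 1.5])   # expected: array([0, 1])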
@final
def _check_indexing_method(
self,
method: str_t | None,
limit: int | None = None,
tolerance=None,
) -> None:
"""
Raise if we have a get_indexer `method` that is not supported or valid.
"""
if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]:
# in practice the clean_reindex_fill_method call would raise
# before we get here
raise ValueError("Invalid fill method") # pragma: no cover
if self._is_multi:
if method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
if method in ("pad", "backfill"):
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
)
if isinstance(self.dtype, (IntervalDtype, CategoricalDtype)):
# GH#37871 for now this is only for IntervalIndex and CategoricalIndex
if method is not None:
raise NotImplementedError(
f"method {method} not yet implemented for {type(self).__name__}"
)
if method is None:
if tolerance is not None:
raise ValueError(
"tolerance argument only valid if doing pad, "
"backfill or nearest reindexing"
)
if limit is not None:
raise ValueError(
"limit argument only valid if doing pad, "
"backfill or nearest reindexing"
)
def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray:
# override this method on subclasses
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError("list-like tolerance size must match target index size")
elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(
f"tolerance argument for {type(self).__name__} with dtype "
f"{self.dtype} must contain numeric elements if it is list type"
)
raise ValueError(
f"tolerance argument for {type(self).__name__} with dtype {self.dtype} "
f"must be numeric if it is a scalar: {repr(tolerance)}"
)
return tolerance
@final
def _get_fill_indexer(
self, target: Index, method: str_t, limit: int | None = None, tolerance=None
) -> npt.NDArray[np.intp]:
if self._is_multi:
if not (self.is_monotonic_increasing or self.is_monotonic_decreasing):
raise ValueError("index must be monotonic increasing or decreasing")
encoded = self.append(target)._engine.values # type: ignore[union-attr]
self_encoded = Index(encoded[: len(self)])
target_encoded = Index(encoded[len(self) :])
return self_encoded._get_fill_indexer(
target_encoded, method, limit, tolerance
)
if self.is_monotonic_increasing and target.is_monotonic_increasing:
target_values = target._get_engine_target()
own_values = self._get_engine_target()
if not isinstance(target_values, np.ndarray) or not isinstance(
own_values, np.ndarray
):
raise NotImplementedError
if method == "pad":
indexer = libalgos.pad(own_values, target_values, limit=limit)
else:
# i.e. "backfill"
indexer = libalgos.backfill(own_values, target_values, limit=limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method, limit)
if tolerance is not None and len(self):
indexer = self._filter_indexer_tolerance(target, indexer, tolerance)
return indexer
@final
def _get_fill_indexer_searchsorted(
self, target: Index, method: str_t, limit: int | None = None
) -> npt.NDArray[np.intp]:
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets.
"""
if limit is not None:
raise ValueError(
f"limit argument for {repr(method)} method only well-defined "
"if index and target are monotonic"
)
side: Literal["left", "right"] = "left" if method == "pad" else "right"
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = indexer == -1
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)
if side == "left":
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
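# Worked example (a sketch of the fallback above, reached e.g. when the target
# is not monotonic): with self = Index([10, 20, 30]) and target = [25, 5, 40],
# method="pad" first finds exact matches ([-1, -1, -1]), then
# searchsorted(side="left") gives [2, 0, 3]; after subtracting 1 for the
# inexact slots the indexer is [1, -1, 2], i.e. each target label maps to the
# previous index label, with -1 where no previous label exists.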
@final
def _get_nearest_indexer(
self, target: Index, limit: int | None, tolerance
) -> npt.NDArray[np.intp]:
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
if not len(self):
return self._get_fill_indexer(target, "pad")
left_indexer = self.get_indexer(target, "pad", limit=limit)
right_indexer = self.get_indexer(target, "backfill", limit=limit)
left_distances = self._difference_compat(target, left_indexer)
right_distances = self._difference_compat(target, right_indexer)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(
# error: Argument 1&2 has incompatible type "Union[ExtensionArray,
# ndarray[Any, Any]]"; expected "Union[SupportsDunderLE,
# SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]"
op(left_distances, right_distances) # type: ignore[arg-type]
| (right_indexer == -1),
left_indexer,
right_indexer,
)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer, tolerance)
return indexer
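# Illustrative example (a sketch of the tie-break above): with
# self = Index([0, 10]) and target = [5], the pad and backfill candidates are
# equidistant; the strict `<` used for monotonic increasing indexes lets the
# backfill (larger) label win, so
#   pd.Index([0, 10]).get_indexer([5], method="nearest")
#   # expected: array([1])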
@final
def _filter_indexer_tolerance(
self,
target: Index,
indexer: npt.NDArray[np.intp],
tolerance,
) -> npt.NDArray[np.intp]:
distance = self._difference_compat(target, indexer)
return np.where(distance <= tolerance, indexer, -1)
@final
def _difference_compat(
self, target: Index, indexer: npt.NDArray[np.intp]
) -> ArrayLike:
# Compatibility for PeriodArray, for which __sub__ returns an ndarray[object]
# of DateOffset objects, which do not support __abs__ (and would be slow
# if they did)
if isinstance(self.dtype, PeriodDtype):
# Note: we only get here with matching dtypes
own_values = cast("PeriodArray", self._data)._ndarray
target_values = cast("PeriodArray", target._data)._ndarray
diff = own_values[indexer] - target_values
else:
# error: Unsupported left operand type for - ("ExtensionArray")
diff = self._values[indexer] - target._values # type: ignore[operator]
return abs(diff)
# --------------------------------------------------------------------
# Indexer Conversion Methods
@final
def _validate_positional_slice(self, key: slice) -> None:
"""
For positional indexing, a slice must have either int or None
for each of start, stop, and step.
"""
self._validate_indexer("positional", key.start, "iloc")
self._validate_indexer("positional", key.stop, "iloc")
self._validate_indexer("positional", key.step, "iloc")
def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]):
"""
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'loc', 'getitem'}
"""
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
is_index_slice = is_valid_positional_slice(key)
# TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able
# to simplify this.
if lib.is_np_dtype(self.dtype, "f"):
# We always treat __getitem__ slicing as label-based
# translate to locations
if kind == "getitem" and is_index_slice and not start == stop and step != 0:
# exclude step=0 from the warning because it will raise anyway
# start/stop both None e.g. [:] or [::-1] won't change.
# exclude start==stop since it will be empty either way, or
# will be [:] or [::-1] which won't change
warnings.warn(
# GH#49612
"The behavior of obj[i:j] with a float-dtype index is "
"deprecated. In a future version, this will be treated as "
"positional instead of label-based. For label-based slicing, "
"use obj.loc[i:j] instead",
FutureWarning,
stacklevel=find_stack_level(),
)
return self.slice_indexer(start, stop, step)
if kind == "getitem":
# called from the getitem slicers, validate that we are in fact integers
if is_index_slice:
# In this case the _validate_indexer checks below are redundant
return key
elif self.dtype.kind in "iu":
# Note: these checks are redundant if we know is_index_slice
self._validate_indexer("slice", key.start, "getitem")
self._validate_indexer("slice", key.stop, "getitem")
self._validate_indexer("slice", key.step, "getitem")
return key
# convert the slice to an indexer here; checking that the user didn't
# pass a positional slice to loc
is_positional = is_index_slice and self._should_fallback_to_positional
# if we are mixed and have integers
if is_positional:
try:
# Validate start & stop
if start is not None:
self.get_loc(start)
if stop is not None:
self.get_loc(stop)
is_positional = False
except KeyError:
pass
if com.is_null_slice(key):
# It doesn't matter if we are positional or label based
indexer = key
elif is_positional:
if kind == "loc":
# GH#16121, GH#24612, GH#31810
raise TypeError(
"Slicing a positional slice with .loc is not allowed, "
"Use .loc with labels or .iloc with positions instead.",
)
indexer = key
else:
indexer = self.slice_indexer(start, stop, step)
return indexer
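# Illustrative example (a sketch of the float-dtype deprecation above):
#   s = pd.Series([10, 20, 30], index=[1.5, 2.5, 3.5])
#   s[1.5:2.5]        # label-based today, emits FutureWarning (GH#49612)
#   s.loc[1.5:2.5]    # non-deprecated label-based spelling
#   s.iloc[0:2]       # explicit positional slicing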
@final
def _raise_invalid_indexer(
self,
form: Literal["slice", "positional"],
key,
reraise: lib.NoDefault | None | Exception = lib.no_default,
) -> None:
"""
Raise consistent invalid indexer message.
"""
msg = (
f"cannot do {form} indexing on {type(self).__name__} with these "
f"indexers [{key}] of type {type(key).__name__}"
)
if reraise is not lib.no_default:
raise TypeError(msg) from reraise
raise TypeError(msg)
# --------------------------------------------------------------------
# Reindex Methods
@final
def _validate_can_reindex(self, indexer: np.ndarray) -> None:
"""
Check if we are allowing reindexing with this particular indexer.
Parameters
----------
indexer : an integer ndarray
Raises
------
ValueError if it is a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self._index_as_unique and len(indexer):
raise ValueError("cannot reindex on an axis with duplicate labels")
def reindex(
self,
target,
method: ReindexMethod | None = None,
level=None,
limit: int | None = None,
tolerance: float | None = None,
) -> tuple[Index, npt.NDArray[np.intp] | None]:
"""
Create index with target's values.
Parameters
----------
target : an iterable
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
level : int, optional
Level of multiindex.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : int or float, optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray[np.intp] or None
Indices of output values in original index.
Raises
------
TypeError
If ``method`` passed along with ``level``.
ValueError
If non-unique multi-index
ValueError
If non-unique index and ``method`` or ``limit`` passed.
See Also
--------
Series.reindex : Conform Series to new index with optional filling logic.
DataFrame.reindex : Conform DataFrame to new index with optional filling logic.
Examples
--------
>>> idx = pd.Index(['car', 'bike', 'train', 'tractor'])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
>>> idx.reindex(['car', 'bike'])
(Index(['car', 'bike'], dtype='object'), array([0, 1]))
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, "name")
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
if level is not None and self._is_multi:
# "Index" has no attribute "levels"; maybe "nlevels"?
idx = self.levels[level] # type: ignore[attr-defined]
else:
idx = self
target = idx[:0]
else:
target = ensure_index(target)
if level is not None and (
isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex)
):
if method is not None:
raise TypeError("Fill method not supported if level passed")
# TODO: tests where passing `keep_order=not self._is_multi`
# makes a difference for non-MultiIndex case
target, indexer, _ = self._join_level(
target, level, how="right", keep_order=not self._is_multi
)
else:
if self.equals(target):
indexer = None
else:
if self._index_as_unique:
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
elif self._is_multi:
raise ValueError("cannot handle a non-unique multi-index!")
elif not self.is_unique:
# GH#42568
raise ValueError("cannot reindex on an axis with duplicate labels")
else:
indexer, _ = self.get_indexer_non_unique(target)
target = self._wrap_reindex_result(target, indexer, preserve_names)
return target, indexer
def _wrap_reindex_result(self, target, indexer, preserve_names: bool):
target = self._maybe_preserve_names(target, preserve_names)
return target
def _maybe_preserve_names(self, target: Index, preserve_names: bool):
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy(deep=False)
target.name = self.name
return target
@final
def _reindex_non_unique(
self, target: Index
) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]:
"""
Create a new index with target's values (move/add/delete values as
necessary); use with a non-unique Index and a possibly non-unique target.
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray[np.intp]
Indices of output values in original index.
new_indexer : np.ndarray[np.intp] or None
"""
target = ensure_index(target)
if len(target) == 0:
# GH#13691
return self[:0], np.array([], dtype=np.intp), None
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels: Index | np.ndarray = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer), dtype=np.intp)
missing = ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = length[~check]
cur_labels = self.take(indexer[check]).values
cur_indexer = length[check]
# Index constructor below will do inference
new_labels = np.empty((len(indexer),), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# GH#38906
if not len(self):
new_indexer = np.arange(0, dtype=np.intp)
# a unique indexer
elif target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer), dtype=np.intp)
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = -1
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp)
new_indexer[~check] = -1
if not isinstance(self, ABCMultiIndex):
new_index = Index(new_labels, name=self.name)
else:
new_index = type(self).from_tuples(new_labels, names=self.names)
return new_index, indexer, new_indexer
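# Illustrative walk-through (a sketch, not asserted by the source): with
# self = Index(['a', 'b', 'b']) and target = ['b', 'c'], the non-unique
# reindex expands 'b' to both of its positions and appends the missing label,
# giving roughly
#   new_index   = Index(['b', 'b', 'c'])
#   indexer     = [1, 2, -1]   # positions in self, -1 for the missing 'c'
#   new_indexer = [0, 1, -1]   # -1 marks labels absent from self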
# --------------------------------------------------------------------
# Join Methods
@overload
def join(
self,
other: Index,
*,
how: JoinHow = ...,
level: Level = ...,
return_indexers: Literal[True],
sort: bool = ...,
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
...
@overload
def join(
self,
other: Index,
*,
how: JoinHow = ...,
level: Level = ...,
return_indexers: Literal[False] = ...,
sort: bool = ...,
) -> Index:
...
@overload
def join(
self,
other: Index,
*,
how: JoinHow = ...,
level: Level = ...,
return_indexers: bool = ...,
sort: bool = ...,
) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
...
@final
@_maybe_return_indexers
def join(
self,
other: Index,
*,
how: JoinHow = "left",
level: Level | None = None,
return_indexers: bool = False,
sort: bool = False,
) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
"""
Compute join_index and indexers to conform data structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : bool, default False
sort : bool, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword).
Returns
-------
join_index, (left_indexer, right_indexer)
Examples
--------
>>> idx1 = pd.Index([1, 2, 3])
>>> idx2 = pd.Index([4, 5, 6])
>>> idx1.join(idx2, how='outer')
Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
other = ensure_index(other)
sort = sort or how == "outer"
if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):
if (self.tz is None) ^ (other.tz is None):
# Raise instead of casting to object below.
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")
if not self._is_multi and not other._is_multi:
# We have specific handling for MultiIndex below
pself, pother = self._maybe_downcast_for_indexing(other)
if pself is not self or pother is not other:
return pself.join(
pother, how=how, level=level, return_indexers=True, sort=sort
)
# try to figure out the join level
# GH3662
if level is None and (self._is_multi or other._is_multi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how)
# join on the level
if level is not None and (self._is_multi or other._is_multi):
return self._join_level(other, level, how=how)
if len(self) == 0 or len(other) == 0:
try:
return self._join_empty(other, how, sort)
except TypeError:
# object dtype; non-comparable objects
pass
if self.dtype != other.dtype:
dtype = self._find_common_type_compat(other)
this = self.astype(dtype, copy=False)
other = other.astype(dtype, copy=False)
return this.join(other, how=how, return_indexers=True)
elif (
isinstance(self, ABCCategoricalIndex)
and isinstance(other, ABCCategoricalIndex)
and not self.ordered
and not self.categories.equals(other.categories)
):
# dtypes are "equal" but categories are in different order
other = Index(other._values.reorder_categories(self.categories))
_validate_join_method(how)
if (
self.is_monotonic_increasing
and other.is_monotonic_increasing
and self._can_use_libjoin
and other._can_use_libjoin
and (self.is_unique or other.is_unique)
):
try:
return self._join_monotonic(other, how=how)
except TypeError:
# object dtype; non-comparable objects
pass
elif not self.is_unique or not other.is_unique:
return self._join_non_unique(other, how=how, sort=sort)
return self._join_via_get_indexer(other, how, sort)
@final
def _join_empty(
self, other: Index, how: JoinHow, sort: bool
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
assert len(self) == 0 or len(other) == 0
_validate_join_method(how)
lidx: np.ndarray | None
ridx: np.ndarray | None
if len(other):
how = cast(JoinHow, {"left": "right", "right": "left"}.get(how, how))
join_index, ridx, lidx = other._join_empty(self, how, sort)
elif how in ["left", "outer"]:
if sort and not self.is_monotonic_increasing:
lidx = self.argsort()
join_index = self.take(lidx)
else:
lidx = None
join_index = self._view()
ridx = np.broadcast_to(np.intp(-1), len(join_index))
else:
join_index = other._view()
lidx = np.array([], dtype=np.intp)
ridx = None
return join_index, lidx, ridx
@final
def _join_via_get_indexer(
self, other: Index, how: JoinHow, sort: bool
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
# Fallback if we do not have any fastpaths available based on
# uniqueness/monotonicity
# Note: at this point we have checked matching dtypes
if how == "left":
join_index = self.sort_values() if sort else self
elif how == "right":
join_index = other.sort_values() if sort else other
elif how == "inner":
join_index = self.intersection(other, sort=sort)
elif how == "outer":
try:
join_index = self.union(other, sort=sort)
except TypeError:
join_index = self.union(other)
try:
join_index = _maybe_try_sort(join_index, sort)
except TypeError:
pass
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer_for(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer_for(join_index)
return join_index, lindexer, rindexer
@final
def _join_multi(self, other: Index, how: JoinHow):
from pandas.core.indexes.multi import MultiIndex
from pandas.core.reshape.merge import restore_dropped_levels_multijoin
# figure out join names
self_names_list = list(com.not_none(*self.names))
other_names_list = list(com.not_none(*other.names))
self_names_order = self_names_list.index
other_names_order = other_names_list.index
self_names = set(self_names_list)
other_names = set(other_names_list)
overlap = self_names & other_names
# need at least 1 in common
if not overlap:
raise ValueError("cannot join with no overlapping index names")
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
# Drop the non-matching levels from left and right respectively
ldrop_names = sorted(self_names - overlap, key=self_names_order)
rdrop_names = sorted(other_names - overlap, key=other_names_order)
# if only the order differs
if not len(ldrop_names + rdrop_names):
self_jnlevels = self
other_jnlevels = other.reorder_levels(self.names)
else:
self_jnlevels = self.droplevel(ldrop_names)
other_jnlevels = other.droplevel(rdrop_names)
# Join left and right
# Join on same leveled multi-index frames is supported
join_idx, lidx, ridx = self_jnlevels.join(
other_jnlevels, how=how, return_indexers=True
)
# Restore the dropped levels
# Returned index level order is
# common levels, ldrop_names, rdrop_names
dropped_names = ldrop_names + rdrop_names
# error: Argument 5/6 to "restore_dropped_levels_multijoin" has
# incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any
# ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]"
levels, codes, names = restore_dropped_levels_multijoin(
self,
other,
dropped_names,
join_idx,
lidx, # type: ignore[arg-type]
ridx, # type: ignore[arg-type]
)
# Re-create the multi-index
multi_join_idx = MultiIndex(
levels=levels, codes=codes, names=names, verify_integrity=False
)
multi_join_idx = multi_join_idx.remove_unused_levels()
# maintain the order of the index levels
if how == "right":
level_order = other_names_list + ldrop_names
else:
level_order = self_names_list + rdrop_names
multi_join_idx = multi_join_idx.reorder_levels(level_order)
return multi_join_idx, lidx, ridx
jl = next(iter(overlap))
# Case where only one index is multi
# make the indices into mi's that match
flip_order = False
if isinstance(self, MultiIndex):
self, other = other, self
flip_order = True
# flip if join method is right or left
flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
how = flip.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how)
if flip_order:
return result[0], result[2], result[1]
return result
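# Illustrative note (an interpretation of the branch above): when only one
# side is a MultiIndex, the join reduces to a level join on the single shared
# name; the operands are arranged so the flat Index is on the left, `how` is
# flipped between "left"/"right" accordingly, and the returned indexers are
# swapped back at the end.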
@final
def _join_non_unique(
self, other: Index, how: JoinHow = "left", sort: bool = False
) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
from pandas.core.reshape.merge import get_join_indexers_non_unique
# We only get here if dtypes match
assert self.dtype == other.dtype
left_idx, right_idx = get_join_indexers_non_unique(
self._values, other._values, how=how, sort=sort
)
mask = left_idx == -1
join_idx = self.take(left_idx)
right = other.take(right_idx)
join_index = join_idx.putmask(mask, right)
if isinstance(join_index, ABCMultiIndex) and how == "outer":
# test_join_index_levels
join_index = join_index._sort_levels_monotonic()
return join_index, left_idx, right_idx
@final
def _join_level(
self, other: Index, level, how: JoinHow = "left", keep_order: bool = True
) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex.
If ``keep_order == True``, the order of the data indexed by the
MultiIndex will not be changed; otherwise, it will tie out
with `other`.
"""
from pandas.core.indexes.multi import MultiIndex
def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]:
"""
Returns sorter for the innermost level while preserving the
order of higher levels.
Parameters
----------
labels : list[np.ndarray]
Each ndarray has signed integer dtype, not necessarily identical.
Returns
-------
np.ndarray[np.intp]
"""
if labels[0].size == 0:
return np.empty(0, dtype=np.intp)
if len(labels) == 1:
return get_group_index_sorter(ensure_platform_int(labels[0]))
# find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = ensure_int64(labels[-1])
return lib.get_level_sorter(lab, ensure_platform_int(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError("Join on level between two MultiIndex objects is ambiguous")
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
how = flip.get(how, how)
assert isinstance(left, MultiIndex)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError(
"Index._join_level on non-unique index is not implemented"
)
new_level, left_lev_indexer, right_lev_indexer = old_level.join(
right, how=how, return_indexers=True
)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.codes[: level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = ensure_platform_int(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level))
old_codes = left.codes[level]
taker = old_codes[old_codes != -1]
new_lev_codes = rev_indexer.take(taker)
new_codes = list(left.codes)
new_codes[level] = new_lev_codes
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left), dtype=np.intp)
left_indexer = cast(np.ndarray, left_indexer)
mask = new_lev_codes != -1
if not mask.all():
new_codes = [lab[mask] for lab in new_codes]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max()
ngroups = 1 + max_new_lev
left_indexer, counts = libalgos.groupsort_indexer(
new_lev_codes, ngroups
)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0] :]
new_codes = [lab[left_indexer] for lab in new_codes]
else: # sort the leaves
mask = new_lev_codes != -1
mask_all = mask.all()
if not mask_all:
new_codes = [lab[mask] for lab in new_codes]
left_indexer = _get_leaf_sorter(new_codes[: level + 1])
new_codes = [lab[left_indexer] for lab in new_codes]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(
levels=new_levels,
codes=new_codes,
names=left.names,
verify_integrity=False,
)
if right_lev_indexer is not None:
right_indexer = right_lev_indexer.take(join_index.codes[level])
else:
right_indexer = join_index.codes[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
left_indexer = (
None if left_indexer is None else ensure_platform_int(left_indexer)
)
right_indexer = (
None if right_indexer is None else ensure_platform_int(right_indexer)
)
return join_index, left_indexer, right_indexer
@final
def _join_monotonic(
self, other: Index, how: JoinHow = "left"
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
# We only get here with matching dtypes and both monotonic increasing
assert other.dtype == self.dtype
assert self._can_use_libjoin and other._can_use_libjoin
if self.equals(other):
# This is a convenient place for this check, but its correctness
# does not depend on monotonicity, so it could go earlier
# in the calling method.
ret_index = other if how == "right" else self
return ret_index, None, None
ridx: npt.NDArray[np.intp] | None
lidx: npt.NDArray[np.intp] | None
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == "left":
join_index = self
lidx = None
ridx = self._left_indexer_unique(other)
elif how == "right":
join_index = other
lidx = other._left_indexer_unique(self)
ridx = None
elif how == "inner":
join_array, lidx, ridx = self._inner_indexer(other)
join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
elif how == "outer":
join_array, lidx, ridx = self._outer_indexer(other)
join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
else:
if how == "left":
join_array, lidx, ridx = self._left_indexer(other)
elif how == "right":
join_array, ridx, lidx = other._left_indexer(self)
elif how == "inner":
join_array, lidx, ridx = self._inner_indexer(other)
elif how == "outer":
join_array, lidx, ridx = self._outer_indexer(other)
assert lidx is not None
assert ridx is not None
join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
lidx = None if lidx is None else ensure_platform_int(lidx)
ridx = None if ridx is None else ensure_platform_int(ridx)
return join_index, lidx, ridx
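# Worked sketch (illustrative, not part of the original source): for the
# unique, monotonic inputs self = Index([1, 2, 4]) and other = Index([2, 3, 4]),
# how="inner" takes the _inner_indexer path and yields
# join_index = Index([2, 4]), lidx = array([1, 2]), ridx = array([0, 2]);
# how="left" returns self unchanged with lidx = None and ridx = array([-1, 0, 2]).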
def _wrap_joined_index(
self,
joined: ArrayLike,
other: Self,
lidx: npt.NDArray[np.intp],
ridx: npt.NDArray[np.intp],
) -> Self:
assert other.dtype == self.dtype
if isinstance(self, ABCMultiIndex):
name = self.names if self.names == other.names else None
# error: Incompatible return value type (got "MultiIndex",
# expected "Self")
mask = lidx == -1
join_idx = self.take(lidx)
right = cast("MultiIndex", other.take(ridx))
join_index = join_idx.putmask(mask, right)._sort_levels_monotonic()
return join_index.set_names(name) # type: ignore[return-value]
else:
name = get_op_result_name(self, other)
return self._constructor._with_infer(joined, name=name, dtype=self.dtype)
@final
@cache_readonly
def _can_use_libjoin(self) -> bool:
"""
Whether we can use the fastpaths implemented in _libs.join.
This is driven by whether (in monotonic increasing cases that are
guaranteed not to have NAs) we can convert to a np.ndarray without
making a copy. If we cannot, this negates the performance benefit
of using libjoin.
"""
if type(self) is Index:
# excludes most EAs, but includes masked and Arrow-backed arrays; we get
# here with monotonic values only, meaning no NAs
return (
isinstance(self.dtype, np.dtype)
or isinstance(self._values, (ArrowExtensionArray, BaseMaskedArray))
or self.dtype == "string[python]"
)
# Exclude index types where the conversion to numpy converts to object dtype,
# which negates the performance benefit of libjoin
# Subclasses should override to return False if _get_join_target is
# not zero-copy.
# TODO: exclude RangeIndex (which allocates memory)?
# Doing so seems to break test_concat_datetime_timezone
return not isinstance(self, (ABCIntervalIndex, ABCMultiIndex))
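# Descriptive note (added for exposition): a plain numpy-backed Index, a
# masked or Arrow-backed Index, and datetime-like subclasses report True
# here, while IntervalIndex and MultiIndex report False and fall back to
# the slower non-libjoin join paths.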
# --------------------------------------------------------------------
# Uncategorized Methods
@property
def values(self) -> ArrayLike:
"""
Return an array representing the data in the Index.
.. warning::
We recommend using :attr:`Index.array` or
:meth:`Index.to_numpy`, depending on whether you need
a reference to the underlying data or a NumPy array.
Returns
-------
array: numpy.ndarray or ExtensionArray
See Also
--------
Index.array : Reference to the underlying data.
Index.to_numpy : A NumPy array representing the underlying data.
Examples
--------
For :class:`pandas.Index`:
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.values
array([1, 2, 3])
For :class:`pandas.IntervalIndex`:
>>> idx = pd.interval_range(start=0, end=5)
>>> idx.values
<IntervalArray>
[(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
Length: 5, dtype: interval[int64, right]
"""
if using_copy_on_write():
data = self._data
if isinstance(data, np.ndarray):
data = data.view()
data.flags.writeable = False
return data
return self._data
@cache_readonly
@doc(IndexOpsMixin.array)
def array(self) -> ExtensionArray:
array = self._data
if isinstance(array, np.ndarray):
from pandas.core.arrays.numpy_ import NumpyExtensionArray
array = NumpyExtensionArray(array)
return array
@property
def _values(self) -> ExtensionArray | np.ndarray:
"""
The best array representation.
This is an ndarray or ExtensionArray.
``_values`` are consistent between ``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values |
----------------- | --------------- | ------------- |
Index | ndarray | ndarray |
CategoricalIndex | Categorical | Categorical |
DatetimeIndex | ndarray[M8ns] | DatetimeArray |
DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray |
PeriodIndex | ndarray[object] | PeriodArray |
IntervalIndex | IntervalArray | IntervalArray |
See Also
--------
values : Values
"""
return self._data
def _get_engine_target(self) -> ArrayLike:
"""
Get the ndarray or ExtensionArray that we can pass to the IndexEngine
constructor.
"""
vals = self._values
if isinstance(vals, StringArray):
# GH#45652 much more performant than ExtensionEngine
return vals._ndarray
if isinstance(vals, ArrowExtensionArray) and self.dtype.kind in "Mm":
import pyarrow as pa
pa_type = vals._pa_array.type
if pa.types.is_timestamp(pa_type):
vals = vals._to_datetimearray()
return vals._ndarray.view("i8")
elif pa.types.is_duration(pa_type):
vals = vals._to_timedeltaarray()
return vals._ndarray.view("i8")
if (
type(self) is Index
and isinstance(self._values, ExtensionArray)
and not isinstance(self._values, BaseMaskedArray)
and not (
isinstance(self._values, ArrowExtensionArray)
and is_numeric_dtype(self.dtype)
# Exclude decimal
and self.dtype.kind != "O"
)
):
# TODO(ExtensionIndex): remove special-case, just use self._values
return self._values.astype(object)
return vals
@final
def _get_join_target(self) -> np.ndarray:
"""
Get the ndarray or ExtensionArray that we can pass to the join
functions.
"""
if isinstance(self._values, BaseMaskedArray):
# This is only used if our array is monotonic, so no NAs present
return self._values._data
elif isinstance(self._values, ArrowExtensionArray):
# This is only used if our array is monotonic, so no missing values
# present
return self._values.to_numpy()
# TODO: exclude ABCRangeIndex case here as it copies
target = self._get_engine_target()
if not isinstance(target, np.ndarray):
raise ValueError("_can_use_libjoin should return False.")
return target
def _from_join_target(self, result: np.ndarray) -> ArrayLike:
"""
Cast the ndarray returned from one of the libjoin.foo_indexer functions
back to type(self._data).
"""
if isinstance(self.values, BaseMaskedArray):
return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_))
elif isinstance(self.values, (ArrowExtensionArray, StringArray)):
return type(self.values)._from_sequence(result, dtype=self.dtype)
return result
@doc(IndexOpsMixin._memory_usage)
def memory_usage(self, deep: bool = False) -> int:
result = self._memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
@final
def where(self, cond, other=None) -> Index:
"""
Replace values where the condition is False.
The replacement is taken from other.
Parameters
----------
cond : bool array-like with the same length as self
Condition to select the values on.
other : scalar, or array-like, default None
Replacement if the condition is False.
Returns
-------
pandas.Index
A copy of self with values replaced from other
where the condition is False.
See Also
--------
Series.where : Same method for Series.
DataFrame.where : Same method for DataFrame.
Examples
--------
>>> idx = pd.Index(['car', 'bike', 'train', 'tractor'])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
>>> idx.where(idx.isin(['car', 'train']), 'other')
Index(['car', 'other', 'train', 'other'], dtype='object')
"""
if isinstance(self, ABCMultiIndex):
raise NotImplementedError(
".where is not supported for MultiIndex operations"
)
cond = np.asarray(cond, dtype=bool)
return self.putmask(~cond, other)
# construction helpers
@final
@classmethod
def _raise_scalar_data_error(cls, data):
# We return the TypeError so that we can raise it from the constructor
# in order to keep mypy happy
raise TypeError(
f"{cls.__name__}(...) must be called with a collection of some "
f"kind, {repr(data) if not isinstance(data, np.generic) else str(data)} "
"was passed"
)
def _validate_fill_value(self, value):
"""
Check if the value can be inserted into our array without casting,
and convert it to an appropriate native type if necessary.
Raises
------
TypeError
If the value cannot be inserted into an array of this dtype.
"""
dtype = self.dtype
if isinstance(dtype, np.dtype) and dtype.kind not in "mM":
# return np_can_hold_element(dtype, value)
try:
return np_can_hold_element(dtype, value)
except LossySetitemError as err:
# re-raise as TypeError for consistency
raise TypeError from err
elif not can_hold_element(self._values, value):
raise TypeError
return value
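# Behaviour sketch (illustrative, based on the checks above): for an
# int64-backed Index, _validate_fill_value(5) returns the value, while
# _validate_fill_value(1.5) raises TypeError because np_can_hold_element
# signals a lossy cast via LossySetitemError.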
def _is_memory_usage_qualified(self) -> bool:
"""
Return a boolean if we need a qualified .info display.
"""
return is_object_dtype(self.dtype)
def __contains__(self, key: Any) -> bool:
"""
Return a boolean indicating whether the provided key is in the index.
Parameters
----------
key : label
The key to check if it is present in the index.
Returns
-------
bool
Whether the key search is in the index.
Raises
------
TypeError
If the key is not hashable.
See Also
--------
Index.isin : Returns an ndarray of boolean dtype indicating whether the
list-like key is in the index.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> 2 in idx
True
>>> 6 in idx
False
"""
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
# https://github.com/python/typeshed/issues/2148#issuecomment-520783318
# Incompatible types in assignment (expression has type "None", base class
# "object" defined the type as "Callable[[object], int]")
__hash__: ClassVar[None] # type: ignore[assignment]
@final
def __setitem__(self, key, value) -> None:
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
getitem = self._data.__getitem__
if is_integer(key) or is_float(key):
# GH#44051 exclude bool, which would return a 2d ndarray
key = com.cast_scalar_indexer(key)
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization com.is_bool_indexer and ndim checks.
return self._getitem_slice(key)
if com.is_bool_indexer(key):
# if we have list[bools, length=1e5] then doing this check+convert
# takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__
# time below from 3.8 ms to 496 µs
# if we already have ndarray[bool], the overhead is 1.4 µs or .25%
if isinstance(getattr(key, "dtype", None), ExtensionDtype):
key = key.to_numpy(dtype=bool, na_value=False)
else:
key = np.asarray(key, dtype=bool)
if not isinstance(self.dtype, ExtensionDtype):
if len(key) == 0 and len(key) != len(self):
warnings.warn(
"Using a boolean indexer with length 0 on an Index with "
"length greater than 0 is deprecated and will raise in a "
"future version.",
FutureWarning,
stacklevel=find_stack_level(),
)
result = getitem(key)
# Because we ruled out integer above, we always get an arraylike here
if result.ndim > 1:
disallow_ndim_indexing(result)
# NB: Using _constructor._simple_new would break if MultiIndex
# didn't override __getitem__
return self._constructor._simple_new(result, name=self._name)
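# Usage sketch (illustrative, not in the original source):
#   pd.Index(["a", "b", "c"])[[True, False, True]]
# returns Index(['a', 'c'], dtype='object'), while an integer key such as
# idx[1] returns the scalar 'b'.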
def _getitem_slice(self, slobj: slice) -> Self:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
res = self._data[slobj]
result = type(self)._simple_new(res, name=self._name, refs=self._references)
if "_engine" in self._cache:
reverse = slobj.step is not None and slobj.step < 0
result._engine._update_from_sliced(self._engine, reverse=reverse) # type: ignore[union-attr]
return result
@final
def _can_hold_identifiers_and_holds_name(self, name) -> bool:
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
"""
if (
is_object_dtype(self.dtype)
or is_string_dtype(self.dtype)
or isinstance(self.dtype, CategoricalDtype)
):
return name in self
return False
def append(self, other: Index | Sequence[Index]) -> Index:
"""
Append a collection of Index options together.
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
Index
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx.append(pd.Index([4]))
Index([1, 2, 3, 4], dtype='int64')
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat += list(other)
else:
# error: Argument 1 to "append" of "list" has incompatible type
# "Union[Index, Sequence[Index]]"; expected "Index"
to_concat.append(other) # type: ignore[arg-type]
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError("all inputs must be Index")
names = {obj.name for obj in to_concat}
name = None if len(names) > 1 else self.name
return self._concat(to_concat, name)
def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
"""
Concatenate multiple Index objects.
"""
to_concat_vals = [x._values for x in to_concat]
result = concat_compat(to_concat_vals)
return Index._with_infer(result, name=name)
def putmask(self, mask, value) -> Index:
"""
Return a new Index of the values set with the mask.
Returns
-------
Index
See Also
--------
numpy.ndarray.putmask : Changes elements of an array
based on conditional and input values.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3])
>>> idx2 = pd.Index([5, 6, 7])
>>> idx1.putmask([True, False, False], idx2)
Index([5, 2, 3], dtype='int64')
"""
mask, noop = validate_putmask(self._values, mask)
if noop:
return self.copy()
if self.dtype != object and is_valid_na_for_dtype(value, self.dtype):
# e.g. None -> np.nan, see also Block._standardize_fill_value
value = self._na_value
try:
converted = self._validate_fill_value(value)
except (LossySetitemError, ValueError, TypeError) as err:
if is_object_dtype(self.dtype): # pragma: no cover
raise err
# See also: Block.coerce_to_target_dtype
dtype = self._find_common_type_compat(value)
return self.astype(dtype).putmask(mask, value)
values = self._values.copy()
if isinstance(values, np.ndarray):
converted = setitem_datetimelike_compat(values, mask.sum(), converted)
np.putmask(values, mask, converted)
else:
# Note: we use the original value here, not converted, as
# _validate_fill_value is not idempotent
values._putmask(mask, value)
return self._shallow_copy(values)
def equals(self, other: Any) -> bool:
"""
Determine if two Index object are equal.
The things that are being compared are:
* The elements inside the Index object.
* The order of the elements inside the Index object.
Parameters
----------
other : Any
The other object to compare against.
Returns
-------
bool
True if "other" is an Index and it has the same elements and order
as the calling index; False otherwise.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3])
>>> idx1
Index([1, 2, 3], dtype='int64')
>>> idx1.equals(pd.Index([1, 2, 3]))
True
The elements inside are compared
>>> idx2 = pd.Index(["1", "2", "3"])
>>> idx2
Index(['1', '2', '3'], dtype='object')
>>> idx1.equals(idx2)
False
The order is compared
>>> ascending_idx = pd.Index([1, 2, 3])
>>> ascending_idx
Index([1, 2, 3], dtype='int64')
>>> descending_idx = pd.Index([3, 2, 1])
>>> descending_idx
Index([3, 2, 1], dtype='int64')
>>> ascending_idx.equals(descending_idx)
False
The dtype is *not* compared
>>> int64_idx = pd.Index([1, 2, 3], dtype='int64')
>>> int64_idx
Index([1, 2, 3], dtype='int64')
>>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64')
>>> uint64_idx
Index([1, 2, 3], dtype='uint64')
>>> int64_idx.equals(uint64_idx)
True
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if len(self) != len(other):
# quickly return if the lengths are different
return False
if (
isinstance(self.dtype, StringDtype)
and self.dtype.storage == "pyarrow_numpy"
and other.dtype != self.dtype
):
# special case for object behavior
return other.equals(self.astype(object))
if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype):
# if other is not object, use other's logic for coercion
return other.equals(self)
if isinstance(other, ABCMultiIndex):
# d-level MultiIndex can equal d-tuple Index
return other.equals(self)
if isinstance(self._values, ExtensionArray):
# Dispatch to the ExtensionArray's .equals method.
if not isinstance(other, type(self)):
return False
earr = cast(ExtensionArray, self._data)
return earr.equals(other._data)
if isinstance(other.dtype, ExtensionDtype):
# All EA-backed Index subclasses override equals
return other.equals(self)
return array_equivalent(self._values, other._values)
@final
def identical(self, other) -> bool:
"""
Similar to equals, but checks that object attributes and types are also equal.
Returns
-------
bool
If two Index objects have equal elements and same type True,
otherwise False.
Examples
--------
>>> idx1 = pd.Index(['1', '2', '3'])
>>> idx2 = pd.Index(['1', '2', '3'])
>>> idx2.identical(idx1)
True
>>> idx1 = pd.Index(['1', '2', '3'], name="A")
>>> idx2 = pd.Index(['1', '2', '3'], name="B")
>>> idx2.identical(idx1)
False
"""
return (
self.equals(other)
and all(
getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables
)
and type(self) == type(other)
and self.dtype == other.dtype
)
@final
def asof(self, label):
"""
Return the label from the index, or, if not present, the previous one.
Assuming that the index is sorted, return the passed index label if it
is in the index, or return the previous index label if the passed one
is not in the index.
Parameters
----------
label : object
The label up to which the method returns the latest index label.
Returns
-------
object
The passed label if it is in the index. The previous label if the
passed label is not in the sorted index or `NaN` if there is no
such label.
See Also
--------
Series.asof : Return the latest value in a Series up to the
passed index.
merge_asof : Perform an asof merge (similar to left join but it
matches on nearest key rather than equal key).
Index.get_loc : An `asof` is a thin wrapper around `get_loc`
with method='pad'.
Examples
--------
`Index.asof` returns the latest index label up to the passed label.
>>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03'])
>>> idx.asof('2014-01-01')
'2013-12-31'
If the label is in the index, the method returns the passed label.
>>> idx.asof('2014-01-02')
'2014-01-02'
If all of the labels in the index are later than the passed label,
NaN is returned.
>>> idx.asof('1999-01-02')
nan
If the index is not sorted, an error is raised.
>>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02',
... '2014-01-03'])
>>> idx_not_sorted.asof('2013-12-31')
Traceback (most recent call last):
ValueError: index must be monotonic increasing or decreasing
"""
self._searchsorted_monotonic(label) # validate sortedness
try:
loc = self.get_loc(label)
except (KeyError, TypeError):
# KeyError -> No exact match, try for padded
# TypeError -> passed e.g. non-hashable, fall through to get
# the tested exception message
indexer = self.get_indexer([label], method="pad")
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError("asof requires scalar valued input")
loc = indexer.item()
if loc == -1:
return self._na_value
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
def asof_locs(
self, where: Index, mask: npt.NDArray[np.bool_]
) -> npt.NDArray[np.intp]:
"""
Return the locations (indices) of labels in the index.
As in the :meth:`pandas.Index.asof`, if the label (a particular entry in
``where``) is not in the index, the latest index label up to the
passed label is chosen and its index returned.
If all of the labels in the index are later than a label in ``where``,
-1 is returned.
``mask`` is used to ignore ``NA`` values in the index during calculation.
Parameters
----------
where : Index
An Index consisting of an array of timestamps.
mask : np.ndarray[bool]
Array of booleans denoting where values in the original
data are not ``NA``.
Returns
-------
np.ndarray[np.intp]
An array of locations (indices) of the labels from the index
which correspond to the return values of :meth:`pandas.Index.asof`
for every element in ``where``.
See Also
--------
Index.asof : Return the label from the index, or, if not present, the
previous one.
Examples
--------
>>> idx = pd.date_range('2023-06-01', periods=3, freq='D')
>>> where = pd.DatetimeIndex(['2023-05-30 00:12:00', '2023-06-01 00:00:00',
... '2023-06-02 23:59:59'])
>>> mask = np.ones(3, dtype=bool)
>>> idx.asof_locs(where, mask)
array([-1, 0, 1])
We can use ``mask`` to ignore certain values in the index during calculation.
>>> mask[1] = False
>>> idx.asof_locs(where, mask)
array([-1, 0, 0])
"""
# error: No overload variant of "searchsorted" of "ndarray" matches argument
# types "Union[ExtensionArray, ndarray[Any, Any]]", "str"
# TODO: will be fixed when ExtensionArray.searchsorted() is fixed
locs = self._values[mask].searchsorted(
where._values, side="right" # type: ignore[call-overload]
)
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self), dtype=np.intp)[mask].take(locs)
first_value = self._values[mask.argmax()]
result[(locs == 0) & (where._values < first_value)] = -1
return result
@overload
def sort_values(
self,
*,
return_indexer: Literal[False] = ...,
ascending: bool = ...,
na_position: NaPosition = ...,
key: Callable | None = ...,
) -> Self:
...
@overload
def sort_values(
self,
*,
return_indexer: Literal[True],
ascending: bool = ...,
na_position: NaPosition = ...,
key: Callable | None = ...,
) -> tuple[Self, np.ndarray]:
...
@overload
def sort_values(
self,
*,
return_indexer: bool = ...,
ascending: bool = ...,
na_position: NaPosition = ...,
key: Callable | None = ...,
) -> Self | tuple[Self, np.ndarray]:
...
@deprecate_nonkeyword_arguments(
version="3.0", allowed_args=["self"], name="sort_values"
)
def sort_values(
self,
return_indexer: bool = False,
ascending: bool = True,
na_position: NaPosition = "last",
key: Callable | None = None,
) -> Self | tuple[Self, np.ndarray]:
"""
Return a sorted copy of the index.
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape.
Returns
-------
sorted_index : pandas.Index
Sorted copy of the index.
indexer : numpy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
Series.sort_values : Sort values of a Series.
DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> idx = pd.Index([10, 100, 1, 1000])
>>> idx
Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
"""
if key is None and (
(ascending and self.is_monotonic_increasing)
or (not ascending and self.is_monotonic_decreasing)
):
if return_indexer:
indexer = np.arange(len(self), dtype=np.intp)
return self.copy(), indexer
else:
return self.copy()
# GH 35584. Sort missing values according to na_position kwarg
# ignore na_position for MultiIndex
if not isinstance(self, ABCMultiIndex):
_as = nargsort(
items=self, ascending=ascending, na_position=na_position, key=key
)
else:
idx = cast(Index, ensure_key_mapped(self, key))
_as = idx.argsort(na_position=na_position)
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
@final
def sort(self, *args, **kwargs):
"""
Use sort_values instead.
"""
raise TypeError("cannot sort an Index object in-place, use sort_values instead")
def shift(self, periods: int = 1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or str, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.Index
Shifted index.
See Also
--------
Series.shift : Shift values of Series.
Notes
-----
This method is only implemented for datetime-like index classes,
i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.
Examples
--------
Put the first 5 month starts of 2011 into an index.
>>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
dtype='datetime64[ns]', freq='MS')
Shift the index by 10 days.
>>> month_starts.shift(10, freq='D')
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
The default value of `freq` is the `freq` attribute of the index,
which is 'MS' (month start) in this example.
>>> month_starts.shift(10)
DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
"""
raise NotImplementedError(
f"This method is only implemented for DatetimeIndex, PeriodIndex and "
f"TimedeltaIndex; Got type {type(self).__name__}"
)
def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
np.ndarray[np.intp]
Integer indices that would sort the index if used as
an indexer.
See Also
--------
numpy.argsort : Similar method for NumPy arrays.
Index.sort_values : Return sorted copy of Index.
Examples
--------
>>> idx = pd.Index(['b', 'a', 'd', 'c'])
>>> idx
Index(['b', 'a', 'd', 'c'], dtype='object')
>>> order = idx.argsort()
>>> order
array([1, 0, 3, 2])
>>> idx[order]
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
# This works for either ndarray or EA, is overridden
# by RangeIndex, MultIIndex
return self._data.argsort(*args, **kwargs)
def _check_indexing_error(self, key):
if not is_scalar(key):
# if key is not a scalar, directly raise an error (the code below
# would convert to numpy arrays and raise later any way) - GH29926
raise InvalidIndexError(key)
@cache_readonly
def _should_fallback_to_positional(self) -> bool:
"""
Should an integer key be treated as positional?
"""
return self.inferred_type not in {
"integer",
"mixed-integer",
"floating",
"complex",
}
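# Illustrative consequence (added for exposition): pd.Index(["a", "b"]) has
# inferred_type "string", so this returns True and integer keys fall back
# to positional lookups; pd.Index([10, 20]) has inferred_type "integer",
# so this returns False and integer keys are treated as labels.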
_index_shared_docs[
"get_indexer_non_unique"
] = """
Compute indexer and mask for new index given the current index.
The indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
Returns
-------
indexer : np.ndarray[np.intp]
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : np.ndarray[np.intp]
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array.
Examples
--------
>>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
>>> index.get_indexer_non_unique(['b', 'b'])
(array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))
In the example below there are no matched values.
>>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
>>> index.get_indexer_non_unique(['q', 'r', 't'])
(array([-1, -1, -1]), array([0, 1, 2]))
For this reason, the returned ``indexer`` contains only integers equal to -1.
It demonstrates that there's no match between the index and the ``target``
values at these positions. The mask [0, 1, 2] in the return value shows that
the first, second, and third elements are missing.
Notice that the return value is a tuple containing two items. In the example
below the first item is an array of locations in ``index``. The second
item is a mask showing that the first and third elements are missing.
>>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
>>> index.get_indexer_non_unique(['f', 'b', 's'])
(array([-1, 1, 3, 4, -1]), array([0, 2]))
"""
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(
self, target
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
target = ensure_index(target)
target = self._maybe_cast_listlike_indexer(target)
if not self._should_compare(target) and not self._should_partial_index(target):
# _should_partial_index e.g. IntervalIndex with numeric scalars
# that can be matched to Interval scalars.
return self._get_indexer_non_comparable(target, method=None, unique=False)
pself, ptarget = self._maybe_downcast_for_indexing(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.dtype != target.dtype:
# TODO: if object, could use infer_dtype to preempt costly
# conversion if still non-comparable?
dtype = self._find_common_type_compat(target)
this = self.astype(dtype, copy=False)
that = target.astype(dtype, copy=False)
return this.get_indexer_non_unique(that)
# TODO: get_indexer has fastpaths for both Categorical-self and
# Categorical-target. Can we do something similar here?
# Note: _maybe_downcast_for_indexing ensures we never get here
# with MultiIndex self and non-Multi target
if self._is_multi and target._is_multi:
engine = self._engine
# Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has
# no attribute "_extract_level_codes"
tgt_values = engine._extract_level_codes(target) # type: ignore[union-attr]
else:
tgt_values = target._get_engine_target()
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return ensure_platform_int(indexer), ensure_platform_int(missing)
@final
def get_indexer_for(self, target) -> npt.NDArray[np.intp]:
"""
Guaranteed return of an indexer even when non-unique.
This dispatches to get_indexer or get_indexer_non_unique
as appropriate.
Returns
-------
np.ndarray[np.intp]
List of indices.
Examples
--------
>>> idx = pd.Index([np.nan, 'var1', np.nan])
>>> idx.get_indexer_for([np.nan])
array([0, 2])
"""
if self._index_as_unique:
return self.get_indexer(target)
indexer, _ = self.get_indexer_non_unique(target)
return indexer
def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]:
"""
Analogue to get_indexer that raises if any elements are missing.
"""
keyarr = key
if not isinstance(keyarr, Index):
keyarr = com.asarray_tuplesafe(keyarr)
if self._index_as_unique:
indexer = self.get_indexer_for(keyarr)
keyarr = self.reindex(keyarr)[0]
else:
keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr)
self._raise_if_missing(keyarr, indexer, axis_name)
keyarr = self.take(indexer)
if isinstance(key, Index):
# GH 42790 - Preserve name from an Index
keyarr.name = key.name
if lib.is_np_dtype(keyarr.dtype, "mM") or isinstance(
keyarr.dtype, DatetimeTZDtype
):
# DTI/TDI.take can infer a freq in some cases when we don't want one
if isinstance(key, list) or (
isinstance(key, type(self))
# "Index" has no attribute "freq"
and key.freq is None # type: ignore[attr-defined]
):
keyarr = keyarr._with_freq(None)
return keyarr, indexer
def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None:
"""
Check that indexer can be used to return a result.
e.g. at least one element was found,
unless the list of keys was actually empty.
Parameters
----------
key : list-like
Targeted labels (only used to show correct error message).
indexer: array-like of booleans
Indices corresponding to the key,
(with -1 indicating not found).
axis_name : str
Raises
------
KeyError
If at least one key was requested but none was found.
"""
if len(key) == 0:
return
# Count missing values
missing_mask = indexer < 0
nmissing = missing_mask.sum()
if nmissing:
if nmissing == len(indexer):
raise KeyError(f"None of [{key}] are in the [{axis_name}]")
not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())
raise KeyError(f"{not_found} not in index")
@overload
def _get_indexer_non_comparable(
self, target: Index, method, unique: Literal[True] = ...
) -> npt.NDArray[np.intp]:
...
@overload
def _get_indexer_non_comparable(
self, target: Index, method, unique: Literal[False]
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
...
@overload
def _get_indexer_non_comparable(
self, target: Index, method, unique: bool = True
) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
...
@final
def _get_indexer_non_comparable(
self, target: Index, method, unique: bool = True
) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
"""
Called from get_indexer or get_indexer_non_unique when the target
is of a non-comparable dtype.
For get_indexer lookups with method=None, get_indexer is an _equality_
check, so non-comparable dtypes mean we will always have no matches.
For get_indexer lookups with a method, get_indexer is an _inequality_
check, so non-comparable dtypes mean we will always raise TypeError.
Parameters
----------
target : Index
method : str or None
unique : bool, default True
* True if called from get_indexer.
* False if called from get_indexer_non_unique.
Raises
------
TypeError
If doing an inequality check, i.e. method is not None.
"""
if method is not None:
other_dtype = _unpack_nested_dtype(target)
raise TypeError(f"Cannot compare dtypes {self.dtype} and {other_dtype}")
no_matches = -1 * np.ones(target.shape, dtype=np.intp)
if unique:
# This is for get_indexer
return no_matches
else:
# This is for get_indexer_non_unique
missing = np.arange(len(target), dtype=np.intp)
return no_matches, missing
@property
def _index_as_unique(self) -> bool:
"""
Whether we should treat this as unique for the sake of
get_indexer vs get_indexer_non_unique.
For IntervalIndex compat.
"""
return self.is_unique
_requires_unique_msg = "Reindexing only valid with uniquely valued Index objects"
@final
def _maybe_downcast_for_indexing(self, other: Index) -> tuple[Index, Index]:
"""
When dealing with an object-dtype Index and a non-object Index, see
if we can upcast the object-dtype one to improve performance.
"""
if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):
if (
self.tz is not None
and other.tz is not None
and not tz_compare(self.tz, other.tz)
):
# standardize on UTC
return self.tz_convert("UTC"), other.tz_convert("UTC")
elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex):
try:
return type(other)(self), other
except OutOfBoundsDatetime:
return self, other
elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex):
# TODO: we don't have tests that get here
return type(other)(self), other
elif self.dtype.kind == "u" and other.dtype.kind == "i":
# GH#41873
if other.min() >= 0:
# lookup min as it may be cached
# TODO: may need itemsize check if we have non-64-bit Indexes
return self, other.astype(self.dtype)
elif self._is_multi and not other._is_multi:
try:
# "Type[Index]" has no attribute "from_tuples"
other = type(self).from_tuples(other) # type: ignore[attr-defined]
except (TypeError, ValueError):
# let's instead try with a straight Index
self = Index(self._values)
if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype):
# Reverse op so we don't need to re-implement on the subclasses
other, self = other._maybe_downcast_for_indexing(self)
return self, other
@final
def _find_common_type_compat(self, target) -> DtypeObj:
"""
Implementation of find_common_type that adjusts for Index-specific
special cases.
"""
target_dtype, _ = infer_dtype_from(target)
# special case: if one dtype is uint64 and the other a signed int, return object
# See https://github.com/pandas-dev/pandas/issues/26778 for discussion
# Now it's:
# * float | [u]int -> float
# * uint64 | signed int -> object
# We may change union(float | [u]int) to go to object.
if self.dtype == "uint64" or target_dtype == "uint64":
if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype(
target_dtype
):
return _dtype_obj
dtype = find_result_type(self.dtype, target)
dtype = common_dtype_categorical_compat([self, target], dtype)
return dtype
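# Worked sketch (illustrative, not in the original source): an int64 Index
# against a float64 target resolves to float64, while a uint64 Index
# against a signed-integer target resolves to object dtype per the special
# case above.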
@final
def _should_compare(self, other: Index) -> bool:
"""
Check if `self == other` can ever have non-False entries.
"""
# NB: we use inferred_type rather than is_bool_dtype to catch
# object_dtype_of_bool and categorical[object_dtype_of_bool] cases
if (
other.inferred_type == "boolean" and is_any_real_numeric_dtype(self.dtype)
) or (
self.inferred_type == "boolean" and is_any_real_numeric_dtype(other.dtype)
):
# GH#16877 Treat boolean labels passed to a numeric index as not
# found. Without this fix False and True would be treated as 0 and 1
# respectively.
return False
dtype = _unpack_nested_dtype(other)
return self._is_comparable_dtype(dtype) or is_object_dtype(dtype)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if self.dtype.kind == "b":
return dtype.kind == "b"
elif is_numeric_dtype(self.dtype):
return is_numeric_dtype(dtype)
# TODO: this was written assuming we only get here with object-dtype,
# which is no longer correct. Can we specialize for EA?
return True
@final
def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]:
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
dict
{group name -> group labels}
"""
# TODO: if we are a MultiIndex, we can do better
# than converting to tuples
if isinstance(values, ABCMultiIndex):
values = values._values
values = Categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in result.items()}
return PrettyDict(result)
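# Usage sketch (illustrative, not in the original source):
#   pd.Index(["a", "b", "c"]).groupby(np.array([1, 2, 1]))
# maps group 1 to the labels at positions 0 and 2 (Index(['a', 'c'])) and
# group 2 to the label at position 1 (Index(['b'])).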
def map(self, mapper, na_action: Literal["ignore"] | None = None):
"""
Map values using an input mapping or function.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping correspondence.
Returns
-------
Union[Index, MultiIndex]
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx.map({1: 'a', 2: 'b', 3: 'c'})
Index(['a', 'b', 'c'], dtype='object')
Using `map` with a function:
>>> idx = pd.Index([1, 2, 3])
>>> idx.map('I am a {}'.format)
Index(['I am a 1', 'I am a 2', 'I am a 3'], dtype='object')
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.map(lambda x: x.upper())
Index(['A', 'B', 'C'], dtype='object')
"""
from pandas.core.indexes.multi import MultiIndex
new_values = self._map_values(mapper, na_action=na_action)
# we can return a MultiIndex
if new_values.size and isinstance(new_values[0], tuple):
if isinstance(self, MultiIndex):
names = self.names
elif self.name:
names = [self.name] * len(new_values[0])
else:
names = None
return MultiIndex.from_tuples(new_values, names=names)
dtype = None
if not new_values.size:
# empty
dtype = self.dtype
# e.g. if we are floating and new_values is all ints, then we
# don't want to cast back to floating. But if we are UInt64
# and new_values is all ints, we want to try.
same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type
if same_dtype:
new_values = maybe_cast_pointwise_result(
new_values, self.dtype, same_dtype=same_dtype
)
return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name)
# TODO: De-duplicate with map, xref GH#32349
@final
def _transform_index(self, func, *, level=None) -> Index:
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified.
"""
if isinstance(self, ABCMultiIndex):
values = [
self.get_level_values(i).map(func)
if i == level or level is None
else self.get_level_values(i)
for i in range(self.nlevels)
]
return type(self).from_arrays(values)
else:
items = [func(x) for x in self]
return Index(items, name=self.name, tupleize_cols=False)
def isin(self, values, level=None) -> npt.NDArray[np.bool_]:
"""
Return a boolean array where the index values are in `values`.
Compute boolean array of whether each index value is found in the
passed set of values. The length of the returned boolean array matches
the length of the index.
Parameters
----------
values : set or list-like
Sought values.
level : str or int, optional
Name or position of the index level to use (if the index is a
`MultiIndex`).
Returns
-------
np.ndarray[bool]
NumPy array of boolean values.
See Also
--------
Series.isin : Same for Series.
DataFrame.isin : Same method for DataFrames.
Notes
-----
In the case of `MultiIndex` you must either specify `values` as a
list-like object containing tuples that are the same length as the
number of levels, or specify `level`. Otherwise it will raise a
``ValueError``.
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Examples
--------
>>> idx = pd.Index([1,2,3])
>>> idx
Index([1, 2, 3], dtype='int64')
Check whether each index value is in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
>>> midx = pd.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
MultiIndex([(1, 'red'),
(2, 'blue'),
(3, 'green')],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(self._values, values)
def _get_string_slice(self, key: str_t):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(
self,
start: Hashable | None = None,
end: Hashable | None = None,
step: int | None = None,
) -> slice:
"""
Compute the slice indexer for input labels and step.
Index needs to be ordered and unique.
Parameters
----------
start : label, default None
If None, defaults to the beginning.
end : label, default None
If None, defaults to the end.
step : int, default None
Returns
-------
slice
Raises
------
KeyError : If key does not exist, or key is not unique and index is
not ordered.
Notes
-----
This function assumes that the data is sorted, so use at your own peril.
Examples
--------
This is a method on all index types. For example you can do:
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_indexer(start='b', end='c')
slice(1, 3, None)
>>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
>>> idx.slice_indexer(start='b', end=('c', 'g'))
slice(1, 3, None)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index, then try to cast
to an int if equivalent.
"""
return key
def _maybe_cast_listlike_indexer(self, target) -> Index:
"""
Analogue to maybe_cast_indexer for get_indexer instead of get_loc.
"""
return ensure_index(target)
@final
def _validate_indexer(
self,
form: Literal["positional", "slice"],
key,
kind: Literal["getitem", "iloc"],
) -> None:
"""
If we are a positional indexer, validate that we have an appropriately
typed bound; it must be an integer or None.
"""
if not lib.is_int_or_none(key):
self._raise_invalid_indexer(form, key)
def _maybe_cast_slice_bound(self, label, side: str_t):
"""
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. datetimelike Indexes
if is_numeric_dtype(self.dtype):
return self._maybe_cast_indexer(label)
# reject them, if index does not contain label
if (is_float(label) or is_integer(label)) and label not in self:
self._raise_invalid_indexer("slice", label)
return label
def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(
label, side="right" if side == "left" else "left"
)
return len(self) - pos
raise ValueError("index must be monotonic increasing or decreasing")
def get_slice_bound(self, label, side: Literal["left", "right"]) -> int:
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
Returns
-------
int
Index of label.
See Also
--------
Index.get_loc : Get integer location, slice or boolean mask for requested
label.
Examples
--------
>>> idx = pd.RangeIndex(5)
>>> idx.get_slice_bound(3, 'left')
3
>>> idx.get_slice_bound(3, 'right')
4
If ``label`` is non-unique in the index, an error will be raised.
>>> idx_duplicate = pd.Index(['a', 'b', 'a', 'c', 'd'])
>>> idx_duplicate.get_slice_bound('a', 'left')
Traceback (most recent call last):
KeyError: Cannot get left slice bound for non-unique label: 'a'
"""
if side not in ("left", "right"):
raise ValueError(
"Invalid value for side kwarg, must be either "
f"'left' or 'right': {side}"
)
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side)
# we need to look up the label
try:
slc = self.get_loc(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array, which
# is OK as long as they are representable by a slice.
assert is_bool_dtype(slc.dtype)
slc = lib.maybe_booleans_to_slice(slc.view("u1"))
if isinstance(slc, np.ndarray):
raise KeyError(
f"Cannot get {side} slice bound for non-unique "
f"label: {repr(original_label)}"
)
if isinstance(slc, slice):
if side == "left":
return slc.start
else:
return slc.stop
else:
if side == "right":
return slc + 1
else:
return slc
def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]:
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning.
end : label, default None
If None, defaults to the end.
step : int, defaults None
If None, defaults to 1.
Returns
-------
tuple[int, int]
See Also
--------
Index.get_loc : Get location for a single label.
Notes
-----
This method only works if the index is monotonic or unique.
Examples
--------
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='b', end='c')
(1, 3)
"""
inc = step is None or step >= 0
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
# GH 16785: If start and end happen to be date strings with UTC offsets
# attempt to parse and check that the offsets are the same
if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)):
try:
ts_start = Timestamp(start)
ts_end = Timestamp(end)
except (ValueError, TypeError):
pass
else:
if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):
raise ValueError("Both dates must have the same UTC offset")
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, "left")
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, "right")
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one; subtracting len(self)
# compensates for that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
def delete(self, loc) -> Self:
"""
Make new Index with passed location(-s) deleted.
Parameters
----------
loc : int or list of int
Location of item(-s) which will be deleted.
Use a list of locations to delete more than one value at the same time.
Returns
-------
Index
Will be same type as self, except for RangeIndex.
See Also
--------
numpy.delete : Delete any rows and column from NumPy array (ndarray).
Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.delete(1)
Index(['a', 'c'], dtype='object')
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.delete([0, 2])
Index(['b'], dtype='object')
"""
values = self._values
res_values: ArrayLike
if isinstance(values, np.ndarray):
# TODO(__array_function__): special casing will be unnecessary
res_values = np.delete(values, loc)
else:
res_values = values.delete(loc)
# _constructor so RangeIndex-> Index with an int64 dtype
return self._constructor._simple_new(res_values, name=self.name)
def insert(self, loc: int, item) -> Index:
"""
Make new Index inserting new item at location.
Follows numpy.insert semantics for negative values.
Parameters
----------
loc : int
item : object
Returns
-------
Index
Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.insert(1, 'x')
Index(['a', 'x', 'b', 'c'], dtype='object')
"""
item = lib.item_from_zerodim(item)
if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object:
item = self._na_value
arr = self._values
try:
if isinstance(arr, ExtensionArray):
res_values = arr.insert(loc, item)
return type(self)._simple_new(res_values, name=self.name)
else:
item = self._validate_fill_value(item)
except (TypeError, ValueError, LossySetitemError):
# e.g. trying to insert an integer into a DatetimeIndex
# We cannot keep the same dtype, so cast to the (often object)
# minimal shared dtype before doing the insert.
dtype = self._find_common_type_compat(item)
return self.astype(dtype).insert(loc, item)
if arr.dtype != object or not isinstance(
item, (tuple, np.datetime64, np.timedelta64)
):
# with object-dtype we need to worry about numpy incorrectly casting
# dt64/td64 to integer, also about treating tuples as sequences
# special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550
casted = arr.dtype.type(item)
new_values = np.insert(arr, loc, casted)
else:
# error: No overload variant of "insert" matches argument types
# "ndarray[Any, Any]", "int", "None"
new_values = np.insert(arr, loc, None) # type: ignore[call-overload]
loc = loc if loc >= 0 else loc - 1
new_values[loc] = item
out = Index._with_infer(new_values, name=self.name)
if (
using_pyarrow_string_dtype()
and is_string_dtype(out.dtype)
and new_values.dtype == object
):
out = out.astype(new_values.dtype)
if self.dtype == object and out.dtype != object:
# GH#51363
warnings.warn(
"The behavior of Index.insert with object-dtype is deprecated, "
"in a future version this will return an object-dtype Index "
"instead of inferring a non-object dtype. To retain the old "
"behavior, do `idx.insert(loc, item).infer_objects(copy=False)`",
FutureWarning,
stacklevel=find_stack_level(),
)
return out
def drop(
self,
labels: Index | np.ndarray | Iterable[Hashable],
errors: IgnoreRaise = "raise",
) -> Index:
"""
Make new Index with passed list of labels deleted.
Parameters
----------
labels : array-like or scalar
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
Index
Will be same type as self, except for RangeIndex.
Raises
------
KeyError
If not all of the labels are found in the selected axis
Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.drop(['a'])
Index(['b', 'c'], dtype='object')
"""
if not isinstance(labels, Index):
# avoid materializing e.g. RangeIndex
arr_dtype = "object" if self.dtype == "object" else None
labels = com.index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer_for(labels)
mask = indexer == -1
if mask.any():
if errors != "ignore":
raise KeyError(f"{labels[mask].tolist()} not found in axis")
indexer = indexer[~mask]
return self.delete(indexer)
@final
def infer_objects(self, copy: bool = True) -> Index:
"""
If we have an object dtype, try to infer a non-object dtype.
Parameters
----------
copy : bool, default True
Whether to make a copy in cases where no inference occurs.
"""
if self._is_multi:
raise NotImplementedError(
"infer_objects is not implemented for MultiIndex. "
"Use index.to_frame().infer_objects() instead."
)
if self.dtype != object:
return self.copy() if copy else self
values = self._values
values = cast("npt.NDArray[np.object_]", values)
res_values = lib.maybe_convert_objects(
values,
convert_non_numeric=True,
)
if copy and res_values is values:
return self.copy()
result = Index(res_values, name=self.name)
if not copy and res_values is values and self._references is not None:
result._references = self._references
result._references.add_index_reference(result)
return result
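# Usage sketch (illustrative, not in the original source):
#   pd.Index([1, 2, 3], dtype=object).infer_objects()
# returns Index([1, 2, 3], dtype='int64'); an Index that is already
# non-object is returned as a copy (or as-is when copy=False).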
@final
def diff(self, periods: int = 1) -> Index:
"""
Computes the difference between consecutive values in the Index object.
If periods is greater than 1, computes the difference between values that
are `periods` number of positions apart.
Parameters
----------
periods : int, optional
The number of positions between the current and previous
value to compute the difference with. Default is 1.
Returns
-------
Index
A new Index object with the computed differences.
Examples
--------
>>> import pandas as pd
>>> idx = pd.Index([10, 20, 30, 40, 50])
>>> idx.diff()
Index([nan, 10.0, 10.0, 10.0, 10.0], dtype='float64')
"""
return Index(self.to_series().diff(periods))
@final
def round(self, decimals: int = 0) -> Self:
"""
Round each value in the Index to the given number of decimals.
Parameters
----------
decimals : int, optional
Number of decimal places to round to. If decimals is negative,
it specifies the number of positions to the left of the decimal point.
Returns
-------
Index
A new Index with the rounded values.
Examples
--------
>>> import pandas as pd
>>> idx = pd.Index([10.1234, 20.5678, 30.9123, 40.4567, 50.7890])
>>> idx.round(decimals=2)
Index([10.12, 20.57, 30.91, 40.46, 50.79], dtype='float64')
"""
return self._constructor(self.to_series().round(decimals))
# --------------------------------------------------------------------
# Generated Arithmetic, Comparison, and Unary Methods
def _cmp_method(self, other, op):
"""
Wrapper used to dispatch comparison operations.
"""
if self.is_(other):
# fastpath
if op in {operator.eq, operator.le, operator.ge}:
arr = np.ones(len(self), dtype=bool)
if self._can_hold_na and not isinstance(self, ABCMultiIndex):
# TODO: should set MultiIndex._can_hold_na = False?
arr[self.isna()] = False
return arr
elif op is operator.ne:
arr = np.zeros(len(self), dtype=bool)
if self._can_hold_na and not isinstance(self, ABCMultiIndex):
arr[self.isna()] = True
return arr
if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len(
self
) != len(other):
raise ValueError("Lengths must match to compare")
if not isinstance(other, ABCMultiIndex):
other = extract_array(other, extract_numpy=True)
else:
other = np.asarray(other)
if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray):
# e.g. PeriodArray, Categorical
result = op(self._values, other)
elif isinstance(self._values, ExtensionArray):
result = op(self._values, other)
elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex):
# don't pass MultiIndex
result = ops.comp_method_OBJECT_ARRAY(op, self._values, other)
else:
result = ops.comparison_op(self._values, other, op)
return result
@final
def _logical_method(self, other, op):
res_name = ops.get_op_result_name(self, other)
lvalues = self._values
rvalues = extract_array(other, extract_numpy=True, extract_range=True)
res_values = ops.logical_op(lvalues, rvalues, op)
return self._construct_result(res_values, name=res_name)
@final
def _construct_result(self, result, name):
if isinstance(result, tuple):
return (
Index(result[0], name=name, dtype=result[0].dtype),
Index(result[1], name=name, dtype=result[1].dtype),
)
return Index(result, name=name, dtype=result.dtype)
def _arith_method(self, other, op):
if (
isinstance(other, Index)
and is_object_dtype(other.dtype)
and type(other) is not Index
):
# We return NotImplemented for object-dtype index *subclasses* so they have
# a chance to implement ops before we unwrap them.
# See https://github.com/pandas-dev/pandas/issues/31109
return NotImplemented
return super()._arith_method(other, op)
@final
def _unary_method(self, op):
result = op(self._values)
return Index(result, name=self.name)
def __abs__(self) -> Index:
return self._unary_method(operator.abs)
def __neg__(self) -> Index:
return self._unary_method(operator.neg)
def __pos__(self) -> Index:
return self._unary_method(operator.pos)
def __invert__(self) -> Index:
# GH#8875
return self._unary_method(operator.inv)
# --------------------------------------------------------------------
# Reductions
def any(self, *args, **kwargs):
"""
Return whether any element is Truthy.
Parameters
----------
*args
Required for compatibility with numpy.
**kwargs
Required for compatibility with numpy.
Returns
-------
bool or array-like (if axis is specified)
A single element array-like may be converted to bool.
See Also
--------
Index.all : Return whether all elements are True.
Series.all : Return whether all elements are True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to True because these are not equal to zero.
Examples
--------
>>> index = pd.Index([0, 1, 2])
>>> index.any()
True
>>> index = pd.Index([0, 0, 0])
>>> index.any()
False
"""
nv.validate_any(args, kwargs)
self._maybe_disable_logical_methods("any")
vals = self._values
if not isinstance(vals, np.ndarray):
# i.e. EA, call _reduce instead of "any" to get TypeError instead
# of AttributeError
return vals._reduce("any")
return np.any(vals)
def all(self, *args, **kwargs):
"""
Return whether all elements are Truthy.
Parameters
----------
*args
Required for compatibility with numpy.
**kwargs
Required for compatibility with numpy.
Returns
-------
bool or array-like (if axis is specified)
A single element array-like may be converted to bool.
See Also
--------
Index.any : Return whether any element in an Index is True.
Series.any : Return whether any element in a Series is True.
Series.all : Return whether all elements in a Series are True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to True because these are not equal to zero.
Examples
--------
True, because nonzero integers are considered True.
>>> pd.Index([1, 2, 3]).all()
True
False, because ``0`` is considered False.
>>> pd.Index([0, 1, 2]).all()
False
"""
nv.validate_all(args, kwargs)
self._maybe_disable_logical_methods("all")
vals = self._values
if not isinstance(vals, np.ndarray):
# i.e. EA, call _reduce instead of "all" to get TypeError instead
# of AttributeError
return vals._reduce("all")
return np.all(vals)
@final
def _maybe_disable_logical_methods(self, opname: str_t) -> None:
"""
raise if this Index subclass does not support any or all.
"""
if (
isinstance(self, ABCMultiIndex)
# TODO(3.0): PeriodArray and DatetimeArray any/all will raise,
# so checking needs_i8_conversion will be unnecessary
or (needs_i8_conversion(self.dtype) and self.dtype.kind != "m")
):
# This call will raise
make_invalid_op(opname)(self)
@Appender(IndexOpsMixin.argmin.__doc__)
def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
if not self._is_multi and self.hasnans:
# Take advantage of cache
mask = self._isnan
if not skipna or mask.all():
warnings.warn(
f"The behavior of {type(self).__name__}.argmax/argmin "
"with skipna=False and NAs, or with all-NAs is deprecated. "
"In a future version this will raise ValueError.",
FutureWarning,
stacklevel=find_stack_level(),
)
return -1
return super().argmin(skipna=skipna)
@Appender(IndexOpsMixin.argmax.__doc__)
def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
if not self._is_multi and self.hasnans:
# Take advantage of cache
mask = self._isnan
if not skipna or mask.all():
warnings.warn(
f"The behavior of {type(self).__name__}.argmax/argmin "
"with skipna=False and NAs, or with all-NAs is deprecated. "
"In a future version this will raise ValueError.",
FutureWarning,
stacklevel=find_stack_level(),
)
return -1
return super().argmax(skipna=skipna)
def min(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the minimum value of the Index.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
if len(self) and self.is_monotonic_increasing:
# quick check
first = self[0]
if not isna(first):
return first
if not self._is_multi and self.hasnans:
# Take advantage of cache
mask = self._isnan
if not skipna or mask.all():
return self._na_value
if not self._is_multi and not isinstance(self._values, np.ndarray):
return self._values._reduce(name="min", skipna=skipna)
return nanops.nanmin(self._values, skipna=skipna)
def max(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the maximum value of the Index.
Parameters
----------
axis : int, optional
For compatibility with NumPy. Only 0 or None are allowed.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
if len(self) and self.is_monotonic_increasing:
# quick check
last = self[-1]
if not isna(last):
return last
if not self._is_multi and self.hasnans:
# Take advantage of cache
mask = self._isnan
if not skipna or mask.all():
return self._na_value
if not self._is_multi and not isinstance(self._values, np.ndarray):
return self._values._reduce(name="max", skipna=skipna)
return nanops.nanmax(self._values, skipna=skipna)
# --------------------------------------------------------------------
@final
@property
def shape(self) -> Shape:
"""
Return a tuple of the shape of the underlying data.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.shape
(3,)
"""
# See GH#27775, GH#27384 for history/reasoning in how this is defined.
return (len(self),)
|
(data=None, dtype=None, copy: 'bool' = False, name=None, tupleize_cols: 'bool' = True) -> 'Self'
|
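The tail of `Index.insert` above deprecates silent dtype inference for object-dtype indexes (GH#51363). A minimal sketch of the current behavior and the workaround named in the warning; exact dtypes depend on the pandas version:
import pandas as pd

idx = pd.Index([1, 2, 3], dtype=object)

# Currently this infers int64 and emits the FutureWarning shown above;
# a future pandas will keep object dtype instead.
out = idx.insert(1, 4)

# Version-stable way to get the inferred (non-object) dtype:
inferred = idx.insert(1, 4).infer_objects(copy=False)
print(out.dtype, inferred.dtype)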
66,363 |
pandas.core.indexes.base
|
__contains__
|
Return a boolean indicating whether the provided key is in the index.
Parameters
----------
key : label
The key to check if it is present in the index.
Returns
-------
bool
Whether the key is in the index.
Raises
------
TypeError
If the key is not hashable.
See Also
--------
Index.isin : Returns an ndarray of boolean dtype indicating whether the
list-like key is in the index.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> 2 in idx
True
>>> 6 in idx
False
|
def __contains__(self, key: Any) -> bool:
"""
Return a boolean indicating whether the provided key is in the index.
Parameters
----------
key : label
The key to check if it is present in the index.
Returns
-------
bool
Whether the key is in the index.
Raises
------
TypeError
If the key is not hashable.
See Also
--------
Index.isin : Returns an ndarray of boolean dtype indicating whether the
list-like key is in the index.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> 2 in idx
True
>>> 6 in idx
False
"""
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
|
(self, key: Any) -> bool
|
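A short usage sketch of `__contains__` as defined above: absent or type-mismatched keys come back False through the except clause, while unhashable keys raise before the engine lookup. Values are illustrative only:
import pandas as pd

idx = pd.Index([1, 2, 3, 4])

assert 2 in idx          # present
assert 10 not in idx     # absent -> False
assert "a" not in idx    # mismatched type: the engine lookup fails, except returns False
try:
    [1, 2] in idx        # unhashable: hash(key) raises before the lookup
except TypeError:
    pass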
66,383 |
pandas.core.indexes.base
|
__new__
| null |
def __new__(
cls,
data=None,
dtype=None,
copy: bool = False,
name=None,
tupleize_cols: bool = True,
) -> Self:
from pandas.core.indexes.range import RangeIndex
name = maybe_extract_name(name, data, cls)
if dtype is not None:
dtype = pandas_dtype(dtype)
data_dtype = getattr(data, "dtype", None)
refs = None
if not copy and isinstance(data, (ABCSeries, Index)):
refs = data._references
is_pandas_object = isinstance(data, (ABCSeries, Index, ExtensionArray))
# range
if isinstance(data, (range, RangeIndex)):
result = RangeIndex(start=data, copy=copy, name=name)
if dtype is not None:
return result.astype(dtype, copy=False)
# error: Incompatible return value type (got "MultiIndex",
# expected "Self")
return result # type: ignore[return-value]
elif is_ea_or_datetimelike_dtype(dtype):
# non-EA dtype indexes have special casting logic, so we punt here
pass
elif is_ea_or_datetimelike_dtype(data_dtype):
pass
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if isinstance(data, ABCMultiIndex):
data = data._values
if data.dtype.kind not in "iufcbmM":
# GH#11836 we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
data = com.asarray_tuplesafe(data, dtype=_dtype_obj)
elif is_scalar(data):
raise cls._raise_scalar_data_error(data)
elif hasattr(data, "__array__"):
return cls(np.asarray(data), dtype=dtype, copy=copy, name=name)
elif not is_list_like(data) and not isinstance(data, memoryview):
# 2022-11-16 the memoryview check is only necessary on some CI
# builds, not clear why
raise cls._raise_scalar_data_error(data)
else:
if tupleize_cols:
# GH21470: convert iterable to list before determining if empty
if is_iterator(data):
data = list(data)
if data and all(isinstance(e, tuple) for e in data):
# we must be all tuples, otherwise don't construct
# 10697
from pandas.core.indexes.multi import MultiIndex
# error: Incompatible return value type (got "MultiIndex",
# expected "Self")
return MultiIndex.from_tuples( # type: ignore[return-value]
data, names=name
)
# other iterable of some kind
if not isinstance(data, (list, tuple)):
# we allow set/frozenset, which Series/sanitize_array does not, so
# cast to list here
data = list(data)
if len(data) == 0:
# unlike Series, we default to object dtype:
data = np.array(data, dtype=object)
if len(data) and isinstance(data[0], tuple):
# Ensure we get 1-D array of tuples instead of 2D array.
data = com.asarray_tuplesafe(data, dtype=_dtype_obj)
try:
arr = sanitize_array(data, None, dtype=dtype, copy=copy)
except ValueError as err:
if "index must be specified when data is not list-like" in str(err):
raise cls._raise_scalar_data_error(data) from err
if "Data must be 1-dimensional" in str(err):
raise ValueError("Index data must be 1-dimensional") from err
raise
arr = ensure_wrapped_if_datetimelike(arr)
klass = cls._dtype_to_subclass(arr.dtype)
arr = klass._ensure_array(arr, arr.dtype, copy=False)
result = klass._simple_new(arr, name, refs=refs)
if dtype is None and is_pandas_object and data_dtype == np.object_:
if result.dtype != data_dtype:
warnings.warn(
"Dtype inference on a pandas object "
"(Series, Index, ExtensionArray) is deprecated. The Index "
"constructor will keep the original dtype in the future. "
"Call `infer_objects` on the result to get the old "
"behavior.",
FutureWarning,
stacklevel=2,
)
return result # type: ignore[return-value]
|
(cls, data=None, dtype=None, copy: bool = False, name=None, tupleize_cols: bool = True) -> NoneType
|
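The constructor above dispatches on the input: range-like data becomes a RangeIndex, an all-tuple list becomes a MultiIndex, and string-like values are kept as object rather than coerced to numbers. A brief sketch with results indicative of recent pandas versions:
import pandas as pd

print(pd.Index(range(5)))               # RangeIndex(start=0, stop=5, step=1)
print(pd.Index([("a", 1), ("b", 2)]))   # all tuples -> MultiIndex.from_tuples
print(pd.Index(["0", 1, 2.0]).dtype)    # object: '0' is not coerced to a number
print(pd.Index([]).dtype)               # object: empty input defaults to object, unlike Series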
66,427 |
pandas.core.indexes.base
|
_format_attrs
|
Return a list of tuples of the (attr,formatted_value).
|
def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]:
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs: list[tuple[str_t, str_t | int | bool | None]] = []
if not self._is_multi:
attrs.append(("dtype", f"'{self.dtype}'"))
if self.name is not None:
attrs.append(("name", default_pprint(self.name)))
elif self._is_multi and any(x is not None for x in self.names):
attrs.append(("names", default_pprint(self.names)))
max_seq_items = get_option("display.max_seq_items") or len(self)
if len(self) > max_seq_items:
attrs.append(("length", len(self)))
return attrs
|
(self) -> list[tuple[str, str | int | bool | None]]
|
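`_format_attrs` is what appends the `dtype`, `name`, and (for long indexes) `length` attributes to the repr. A rough illustration; the exact elision of values is display-dependent:
import pandas as pd

idx = pd.Index(list(range(100)), name="n")
with pd.option_context("display.max_seq_items", 5):
    print(idx)
# Values are elided and a length attribute is appended, roughly:
# Index([0, 1, ..., 98, 99], dtype='int64', name='n', length=100)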
66,432 |
pandas.core.indexes.base
|
_from_join_target
|
Cast the ndarray returned from one of the libjoin.foo_indexer functions
back to type(self._data).
|
def _from_join_target(self, result: np.ndarray) -> ArrayLike:
"""
Cast the ndarray returned from one of the libjoin.foo_indexer functions
back to type(self._data).
"""
if isinstance(self.values, BaseMaskedArray):
return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_))
elif isinstance(self.values, (ArrowExtensionArray, StringArray)):
return type(self.values)._from_sequence(result, dtype=self.dtype)
return result
|
(self, result: numpy.ndarray) -> Union[pandas.core.arrays.base.ExtensionArray, numpy.ndarray]
|
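The observable effect of `_from_join_target` is that join results on masked or arrow-backed indexes keep their extension dtype instead of decaying to plain NumPy. A hedged sketch with nullable integers:
import pandas as pd

left = pd.Index([1, 2, 3], dtype="Int64")
right = pd.Index([2, 3, 4], dtype="Int64")

joined, lidx, ridx = left.join(right, how="outer", return_indexers=True)
print(joined.dtype)   # Int64: the libjoin ndarray is re-wrapped as a masked array
print(lidx, ridx)     # positional indexers, -1 where a side has no match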
66,434 |
pandas.core.indexes.base
|
_get_engine_target
|
Get the ndarray or ExtensionArray that we can pass to the IndexEngine
constructor.
|
def _get_engine_target(self) -> ArrayLike:
"""
Get the ndarray or ExtensionArray that we can pass to the IndexEngine
constructor.
"""
vals = self._values
if isinstance(vals, StringArray):
# GH#45652 much more performant than ExtensionEngine
return vals._ndarray
if isinstance(vals, ArrowExtensionArray) and self.dtype.kind in "Mm":
import pyarrow as pa
pa_type = vals._pa_array.type
if pa.types.is_timestamp(pa_type):
vals = vals._to_datetimearray()
return vals._ndarray.view("i8")
elif pa.types.is_duration(pa_type):
vals = vals._to_timedeltaarray()
return vals._ndarray.view("i8")
if (
type(self) is Index
and isinstance(self._values, ExtensionArray)
and not isinstance(self._values, BaseMaskedArray)
and not (
isinstance(self._values, ArrowExtensionArray)
and is_numeric_dtype(self.dtype)
# Exclude decimal
and self.dtype.kind != "O"
)
):
# TODO(ExtensionIndex): remove special-case, just use self._values
return self._values.astype(object)
return vals
|
(self) -> Union[pandas.core.arrays.base.ExtensionArray, numpy.ndarray]
|
66,454 |
pandas.core.indexes.base
|
_is_comparable_dtype
|
Can we compare values of the given dtype to our own?
|
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if self.dtype.kind == "b":
return dtype.kind == "b"
elif is_numeric_dtype(self.dtype):
return is_numeric_dtype(dtype)
# TODO: this was written assuming we only get here with object-dtype,
# which is no longer correct. Can we specialize for EA?
return True
|
(self, dtype: Union[numpy.dtype, pandas.core.dtypes.base.ExtensionDtype]) -> bool
|
66,467 |
pandas.core.indexes.base
|
_maybe_cast_listlike_indexer
|
Analogue to maybe_cast_indexer for get_indexer instead of get_loc.
|
def _maybe_cast_listlike_indexer(self, target) -> Index:
"""
Analogue to maybe_cast_indexer for get_indexer instead of get_loc.
"""
return ensure_index(target)
|
(self, target) -> pandas.core.indexes.base.Index
|
66,494 |
pandas.core.indexes.base
|
_validate_fill_value
|
Check if the value can be inserted into our array without casting,
and convert it to an appropriate native type if necessary.
Raises
------
TypeError
If the value cannot be inserted into an array of this dtype.
|
def _validate_fill_value(self, value):
"""
Check if the value can be inserted into our array without casting,
and convert it to an appropriate native type if necessary.
Raises
------
TypeError
If the value cannot be inserted into an array of this dtype.
"""
dtype = self.dtype
if isinstance(dtype, np.dtype) and dtype.kind not in "mM":
# return np_can_hold_element(dtype, value)
try:
return np_can_hold_element(dtype, value)
except LossySetitemError as err:
# re-raise as TypeError for consistency
raise TypeError from err
elif not can_hold_element(self._values, value):
raise TypeError
return value
|
(self, value)
|
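`_validate_fill_value` is how `Index.insert` decides whether a value fits the current dtype; a TypeError here makes the caller fall back to a common dtype. A small sketch of the visible effect, with dtypes indicative of recent pandas:
import pandas as pd

idx = pd.Index([1, 2, 3])            # int64

print(idx.insert(1, 10).dtype)       # int64: 10 can be held without casting
print(idx.insert(1, 1.5).dtype)      # float64: lossy for int64, falls back to a common dtype
print(idx.insert(1, "x").dtype)      # object: no numeric common dtype exists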
66,524 |
pandas.core.indexes.base
|
equals
|
Determine if two Index objects are equal.
The things that are being compared are:
* The elements inside the Index object.
* The order of the elements inside the Index object.
Parameters
----------
other : Any
The other object to compare against.
Returns
-------
bool
True if "other" is an Index and it has the same elements and order
as the calling index; False otherwise.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3])
>>> idx1
Index([1, 2, 3], dtype='int64')
>>> idx1.equals(pd.Index([1, 2, 3]))
True
The elements inside are compared
>>> idx2 = pd.Index(["1", "2", "3"])
>>> idx2
Index(['1', '2', '3'], dtype='object')
>>> idx1.equals(idx2)
False
The order is compared
>>> ascending_idx = pd.Index([1, 2, 3])
>>> ascending_idx
Index([1, 2, 3], dtype='int64')
>>> descending_idx = pd.Index([3, 2, 1])
>>> descending_idx
Index([3, 2, 1], dtype='int64')
>>> ascending_idx.equals(descending_idx)
False
The dtype is *not* compared
>>> int64_idx = pd.Index([1, 2, 3], dtype='int64')
>>> int64_idx
Index([1, 2, 3], dtype='int64')
>>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64')
>>> uint64_idx
Index([1, 2, 3], dtype='uint64')
>>> int64_idx.equals(uint64_idx)
True
|
def equals(self, other: Any) -> bool:
"""
Determine if two Index objects are equal.
The things that are being compared are:
* The elements inside the Index object.
* The order of the elements inside the Index object.
Parameters
----------
other : Any
The other object to compare against.
Returns
-------
bool
True if "other" is an Index and it has the same elements and order
as the calling index; False otherwise.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3])
>>> idx1
Index([1, 2, 3], dtype='int64')
>>> idx1.equals(pd.Index([1, 2, 3]))
True
The elements inside are compared
>>> idx2 = pd.Index(["1", "2", "3"])
>>> idx2
Index(['1', '2', '3'], dtype='object')
>>> idx1.equals(idx2)
False
The order is compared
>>> ascending_idx = pd.Index([1, 2, 3])
>>> ascending_idx
Index([1, 2, 3], dtype='int64')
>>> descending_idx = pd.Index([3, 2, 1])
>>> descending_idx
Index([3, 2, 1], dtype='int64')
>>> ascending_idx.equals(descending_idx)
False
The dtype is *not* compared
>>> int64_idx = pd.Index([1, 2, 3], dtype='int64')
>>> int64_idx
Index([1, 2, 3], dtype='int64')
>>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64')
>>> uint64_idx
Index([1, 2, 3], dtype='uint64')
>>> int64_idx.equals(uint64_idx)
True
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if len(self) != len(other):
# quickly return if the lengths are different
return False
if (
isinstance(self.dtype, StringDtype)
and self.dtype.storage == "pyarrow_numpy"
and other.dtype != self.dtype
):
# special case for object behavior
return other.equals(self.astype(object))
if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype):
# if other is not object, use other's logic for coercion
return other.equals(self)
if isinstance(other, ABCMultiIndex):
# d-level MultiIndex can equal d-tuple Index
return other.equals(self)
if isinstance(self._values, ExtensionArray):
# Dispatch to the ExtensionArray's .equals method.
if not isinstance(other, type(self)):
return False
earr = cast(ExtensionArray, self._data)
return earr.equals(other._data)
if isinstance(other.dtype, ExtensionDtype):
# All EA-backed Index subclasses override equals
return other.equals(self)
return array_equivalent(self._values, other._values)
|
(self, other: Any) -> bool
|
66,588 |
pandas.core.arrays.integer
|
Int16Dtype
|
An ExtensionDtype for int16 integer data.
Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`.
Attributes
----------
None
Methods
-------
None
Examples
--------
For Int8Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype())
>>> ser.dtype
Int8Dtype()
For Int16Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype())
>>> ser.dtype
Int16Dtype()
For Int32Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype())
>>> ser.dtype
Int32Dtype()
For Int64Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype())
>>> ser.dtype
Int64Dtype()
For UInt8Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype())
>>> ser.dtype
UInt8Dtype()
For UInt16Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype())
>>> ser.dtype
UInt16Dtype()
For UInt32Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype())
>>> ser.dtype
UInt32Dtype()
For UInt64Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype())
>>> ser.dtype
UInt64Dtype()
|
class Int16Dtype(IntegerDtype):
type = np.int16
name: ClassVar[str] = "Int16"
__doc__ = _dtype_docstring.format(dtype="int16")
|
()
|
66,595 |
pandas.core.dtypes.common
|
is_integer_dtype
|
Check whether the provided array or dtype is of an integer dtype.
Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
as integer by this function.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of an integer dtype and
not an instance of timedelta64.
Examples
--------
>>> from pandas.api.types import is_integer_dtype
>>> is_integer_dtype(str)
False
>>> is_integer_dtype(int)
True
>>> is_integer_dtype(float)
False
>>> is_integer_dtype(np.uint64)
True
>>> is_integer_dtype('int8')
True
>>> is_integer_dtype('Int8')
True
>>> is_integer_dtype(pd.Int8Dtype)
True
>>> is_integer_dtype(np.datetime64)
False
>>> is_integer_dtype(np.timedelta64)
False
>>> is_integer_dtype(np.array(['a', 'b']))
False
>>> is_integer_dtype(pd.Series([1, 2]))
True
>>> is_integer_dtype(np.array([], dtype=np.timedelta64))
False
>>> is_integer_dtype(pd.Index([1, 2.])) # float
False
|
def is_integer_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of an integer dtype.
Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
as integer by this function.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of an integer dtype and
not an instance of timedelta64.
Examples
--------
>>> from pandas.api.types import is_integer_dtype
>>> is_integer_dtype(str)
False
>>> is_integer_dtype(int)
True
>>> is_integer_dtype(float)
False
>>> is_integer_dtype(np.uint64)
True
>>> is_integer_dtype('int8')
True
>>> is_integer_dtype('Int8')
True
>>> is_integer_dtype(pd.Int8Dtype)
True
>>> is_integer_dtype(np.datetime64)
False
>>> is_integer_dtype(np.timedelta64)
False
>>> is_integer_dtype(np.array(['a', 'b']))
False
>>> is_integer_dtype(pd.Series([1, 2]))
True
>>> is_integer_dtype(np.array([], dtype=np.timedelta64))
False
>>> is_integer_dtype(pd.Index([1, 2.])) # float
False
"""
return _is_dtype_type(
arr_or_dtype, _classes_and_not_datetimelike(np.integer)
) or _is_dtype(
arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu"
)
|
(arr_or_dtype) -> bool
|
66,598 |
pandas.core.arrays.integer
|
Int32Dtype
|
An ExtensionDtype for int32 integer data.
Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`.
Attributes
----------
None
Methods
-------
None
Examples
--------
For Int8Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype())
>>> ser.dtype
Int8Dtype()
For Int16Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype())
>>> ser.dtype
Int16Dtype()
For Int32Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype())
>>> ser.dtype
Int32Dtype()
For Int64Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype())
>>> ser.dtype
Int64Dtype()
For UInt8Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype())
>>> ser.dtype
UInt8Dtype()
For UInt16Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype())
>>> ser.dtype
UInt16Dtype()
For UInt32Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype())
>>> ser.dtype
UInt32Dtype()
For UInt64Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype())
>>> ser.dtype
UInt64Dtype()
|
class Int32Dtype(IntegerDtype):
type = np.int32
name: ClassVar[str] = "Int32"
__doc__ = _dtype_docstring.format(dtype="int32")
|
()
|
66,608 |
pandas.core.arrays.integer
|
Int64Dtype
|
An ExtensionDtype for int64 integer data.
Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`.
Attributes
----------
None
Methods
-------
None
Examples
--------
For Int8Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype())
>>> ser.dtype
Int8Dtype()
For Int16Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype())
>>> ser.dtype
Int16Dtype()
For Int32Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype())
>>> ser.dtype
Int32Dtype()
For Int64Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype())
>>> ser.dtype
Int64Dtype()
For UInt8Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype())
>>> ser.dtype
UInt8Dtype()
For UInt16Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype())
>>> ser.dtype
UInt16Dtype()
For UInt32Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype())
>>> ser.dtype
UInt32Dtype()
For UInt64Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype())
>>> ser.dtype
UInt64Dtype()
|
class Int64Dtype(IntegerDtype):
type = np.int64
name: ClassVar[str] = "Int64"
__doc__ = _dtype_docstring.format(dtype="int64")
|
()
|
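Beyond construction, the practical point of these nullable integer dtypes is that missing values are pd.NA and the integer dtype survives arithmetic, instead of upcasting to float64 with NaN. A short sketch:
import pandas as pd

s = pd.Series([1, 2, None], dtype="Int64")
print(s.dtype)          # Int64
print((s + 1).dtype)    # Int64: pd.NA propagates, no upcast to float64
print(s.isna().sum())   # 1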
66,618 |
pandas.core.arrays.integer
|
Int8Dtype
|
An ExtensionDtype for int8 integer data.
Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`.
Attributes
----------
None
Methods
-------
None
Examples
--------
For Int8Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype())
>>> ser.dtype
Int8Dtype()
For Int16Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype())
>>> ser.dtype
Int16Dtype()
For Int32Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype())
>>> ser.dtype
Int32Dtype()
For Int64Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype())
>>> ser.dtype
Int64Dtype()
For UInt8Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype())
>>> ser.dtype
UInt8Dtype()
For UInt16Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype())
>>> ser.dtype
UInt16Dtype()
For UInt32Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype())
>>> ser.dtype
UInt32Dtype()
For UInt64Dtype:
>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype())
>>> ser.dtype
UInt64Dtype()
|
class Int8Dtype(IntegerDtype):
type = np.int8
name: ClassVar[str] = "Int8"
__doc__ = _dtype_docstring.format(dtype="int8")
|
()
|
66,628 |
pandas._libs.interval
|
Interval
|
Immutable object implementing an Interval, a bounded slice-like interval.
Parameters
----------
left : orderable scalar
Left bound for the interval.
right : orderable scalar
Right bound for the interval.
closed : {'right', 'left', 'both', 'neither'}, default 'right'
Whether the interval is closed on the left-side, right-side, both or
neither. See the Notes for more detailed explanation.
See Also
--------
IntervalIndex : An Index of Interval objects that are all closed on the
same side.
cut : Convert continuous data into discrete bins (Categorical
of Interval objects).
qcut : Convert continuous data into bins (Categorical of Interval objects)
based on quantiles.
Period : Represents a period of time.
Notes
-----
The parameters `left` and `right` must be of the same type; you must be
able to compare them and they must satisfy ``left <= right``.
A closed interval (in mathematics denoted by square brackets) contains
its endpoints, i.e. the closed interval ``[0, 5]`` is characterized by the
conditions ``0 <= x <= 5``. This is what ``closed='both'`` stands for.
An open interval (in mathematics denoted by parentheses) does not contain
its endpoints, i.e. the open interval ``(0, 5)`` is characterized by the
conditions ``0 < x < 5``. This is what ``closed='neither'`` stands for.
Intervals can also be half-open or half-closed, i.e. ``[0, 5)`` is
described by ``0 <= x < 5`` (``closed='left'``) and ``(0, 5]`` is
described by ``0 < x <= 5`` (``closed='right'``).
Examples
--------
It is possible to build Intervals of different types, like numeric ones:
>>> iv = pd.Interval(left=0, right=5)
>>> iv
Interval(0, 5, closed='right')
You can check if an element belongs to it, or if it contains another interval:
>>> 2.5 in iv
True
>>> pd.Interval(left=2, right=5, closed='both') in iv
True
You can test the bounds (``closed='right'``, so ``0 < x <= 5``):
>>> 0 in iv
False
>>> 5 in iv
True
>>> 0.0001 in iv
True
Calculate its length
>>> iv.length
5
You can operate with `+` and `*` over an Interval and the operation
is applied to each of its bounds, so the result depends on the type
of the bound elements
>>> shifted_iv = iv + 3
>>> shifted_iv
Interval(3, 8, closed='right')
>>> extended_iv = iv * 10.0
>>> extended_iv
Interval(0.0, 50.0, closed='right')
To create a time interval you can use Timestamps as the bounds
>>> year_2017 = pd.Interval(pd.Timestamp('2017-01-01 00:00:00'),
... pd.Timestamp('2018-01-01 00:00:00'),
... closed='left')
>>> pd.Timestamp('2017-01-01 00:00') in year_2017
True
>>> year_2017.length
Timedelta('365 days 00:00:00')
|
from pandas._libs.interval import Interval
| null |
66,629 |
pandas.core.dtypes.dtypes
|
IntervalDtype
|
An ExtensionDtype for Interval data.
**This is not an actual numpy dtype**, but a duck type.
Parameters
----------
subtype : str, np.dtype
The dtype of the Interval bounds.
Attributes
----------
subtype
Methods
-------
None
Examples
--------
>>> pd.IntervalDtype(subtype='int64', closed='both')
interval[int64, both]
|
class IntervalDtype(PandasExtensionDtype):
"""
An ExtensionDtype for Interval data.
**This is not an actual numpy dtype**, but a duck type.
Parameters
----------
subtype : str, np.dtype
The dtype of the Interval bounds.
Attributes
----------
subtype
Methods
-------
None
Examples
--------
>>> pd.IntervalDtype(subtype='int64', closed='both')
interval[int64, both]
"""
name = "interval"
kind: str_type = "O"
str = "|O08"
base = np.dtype("O")
num = 103
_metadata = (
"subtype",
"closed",
)
_match = re.compile(
r"(I|i)nterval\[(?P<subtype>[^,]+(\[.+\])?)"
r"(, (?P<closed>(right|left|both|neither)))?\]"
)
_cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
_subtype: None | np.dtype
_closed: IntervalClosedType | None
def __init__(self, subtype=None, closed: IntervalClosedType | None = None) -> None:
from pandas.core.dtypes.common import (
is_string_dtype,
pandas_dtype,
)
if closed is not None and closed not in {"right", "left", "both", "neither"}:
raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'")
if isinstance(subtype, IntervalDtype):
if closed is not None and closed != subtype.closed:
raise ValueError(
"dtype.closed and 'closed' do not match. "
"Try IntervalDtype(dtype.subtype, closed) instead."
)
self._subtype = subtype._subtype
self._closed = subtype._closed
elif subtype is None:
# we are called as an empty constructor
# generally for pickle compat
self._subtype = None
self._closed = closed
elif isinstance(subtype, str) and subtype.lower() == "interval":
self._subtype = None
self._closed = closed
else:
if isinstance(subtype, str):
m = IntervalDtype._match.search(subtype)
if m is not None:
gd = m.groupdict()
subtype = gd["subtype"]
if gd.get("closed", None) is not None:
if closed is not None:
if closed != gd["closed"]:
raise ValueError(
"'closed' keyword does not match value "
"specified in dtype string"
)
closed = gd["closed"] # type: ignore[assignment]
try:
subtype = pandas_dtype(subtype)
except TypeError as err:
raise TypeError("could not construct IntervalDtype") from err
if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype):
# GH 19016
msg = (
"category, object, and string subtypes are not supported "
"for IntervalDtype"
)
raise TypeError(msg)
self._subtype = subtype
self._closed = closed
@cache_readonly
def _can_hold_na(self) -> bool:
subtype = self._subtype
if subtype is None:
# partially-initialized
raise NotImplementedError(
"_can_hold_na is not defined for partially-initialized IntervalDtype"
)
if subtype.kind in "iu":
return False
return True
@property
def closed(self) -> IntervalClosedType:
return self._closed # type: ignore[return-value]
@property
def subtype(self):
"""
The dtype of the Interval bounds.
Examples
--------
>>> dtype = pd.IntervalDtype(subtype='int64', closed='both')
>>> dtype.subtype
dtype('int64')
"""
return self._subtype
@classmethod
def construct_array_type(cls) -> type[IntervalArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
from pandas.core.arrays import IntervalArray
return IntervalArray
@classmethod
def construct_from_string(cls, string: str_type) -> IntervalDtype:
"""
Attempt to construct this type from a string, raising a TypeError
if it's not possible
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
if string.lower() == "interval" or cls._match.search(string) is not None:
return cls(string)
msg = (
f"Cannot construct a 'IntervalDtype' from '{string}'.\n\n"
"Incorrectly formatted string passed to constructor. "
"Valid formats include Interval or Interval[dtype] "
"where dtype is numeric, datetime, or timedelta"
)
raise TypeError(msg)
@property
def type(self) -> type[Interval]:
return Interval
def __str__(self) -> str_type:
if self.subtype is None:
return "interval"
if self.closed is None:
# Only partially initialized GH#38394
return f"interval[{self.subtype}]"
return f"interval[{self.subtype}, {self.closed}]"
def __hash__(self) -> int:
# make myself hashable
return hash(str(self))
def __eq__(self, other: object) -> bool:
if isinstance(other, str):
return other.lower() in (self.name.lower(), str(self).lower())
elif not isinstance(other, IntervalDtype):
return False
elif self.subtype is None or other.subtype is None:
# None should match any subtype
return True
elif self.closed != other.closed:
return False
else:
return self.subtype == other.subtype
def __setstate__(self, state) -> None:
# for pickle compat. __get_state__ is defined in the
# PandasExtensionDtype superclass and uses the public properties to
# pickle -> need to set the settable private ones here (see GH26067)
self._subtype = state["subtype"]
# backward-compat older pickles won't have "closed" key
self._closed = state.pop("closed", None)
@classmethod
def is_dtype(cls, dtype: object) -> bool:
"""
Return a boolean indicating whether the passed type is an actual dtype that we
can match (via string or type)
"""
if isinstance(dtype, str):
if dtype.lower().startswith("interval"):
try:
return cls.construct_from_string(dtype) is not None
except (ValueError, TypeError):
return False
else:
return False
return super().is_dtype(dtype)
def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray:
"""
Construct IntervalArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
from pandas.core.arrays import IntervalArray
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
chunks = array.chunks
results = []
for arr in chunks:
if isinstance(arr, pyarrow.ExtensionArray):
arr = arr.storage
left = np.asarray(arr.field("left"), dtype=self.subtype)
right = np.asarray(arr.field("right"), dtype=self.subtype)
iarr = IntervalArray.from_arrays(left, right, closed=self.closed)
results.append(iarr)
if not results:
return IntervalArray.from_arrays(
np.array([], dtype=self.subtype),
np.array([], dtype=self.subtype),
closed=self.closed,
)
return IntervalArray._concat_same_type(results)
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
if not all(isinstance(x, IntervalDtype) for x in dtypes):
return None
closed = cast("IntervalDtype", dtypes[0]).closed
if not all(cast("IntervalDtype", x).closed == closed for x in dtypes):
return np.dtype(object)
from pandas.core.dtypes.cast import find_common_type
common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes])
if common == object:
return np.dtype(object)
return IntervalDtype(common, closed=closed)
@cache_readonly
def index_class(self) -> type_t[IntervalIndex]:
from pandas import IntervalIndex
return IntervalIndex
|
(subtype=None, closed: 'IntervalClosedType | None' = None) -> 'None'
|
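A brief sketch of how the regex-based string parsing and the equality rules above behave; results are indicative of recent pandas versions:
import pandas as pd

dt = pd.api.types.pandas_dtype("interval[int64, left]")
print(dt.subtype, dt.closed)                              # int64 left

print(pd.IntervalDtype("int64") == "interval")            # True: matches the dtype name
print(pd.IntervalDtype() == pd.IntervalDtype("float64"))  # True: a None subtype matches any
print(pd.IntervalDtype("int64", "left")
      == pd.IntervalDtype("int64", "right"))              # False: closed sides differ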
66,630 |
pandas.core.dtypes.dtypes
|
__eq__
| null |
def __eq__(self, other: object) -> bool:
if isinstance(other, str):
return other.lower() in (self.name.lower(), str(self).lower())
elif not isinstance(other, IntervalDtype):
return False
elif self.subtype is None or other.subtype is None:
# None should match any subtype
return True
elif self.closed != other.closed:
return False
else:
return self.subtype == other.subtype
|
(self, other: object) -> bool
|
66,631 |
pandas.core.dtypes.dtypes
|
__from_arrow__
|
Construct IntervalArray from pyarrow Array/ChunkedArray.
|
def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray:
"""
Construct IntervalArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
from pandas.core.arrays import IntervalArray
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
chunks = array.chunks
results = []
for arr in chunks:
if isinstance(arr, pyarrow.ExtensionArray):
arr = arr.storage
left = np.asarray(arr.field("left"), dtype=self.subtype)
right = np.asarray(arr.field("right"), dtype=self.subtype)
iarr = IntervalArray.from_arrays(left, right, closed=self.closed)
results.append(iarr)
if not results:
return IntervalArray.from_arrays(
np.array([], dtype=self.subtype),
np.array([], dtype=self.subtype),
closed=self.closed,
)
return IntervalArray._concat_same_type(results)
|
(self, array: 'pa.Array | pa.ChunkedArray') -> 'IntervalArray'
|
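A minimal sketch of calling `__from_arrow__` directly with a plain pyarrow struct array carrying `left`/`right` fields, which is the layout it reads; this bypasses the registered extension type and is for illustration only:
import pyarrow as pa
import pandas as pd

dtype = pd.IntervalDtype("int64", closed="right")
struct = pa.StructArray.from_arrays(
    [pa.array([0, 1], type=pa.int64()), pa.array([1, 2], type=pa.int64())],
    names=["left", "right"],
)
arr = dtype.__from_arrow__(struct)
print(arr)   # IntervalArray of (0, 1] and (1, 2], dtype interval[int64, right]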
66,634 |
pandas.core.dtypes.dtypes
|
__init__
| null |
def __init__(self, subtype=None, closed: IntervalClosedType | None = None) -> None:
from pandas.core.dtypes.common import (
is_string_dtype,
pandas_dtype,
)
if closed is not None and closed not in {"right", "left", "both", "neither"}:
raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'")
if isinstance(subtype, IntervalDtype):
if closed is not None and closed != subtype.closed:
raise ValueError(
"dtype.closed and 'closed' do not match. "
"Try IntervalDtype(dtype.subtype, closed) instead."
)
self._subtype = subtype._subtype
self._closed = subtype._closed
elif subtype is None:
# we are called as an empty constructor
# generally for pickle compat
self._subtype = None
self._closed = closed
elif isinstance(subtype, str) and subtype.lower() == "interval":
self._subtype = None
self._closed = closed
else:
if isinstance(subtype, str):
m = IntervalDtype._match.search(subtype)
if m is not None:
gd = m.groupdict()
subtype = gd["subtype"]
if gd.get("closed", None) is not None:
if closed is not None:
if closed != gd["closed"]:
raise ValueError(
"'closed' keyword does not match value "
"specified in dtype string"
)
closed = gd["closed"] # type: ignore[assignment]
try:
subtype = pandas_dtype(subtype)
except TypeError as err:
raise TypeError("could not construct IntervalDtype") from err
if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype):
# GH 19016
msg = (
"category, object, and string subtypes are not supported "
"for IntervalDtype"
)
raise TypeError(msg)
self._subtype = subtype
self._closed = closed
|
(self, subtype=None, closed: 'IntervalClosedType | None' = None) -> 'None'
|
66,637 |
pandas.core.dtypes.dtypes
|
__setstate__
| null |
def __setstate__(self, state) -> None:
# for pickle compat. __get_state__ is defined in the
# PandasExtensionDtype superclass and uses the public properties to
# pickle -> need to set the settable private ones here (see GH26067)
self._subtype = state["subtype"]
# backward-compat older pickles won't have "closed" key
self._closed = state.pop("closed", None)
|
(self, state) -> NoneType
|
66,638 |
pandas.core.dtypes.dtypes
|
__str__
| null |
def __str__(self) -> str_type:
if self.subtype is None:
return "interval"
if self.closed is None:
# Only partially initialized GH#38394
return f"interval[{self.subtype}]"
return f"interval[{self.subtype}, {self.closed}]"
|
(self) -> str
|
66,639 |
pandas.core.dtypes.dtypes
|
_get_common_dtype
| null |
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
if not all(isinstance(x, IntervalDtype) for x in dtypes):
return None
closed = cast("IntervalDtype", dtypes[0]).closed
if not all(cast("IntervalDtype", x).closed == closed for x in dtypes):
return np.dtype(object)
from pandas.core.dtypes.cast import find_common_type
common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes])
if common == object:
return np.dtype(object)
return IntervalDtype(common, closed=closed)
|
(self, dtypes: 'list[DtypeObj]') -> 'DtypeObj | None'
|
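The common-dtype rule above is what drives concatenation of interval data: matching `closed` sides get a common subtype, mismatched sides fall back to object. A hedged sketch:
import pandas as pd

left = pd.Series(pd.IntervalIndex.from_breaks([0, 1, 2], closed="left"))
right = pd.Series(pd.IntervalIndex.from_breaks([0, 1, 2], closed="right"))

print(pd.concat([left, right]).dtype)    # object: the closed sides disagree

wider = left.astype("interval[float64, left]")
print(pd.concat([left, wider]).dtype)    # interval[float64, left]: common subtype found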