index | package | name | docstring | code | signature |
---|---|---|---|---|---|
68,365 |
pandas.io.orc
|
read_orc
|
Load an ORC object from the file path, returning a DataFrame.
Parameters
----------
path : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function. The string could be a URL.
Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.orc``.
columns : list, default None
If not None, only these columns will be read from the file.
Output always follows the ordering of the file and not the columns list.
This mirrors the original behaviour of
:external+pyarrow:py:meth:`pyarrow.orc.ORCFile.read`.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
filesystem : fsspec or pyarrow filesystem, default None
Filesystem object to use when reading the ORC file.
.. versionadded:: 2.1.0
**kwargs
Any additional kwargs are passed to pyarrow.
Returns
-------
DataFrame
Notes
-----
Before using this function you should read the :ref:`user guide about ORC <io.orc>`
and :ref:`install optional dependencies <install.warn_orc>`.
If ``path`` is a URI pointing to a local or remote file (e.g. "s3://"),
pandas will attempt to open it with a ``pyarrow.fs`` filesystem. You can also pass a
pyarrow or fsspec filesystem object into the ``filesystem`` keyword to override this
behavior.
Examples
--------
>>> result = pd.read_orc("example_pa.orc") # doctest: +SKIP
|
def read_orc(
path: FilePath | ReadBuffer[bytes],
columns: list[str] | None = None,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
filesystem: pyarrow.fs.FileSystem | fsspec.spec.AbstractFileSystem | None = None,
**kwargs: Any,
) -> DataFrame:
"""
Load an ORC object from the file path, returning a DataFrame.
Parameters
----------
path : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function. The string could be a URL.
Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.orc``.
columns : list, default None
If not None, only these columns will be read from the file.
Output always follows the ordering of the file and not the columns list.
This mirrors the original behaviour of
:external+pyarrow:py:meth:`pyarrow.orc.ORCFile.read`.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
filesystem : fsspec or pyarrow filesystem, default None
Filesystem object to use when reading the ORC file.
.. versionadded:: 2.1.0
**kwargs
Any additional kwargs are passed to pyarrow.
Returns
-------
DataFrame
Notes
-----
Before using this function you should read the :ref:`user guide about ORC <io.orc>`
and :ref:`install optional dependencies <install.warn_orc>`.
If ``path`` is a URI pointing to a local or remote file (e.g. "s3://"),
pandas will attempt to open it with a ``pyarrow.fs`` filesystem. You can also pass a
pyarrow or fsspec filesystem object into the ``filesystem`` keyword to override this
behavior.
Examples
--------
>>> result = pd.read_orc("example_pa.orc") # doctest: +SKIP
"""
# we require a newer version of pyarrow than we support for parquet
orc = import_optional_dependency("pyarrow.orc")
check_dtype_backend(dtype_backend)
with get_handle(path, "rb", is_text=False) as handles:
source = handles.handle
if is_fsspec_url(path) and filesystem is None:
pa = import_optional_dependency("pyarrow")
pa_fs = import_optional_dependency("pyarrow.fs")
try:
filesystem, source = pa_fs.FileSystem.from_uri(path)
except (TypeError, pa.ArrowInvalid):
pass
pa_table = orc.read_table(
source=source, columns=columns, filesystem=filesystem, **kwargs
)
if dtype_backend is not lib.no_default:
if dtype_backend == "pyarrow":
df = pa_table.to_pandas(types_mapper=pd.ArrowDtype)
else:
from pandas.io._util import _arrow_dtype_mapping
mapping = _arrow_dtype_mapping()
df = pa_table.to_pandas(types_mapper=mapping.get)
return df
else:
if using_pyarrow_string_dtype():
types_mapper = arrow_string_types_mapper()
else:
types_mapper = None
return pa_table.to_pandas(types_mapper=types_mapper)
|
(path: 'FilePath | ReadBuffer[bytes]', columns: 'list[str] | None' = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>, filesystem: 'pyarrow.fs.FileSystem | fsspec.spec.AbstractFileSystem | None' = None, **kwargs: 'Any') -> 'DataFrame'
|
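A hedged usage sketch for ``read_orc``: it round-trips a small frame through an ORC file and opts into the pyarrow-backed dtypes described above. It assumes pyarrow is installed and that ``example.orc`` (a placeholder name) is writable in the working directory.

import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, None], "b": ["x", "y", "z"]})
df.to_orc("example.orc")  # writing ORC also requires pyarrow

# Read back only one column with the default numpy-backed dtypes
restored = pd.read_orc("example.orc", columns=["a"])

# Opt into pyarrow-backed nullable dtypes (still experimental)
restored_pa = pd.read_orc("example.orc", dtype_backend="pyarrow")
print(restored_pa.dtypes)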
68,366 |
pandas.io.parquet
|
read_parquet
|
Load a parquet object from the file path, returning a DataFrame.
Parameters
----------
path : str, path object or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function.
The string could be a URL. Valid URL schemes include http, ftp, s3,
gs, and file. For file URLs, a host is expected. A local file could be:
``file://localhost/path/to/table.parquet``.
A file URL can also be a path to a directory that contains multiple
partitioned parquet files. Both pyarrow and fastparquet support
paths to directories as well as file URLs. A directory path could be:
``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
When using the ``'pyarrow'`` engine and no storage options are provided
and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``
(e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.
Use the filesystem keyword with an instantiated fsspec filesystem
if you wish to use its implementation.
columns : list, default=None
If not None, only these columns will be read from the file.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
are forwarded to ``urllib.request.Request`` as header options. For other
URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
details, and for more examples on storage options refer `here
<https://pandas.pydata.org/docs/user_guide/io.html?
highlight=storage_options#reading-writing-remote-files>`_.
.. versionadded:: 1.3.0
use_nullable_dtypes : bool, default False
If True, use dtypes that use ``pd.NA`` as missing value indicator
for the resulting DataFrame. (only applicable for the ``pyarrow``
engine)
As new dtypes are added that support ``pd.NA`` in the future, the
output with this option will change to use those dtypes.
Note: this is an experimental option, and behaviour (e.g. additional
supported dtypes) may change without notice.
.. deprecated:: 2.0
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
filesystem : fsspec or pyarrow filesystem, default None
Filesystem object to use when reading the parquet file. Only implemented
for ``engine="pyarrow"``.
.. versionadded:: 2.1.0
filters : List[Tuple] or List[List[Tuple]], default None
To filter out data.
Filter syntax: [[(column, op, val), ...],...]
where op is [==, =, >, >=, <, <=, !=, in, not in]
The innermost tuples are transposed into a set of filters applied
through an `AND` operation.
The outer list combines these sets of filters through an `OR`
operation.
A single list of tuples can also be used, meaning that no `OR`
operation between set of filters is to be conducted.
Using this argument will NOT result in row-wise filtering of the final
partitions unless ``engine="pyarrow"`` is also specified. For
other engines, filtering is only performed at the partition level, that is,
to prevent the loading of some row-groups and/or files.
.. versionadded:: 2.1.0
**kwargs
Any additional kwargs are passed to the engine.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_parquet : Create a parquet object that serializes a DataFrame.
Examples
--------
>>> original_df = pd.DataFrame(
... {"foo": range(5), "bar": range(5, 10)}
... )
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> df_parquet_bytes = original_df.to_parquet()
>>> from io import BytesIO
>>> restored_df = pd.read_parquet(BytesIO(df_parquet_bytes))
>>> restored_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> restored_df.equals(original_df)
True
>>> restored_bar = pd.read_parquet(BytesIO(df_parquet_bytes), columns=["bar"])
>>> restored_bar
bar
0 5
1 6
2 7
3 8
4 9
>>> restored_bar.equals(original_df[['bar']])
True
The function uses `kwargs` that are passed directly to the engine.
In the following example, we use the `filters` argument of the pyarrow
engine to filter the rows of the DataFrame.
Since `pyarrow` is the default engine, we can omit the `engine` argument.
Note that the `filters` argument is implemented by the `pyarrow` engine,
which can benefit from multithreading and also potentially be more
economical in terms of memory.
>>> sel = [("foo", ">", 2)]
>>> restored_part = pd.read_parquet(BytesIO(df_parquet_bytes), filters=sel)
>>> restored_part
foo bar
0 3 8
1 4 9
|
@doc(storage_options=_shared_docs["storage_options"])
def read_parquet(
path: FilePath | ReadBuffer[bytes],
engine: str = "auto",
columns: list[str] | None = None,
storage_options: StorageOptions | None = None,
use_nullable_dtypes: bool | lib.NoDefault = lib.no_default,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
filesystem: Any = None,
filters: list[tuple] | list[list[tuple]] | None = None,
**kwargs,
) -> DataFrame:
"""
Load a parquet object from the file path, returning a DataFrame.
Parameters
----------
path : str, path object or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function.
The string could be a URL. Valid URL schemes include http, ftp, s3,
gs, and file. For file URLs, a host is expected. A local file could be:
``file://localhost/path/to/table.parquet``.
A file URL can also be a path to a directory that contains multiple
partitioned parquet files. Both pyarrow and fastparquet support
paths to directories as well as file URLs. A directory path could be:
``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
When using the ``'pyarrow'`` engine and no storage options are provided
and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``
(e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.
Use the filesystem keyword with an instantiated fsspec filesystem
if you wish to use its implementation.
columns : list, default=None
If not None, only these columns will be read from the file.
{storage_options}
.. versionadded:: 1.3.0
use_nullable_dtypes : bool, default False
If True, use dtypes that use ``pd.NA`` as missing value indicator
for the resulting DataFrame. (only applicable for the ``pyarrow``
engine)
As new dtypes are added that support ``pd.NA`` in the future, the
output with this option will change to use those dtypes.
Note: this is an experimental option, and behaviour (e.g. additional
supported dtypes) may change without notice.
.. deprecated:: 2.0
dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
filesystem : fsspec or pyarrow filesystem, default None
Filesystem object to use when reading the parquet file. Only implemented
for ``engine="pyarrow"``.
.. versionadded:: 2.1.0
filters : List[Tuple] or List[List[Tuple]], default None
To filter out data.
Filter syntax: [[(column, op, val), ...],...]
where op is [==, =, >, >=, <, <=, !=, in, not in]
The innermost tuples are transposed into a set of filters applied
through an `AND` operation.
The outer list combines these sets of filters through an `OR`
operation.
A single list of tuples can also be used, meaning that no `OR`
operation between set of filters is to be conducted.
Using this argument will NOT result in row-wise filtering of the final
partitions unless ``engine="pyarrow"`` is also specified. For
other engines, filtering is only performed at the partition level, that is,
to prevent the loading of some row-groups and/or files.
.. versionadded:: 2.1.0
**kwargs
Any additional kwargs are passed to the engine.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_parquet : Create a parquet object that serializes a DataFrame.
Examples
--------
>>> original_df = pd.DataFrame(
... {{"foo": range(5), "bar": range(5, 10)}}
... )
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> df_parquet_bytes = original_df.to_parquet()
>>> from io import BytesIO
>>> restored_df = pd.read_parquet(BytesIO(df_parquet_bytes))
>>> restored_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> restored_df.equals(original_df)
True
>>> restored_bar = pd.read_parquet(BytesIO(df_parquet_bytes), columns=["bar"])
>>> restored_bar
bar
0 5
1 6
2 7
3 8
4 9
>>> restored_bar.equals(original_df[['bar']])
True
The function uses `kwargs` that are passed directly to the engine.
In the following example, we use the `filters` argument of the pyarrow
engine to filter the rows of the DataFrame.
Since `pyarrow` is the default engine, we can omit the `engine` argument.
Note that the `filters` argument is implemented by the `pyarrow` engine,
which can benefit from multithreading and also potentially be more
economical in terms of memory.
>>> sel = [("foo", ">", 2)]
>>> restored_part = pd.read_parquet(BytesIO(df_parquet_bytes), filters=sel)
>>> restored_part
foo bar
0 3 8
1 4 9
"""
impl = get_engine(engine)
if use_nullable_dtypes is not lib.no_default:
msg = (
"The argument 'use_nullable_dtypes' is deprecated and will be removed "
"in a future version."
)
if use_nullable_dtypes is True:
msg += (
"Use dtype_backend='numpy_nullable' instead of use_nullable_dtype=True."
)
warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
else:
use_nullable_dtypes = False
check_dtype_backend(dtype_backend)
return impl.read(
path,
columns=columns,
filters=filters,
storage_options=storage_options,
use_nullable_dtypes=use_nullable_dtypes,
dtype_backend=dtype_backend,
filesystem=filesystem,
**kwargs,
)
|
(path: 'FilePath | ReadBuffer[bytes]', engine: 'str' = 'auto', columns: 'list[str] | None' = None, storage_options: 'StorageOptions | None' = None, use_nullable_dtypes: 'bool | lib.NoDefault' = <no_default>, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>, filesystem: 'Any' = None, filters: 'list[tuple] | list[list[tuple]] | None' = None, **kwargs) -> 'DataFrame'
|
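A hedged sketch of the two-level ``filters`` syntax described above (outer list = OR of groups, tuples inside a group = AND), assuming pyarrow is installed; it reuses the in-memory buffer pattern from the docstring examples.

from io import BytesIO

import pandas as pd

buf = BytesIO(pd.DataFrame({"foo": range(5), "bar": range(5, 10)}).to_parquet())

# Keep rows where (foo > 3) OR (bar == 5)
filters = [[("foo", ">", 3)], [("bar", "==", 5)]]
subset = pd.read_parquet(buf, engine="pyarrow", filters=filters)
print(subset)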
68,367 |
pandas.io.pickle
|
read_pickle
|
Load pickled pandas object (or any object) from file.
.. warning::
Loading pickled data received from untrusted sources can be
unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``readlines()`` function.
Also accepts URL. URL is not limited to S3 and GCS.
compression : str or dict, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and 'filepath_or_buffer' is
path-like, then detect compression from the following extensions: '.gz',
'.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'
(otherwise no compression).
If using 'zip' or 'tar', the ZIP file must contain only one data file to be read in.
Set to ``None`` for no decompression.
Can also be a dict with key ``'method'`` set
to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and
other key-value pairs are forwarded to
``zipfile.ZipFile``, ``gzip.GzipFile``,
``bz2.BZ2File``, ``zstandard.ZstdDecompressor``, ``lzma.LZMAFile`` or
``tarfile.TarFile``, respectively.
As an example, the following could be passed for Zstandard decompression using a
custom compression dictionary:
``compression={'method': 'zstd', 'dict_data': my_compression_dict}``.
.. versionadded:: 1.5.0
Added support for `.tar` files.
.. versionchanged:: 1.4.0 Zstandard support.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
are forwarded to ``urllib.request.Request`` as header options. For other
URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
details, and for more examples on storage options refer `here
<https://pandas.pydata.org/docs/user_guide/io.html?
highlight=storage_options#reading-writing-remote-files>`_.
Returns
-------
same type as object stored in file
See Also
--------
DataFrame.to_pickle : Pickle (serialize) DataFrame object to file.
Series.to_pickle : Pickle (serialize) Series object to file.
read_hdf : Read HDF5 file into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
read_parquet : Load a parquet object, returning a DataFrame.
Notes
-----
read_pickle is only guaranteed to be backwards compatible with pandas 0.20.3,
provided the object was serialized with to_pickle.
Examples
--------
>>> original_df = pd.DataFrame(
... {"foo": range(5), "bar": range(5, 10)}
... ) # doctest: +SKIP
>>> original_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP
>>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP
>>> unpickled_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
|
@doc(
storage_options=_shared_docs["storage_options"],
decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer",
)
def read_pickle(
filepath_or_buffer: FilePath | ReadPickleBuffer,
compression: CompressionOptions = "infer",
storage_options: StorageOptions | None = None,
) -> DataFrame | Series:
"""
Load pickled pandas object (or any object) from file.
.. warning::
Loading pickled data received from untrusted sources can be
unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``readlines()`` function.
Also accepts URL. URL is not limited to S3 and GCS.
{decompression_options}
.. versionchanged:: 1.4.0 Zstandard support.
{storage_options}
Returns
-------
same type as object stored in file
See Also
--------
DataFrame.to_pickle : Pickle (serialize) DataFrame object to file.
Series.to_pickle : Pickle (serialize) Series object to file.
read_hdf : Read HDF5 file into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
read_parquet : Load a parquet object, returning a DataFrame.
Notes
-----
read_pickle is only guaranteed to be backwards compatible with pandas 0.20.3,
provided the object was serialized with to_pickle.
Examples
--------
>>> original_df = pd.DataFrame(
... {{"foo": range(5), "bar": range(5, 10)}}
... ) # doctest: +SKIP
>>> original_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP
>>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP
>>> unpickled_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
"""
excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError)
with get_handle(
filepath_or_buffer,
"rb",
compression=compression,
is_text=False,
storage_options=storage_options,
) as handles:
# 1) try standard library Pickle
# 2) try pickle_compat (older pandas version) to handle subclass changes
# 3) try pickle_compat with latin-1 encoding upon a UnicodeDecodeError
try:
# TypeError for Cython complaints about object.__new__ vs Tick.__new__
try:
with warnings.catch_warnings(record=True):
# We want to silence any warnings about, e.g. moved modules.
warnings.simplefilter("ignore", Warning)
return pickle.load(handles.handle)
except excs_to_catch:
# e.g.
# "No module named 'pandas.core.sparse.series'"
# "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib"
return pc.load(handles.handle, encoding=None)
except UnicodeDecodeError:
# e.g. can occur for files written in py27; see GH#28645 and GH#31988
return pc.load(handles.handle, encoding="latin-1")
|
(filepath_or_buffer: 'FilePath | ReadPickleBuffer', compression: 'CompressionOptions' = 'infer', storage_options: 'StorageOptions | None' = None) -> 'DataFrame | Series'
|
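A hedged sketch of compression inference with ``to_pickle``/``read_pickle``, assuming ``frame.pkl.gz`` (a placeholder name) is writable in the working directory.

import pandas as pd

df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
df.to_pickle("frame.pkl.gz")                 # gzip compression inferred from ".gz"
roundtrip = pd.read_pickle("frame.pkl.gz")   # decompression inferred the same way
assert roundtrip.equals(df)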
68,368 |
pandas.io.sas.sasreader
|
read_sas
|
Read SAS files stored as either XPORT or SAS7BDAT format files.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function. The string could be a URL.
Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.sas7bdat``.
format : str {'xport', 'sas7bdat'} or None
If None, file format is inferred from file extension. If 'xport' or
'sas7bdat', uses the corresponding format.
index : identifier of index column, defaults to None
Identifier of column that should be used as index of the DataFrame.
encoding : str, default is None
Encoding for text data. If None, text data are stored as raw bytes.
chunksize : int
Read file `chunksize` lines at a time, returns iterator.
iterator : bool, defaults to False
If True, returns an iterator for reading the file incrementally.
compression : str or dict, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and 'filepath_or_buffer' is
path-like, then detect compression from the following extensions: '.gz',
'.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'
(otherwise no compression).
If using 'zip' or 'tar', the ZIP file must contain only one data file to be read in.
Set to ``None`` for no decompression.
Can also be a dict with key ``'method'`` set
to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and
other key-value pairs are forwarded to
``zipfile.ZipFile``, ``gzip.GzipFile``,
``bz2.BZ2File``, ``zstandard.ZstdDecompressor``, ``lzma.LZMAFile`` or
``tarfile.TarFile``, respectively.
As an example, the following could be passed for Zstandard decompression using a
custom compression dictionary:
``compression={'method': 'zstd', 'dict_data': my_compression_dict}``.
.. versionadded:: 1.5.0
Added support for `.tar` files.
Returns
-------
DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
or XportReader
Examples
--------
>>> df = pd.read_sas("sas_data.sas7bdat") # doctest: +SKIP
|
@doc(decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer")
def read_sas(
filepath_or_buffer: FilePath | ReadBuffer[bytes],
*,
format: str | None = None,
index: Hashable | None = None,
encoding: str | None = None,
chunksize: int | None = None,
iterator: bool = False,
compression: CompressionOptions = "infer",
) -> DataFrame | ReaderBase:
"""
Read SAS files stored as either XPORT or SAS7BDAT format files.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function. The string could be a URL.
Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.sas7bdat``.
format : str {{'xport', 'sas7bdat'}} or None
If None, file format is inferred from file extension. If 'xport' or
'sas7bdat', uses the corresponding format.
index : identifier of index column, defaults to None
Identifier of column that should be used as index of the DataFrame.
encoding : str, default is None
Encoding for text data. If None, text data are stored as raw bytes.
chunksize : int
Read file `chunksize` lines at a time, returns iterator.
iterator : bool, defaults to False
If True, returns an iterator for reading the file incrementally.
{decompression_options}
Returns
-------
DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
or XportReader
Examples
--------
>>> df = pd.read_sas("sas_data.sas7bdat") # doctest: +SKIP
"""
if format is None:
buffer_error_msg = (
"If this is a buffer object rather "
"than a string name, you must specify a format string"
)
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
raise ValueError(buffer_error_msg)
fname = filepath_or_buffer.lower()
if ".xpt" in fname:
format = "xport"
elif ".sas7bdat" in fname:
format = "sas7bdat"
else:
raise ValueError(
f"unable to infer format of SAS file from filename: {repr(fname)}"
)
reader: ReaderBase
if format.lower() == "xport":
from pandas.io.sas.sas_xport import XportReader
reader = XportReader(
filepath_or_buffer,
index=index,
encoding=encoding,
chunksize=chunksize,
compression=compression,
)
elif format.lower() == "sas7bdat":
from pandas.io.sas.sas7bdat import SAS7BDATReader
reader = SAS7BDATReader(
filepath_or_buffer,
index=index,
encoding=encoding,
chunksize=chunksize,
compression=compression,
)
else:
raise ValueError("unknown SAS format")
if iterator or chunksize:
return reader
with reader:
return reader.read()
|
(filepath_or_buffer: 'FilePath | ReadBuffer[bytes]', *, format: 'str | None' = None, index: 'Hashable | None' = None, encoding: 'str | None' = None, chunksize: 'int | None' = None, iterator: 'bool' = False, compression: 'CompressionOptions' = 'infer') -> 'DataFrame | ReaderBase'
|
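A hedged sketch of the format handling shown in the code above: a buffer has no file extension, so ``format`` must be given explicitly, while chunked reading returns a reader object usable as a context manager. ``data.sas7bdat`` is a placeholder file name.

import pandas as pd

# Explicit format is required when passing a buffer instead of a path
with open("data.sas7bdat", "rb") as fh:
    df = pd.read_sas(fh, format="sas7bdat", encoding="latin-1")

# With chunksize, a reader is returned instead of a DataFrame
with pd.read_sas("data.sas7bdat", chunksize=1_000) as reader:
    for chunk in reader:
        pass  # process each DataFrame chunk here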
68,369 |
pandas.io.spss
|
read_spss
|
Load an SPSS file from the file path, returning a DataFrame.
Parameters
----------
path : str or Path
File path.
usecols : list-like, optional
Return a subset of the columns. If None, return all columns.
convert_categoricals : bool, default is True
Convert categorical columns into pd.Categorical.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
DataFrame
Examples
--------
>>> df = pd.read_spss("spss_data.sav") # doctest: +SKIP
|
def read_spss(
path: str | Path,
usecols: Sequence[str] | None = None,
convert_categoricals: bool = True,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
) -> DataFrame:
"""
Load an SPSS file from the file path, returning a DataFrame.
Parameters
----------
path : str or Path
File path.
usecols : list-like, optional
Return a subset of the columns. If None, return all columns.
convert_categoricals : bool, default is True
Convert categorical columns into pd.Categorical.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
DataFrame
Examples
--------
>>> df = pd.read_spss("spss_data.sav") # doctest: +SKIP
"""
pyreadstat = import_optional_dependency("pyreadstat")
check_dtype_backend(dtype_backend)
if usecols is not None:
if not is_list_like(usecols):
raise TypeError("usecols must be list-like.")
usecols = list(usecols) # pyreadstat requires a list
df, metadata = pyreadstat.read_sav(
stringify_path(path), usecols=usecols, apply_value_formats=convert_categoricals
)
df.attrs = metadata.__dict__
if dtype_backend is not lib.no_default:
df = df.convert_dtypes(dtype_backend=dtype_backend)
return df
|
(path: 'str | Path', usecols: 'Sequence[str] | None' = None, convert_categoricals: 'bool' = True, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>) -> 'DataFrame'
|
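A hedged sketch for ``read_spss``, assuming pyreadstat is installed and ``survey.sav`` is a placeholder SPSS file on disk; the column names used with ``usecols`` are illustrative.

import pandas as pd

df = pd.read_spss(
    "survey.sav",
    usecols=["id", "age"],           # read only these columns
    convert_categoricals=False,      # keep raw codes instead of pd.Categorical
    dtype_backend="numpy_nullable",  # opt into nullable dtypes
)
print(df.attrs)  # pyreadstat metadata is attached to the frame's attrs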
68,370 |
pandas.io.sql
|
read_sql
|
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : ADBC Connection, SQLAlchemy connectable, str, or sqlite3 connection
ADBC provides high performance I/O with native type support, where available.
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the ADBC connection and
SQLAlchemy connectable; str connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/20/core/connections.html>`_.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
{'a': np.float64, 'b': np.int32, 'c': 'Int64'}.
The argument is ignored if a table is passed instead of a query.
.. versionadded:: 2.0.0
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
Examples
--------
Read data from SQL via either a SQL query or a SQL table name.
When using a SQLite database, only SQL queries are accepted;
providing only the SQL table name will result in an error.
>>> from sqlite3 import connect
>>> conn = connect(':memory:')
>>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
... columns=['int_column', 'date_column'])
>>> df.to_sql(name='test_data', con=conn)
2
>>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
int_column date_column
0 0 10/11/12
1 1 12/11/10
>>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP
Apply date parsing to columns through the ``parse_dates`` argument
The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns.
Custom argument values for applying ``pd.to_datetime`` on a column are specified
via a dictionary format:
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates={"date_column": {"format": "%d/%m/%y"}})
int_column date_column
0 0 2012-11-10
1 1 2010-11-12
.. versionadded:: 2.2.0
pandas now supports reading via ADBC drivers
>>> from adbc_driver_postgresql import dbapi # doctest:+SKIP
>>> with dbapi.connect('postgres:///db_name') as conn: # doctest:+SKIP
... pd.read_sql('SELECT int_column FROM test_data', conn)
int_column
0 0
1 1
|
def read_sql(
sql,
con,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
params=None,
parse_dates=None,
columns: list[str] | None = None,
chunksize: int | None = None,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
dtype: DtypeArg | None = None,
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : ADBC Connection, SQLAlchemy connectable, str, or sqlite3 connection
ADBC provides high performance I/O with native type support, where available.
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the ADBC connection and
SQLAlchemy connectable; str connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/20/core/connections.html>`_.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
{'a': np.float64, 'b': np.int32, 'c': 'Int64'}.
The argument is ignored if a table is passed instead of a query.
.. versionadded:: 2.0.0
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
Examples
--------
Read data from SQL via either a SQL query or a SQL table name.
When using a SQLite database, only SQL queries are accepted;
providing only the SQL table name will result in an error.
>>> from sqlite3 import connect
>>> conn = connect(':memory:')
>>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
... columns=['int_column', 'date_column'])
>>> df.to_sql(name='test_data', con=conn)
2
>>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
int_column date_column
0 0 10/11/12
1 1 12/11/10
>>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP
Apply date parsing to columns through the ``parse_dates`` argument
The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns.
Custom argument values for applying ``pd.to_datetime`` on a column are specified
via a dictionary format:
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates={"date_column": {"format": "%d/%m/%y"}})
int_column date_column
0 0 2012-11-10
1 1 2010-11-12
.. versionadded:: 2.2.0
pandas now supports reading via ADBC drivers
>>> from adbc_driver_postgresql import dbapi # doctest:+SKIP
>>> with dbapi.connect('postgres:///db_name') as conn: # doctest:+SKIP
... pd.read_sql('SELECT int_column FROM test_data', conn)
int_column
0 0
1 1
"""
check_dtype_backend(dtype_backend)
if dtype_backend is lib.no_default:
dtype_backend = "numpy" # type: ignore[assignment]
assert dtype_backend is not lib.no_default
with pandasSQL_builder(con) as pandas_sql:
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
dtype_backend=dtype_backend,
dtype=dtype,
)
try:
_is_table_name = pandas_sql.has_table(sql)
except Exception:
# using generic exception to catch errors from sql drivers (GH24988)
_is_table_name = False
if _is_table_name:
return pandas_sql.read_table(
sql,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
dtype_backend=dtype_backend,
)
else:
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
dtype_backend=dtype_backend,
dtype=dtype,
)
|
(sql, con, index_col: 'str | list[str] | None' = None, coerce_float: 'bool' = True, params=None, parse_dates=None, columns: 'list[str] | None' = None, chunksize: 'int | None' = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>, dtype: 'DtypeArg | None' = None) -> 'DataFrame | Iterator[DataFrame]'
|
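A hedged sketch of the delegation described above, using an in-memory SQLite connection; with plain sqlite3 only queries are accepted, so a bare table name is only routed to ``read_sql_table`` when a SQLAlchemy connectable is used.

import sqlite3

import pandas as pd

conn = sqlite3.connect(":memory:")
pd.DataFrame({"x": [1, 2, 3]}).to_sql("t", conn, index=False)

# A query string is routed to read_sql_query under the hood
out = pd.read_sql("SELECT x FROM t WHERE x > 1", conn)
print(out)
conn.close()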
68,371 |
pandas.io.sql
|
read_sql_query
|
Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : str SQL query or SQLAlchemy Selectable (select or text object)
SQL query to be executed.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Useful for SQL result sets.
params : list, tuple or mapping, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
{'a': np.float64, 'b': np.int32, 'c': 'Int64'}.
.. versionadded:: 1.3.0
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC.
Examples
--------
>>> from sqlalchemy import create_engine # doctest: +SKIP
>>> engine = create_engine("sqlite:///database.db") # doctest: +SKIP
>>> with engine.connect() as conn, conn.begin(): # doctest: +SKIP
... data = pd.read_sql_table("data", conn) # doctest: +SKIP
|
def read_sql_query(
sql,
con,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
params: list[Any] | Mapping[str, Any] | None = None,
parse_dates: list[str] | dict[str, str] | None = None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : str SQL query or SQLAlchemy Selectable (select or text object)
SQL query to be executed.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Useful for SQL result sets.
params : list, tuple or mapping, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
{'a': np.float64, 'b': np.int32, 'c': 'Int64'}.
.. versionadded:: 1.3.0
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC.
Examples
--------
>>> from sqlalchemy import create_engine # doctest: +SKIP
>>> engine = create_engine("sqlite:///database.db") # doctest: +SKIP
>>> with engine.connect() as conn, conn.begin(): # doctest: +SKIP
... data = pd.read_sql_table("data", conn) # doctest: +SKIP
"""
check_dtype_backend(dtype_backend)
if dtype_backend is lib.no_default:
dtype_backend = "numpy" # type: ignore[assignment]
assert dtype_backend is not lib.no_default
with pandasSQL_builder(con) as pandas_sql:
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
dtype=dtype,
dtype_backend=dtype_backend,
)
|
(sql, con, index_col: 'str | list[str] | None' = None, coerce_float: 'bool' = True, params: 'list[Any] | Mapping[str, Any] | None' = None, parse_dates: 'list[str] | dict[str, str] | None' = None, chunksize: 'int | None' = None, dtype: 'DtypeArg | None' = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>) -> 'DataFrame | Iterator[DataFrame]'
|
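A hedged sketch of parameterised queries: sqlite3 uses the "qmark" paramstyle, so parameters are passed positionally with ``?`` placeholders (other drivers use different styles, as noted above).

import sqlite3

import pandas as pd

conn = sqlite3.connect(":memory:")
pd.DataFrame({"x": [1, 2, 3], "y": list("abc")}).to_sql("t", conn, index=False)

out = pd.read_sql_query("SELECT * FROM t WHERE x >= ?", conn, params=(2,))
print(out)
conn.close()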
68,372 |
pandas.io.sql
|
read_sql_table
|
Read SQL database table into a DataFrame.
Given a table name and a SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : str
Name of SQL table in database.
con : SQLAlchemy connectable or str
A database URI could be provided as str.
SQLite DBAPI connection mode not supported.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default None
List of column names to select from SQL table.
chunksize : int, default None
If specified, returns an iterator where `chunksize` is the number of
rows to include in each chunk.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
DataFrame or Iterator[DataFrame]
A SQL table is returned as a two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information will be converted to UTC.
Examples
--------
>>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
|
def read_sql_table(
table_name: str,
con,
schema: str | None = None,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
parse_dates: list[str] | dict[str, str] | None = None,
columns: list[str] | None = None,
chunksize: int | None = None,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL database table into a DataFrame.
Given a table name and a SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : str
Name of SQL table in database.
con : SQLAlchemy connectable or str
A database URI could be provided as str.
SQLite DBAPI connection mode not supported.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default None
List of column names to select from SQL table.
chunksize : int, default None
If specified, returns an iterator where `chunksize` is the number of
rows to include in each chunk.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
DataFrame or Iterator[DataFrame]
A SQL table is returned as a two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information will be converted to UTC.
Examples
--------
>>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
"""
check_dtype_backend(dtype_backend)
if dtype_backend is lib.no_default:
dtype_backend = "numpy" # type: ignore[assignment]
assert dtype_backend is not lib.no_default
with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql:
if not pandas_sql.has_table(table_name):
raise ValueError(f"Table {table_name} not found")
table = pandas_sql.read_table(
table_name,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
dtype_backend=dtype_backend,
)
if table is not None:
return table
else:
raise ValueError(f"Table {table_name} not found", con)
|
(table_name: 'str', con, schema: 'str | None' = None, index_col: 'str | list[str] | None' = None, coerce_float: 'bool' = True, parse_dates: 'list[str] | dict[str, str] | None' = None, columns: 'list[str] | None' = None, chunksize: 'int | None' = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>) -> 'DataFrame | Iterator[DataFrame]'
|
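A hedged sketch for ``read_sql_table``, assuming SQLAlchemy is installed; plain DBAPI connections are not supported, so an engine is created first.

import pandas as pd
from sqlalchemy import create_engine

# SQLAlchemy keeps a single shared connection for in-memory SQLite,
# so the table written below remains visible to read_sql_table
engine = create_engine("sqlite:///:memory:")
pd.DataFrame({"x": [1, 2], "y": ["a", "b"]}).to_sql("t", engine, index=False)

df = pd.read_sql_table("t", engine, columns=["x"])
print(df)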
68,373 |
pandas.io.stata
|
read_stata
|
Read Stata file into DataFrame.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.dta``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
convert_dates : bool, default True
Convert date variables to DataFrame time values.
convert_categoricals : bool, default True
Read value labels and convert columns to Categorical/Factor variables.
index_col : str, optional
Column to set as index.
convert_missing : bool, default False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : bool, default True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns.
order_categoricals : bool, default True
Flag indicating whether converted categorical data are ordered.
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines.
iterator : bool, default False
Return StataReader object.
compression : str or dict, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and 'filepath_or_buffer' is
path-like, then detect compression from the following extensions: '.gz',
'.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'
(otherwise no compression).
If using 'zip' or 'tar', the ZIP file must contain only one data file to be read in.
Set to ``None`` for no decompression.
Can also be a dict with key ``'method'`` set
to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and
other key-value pairs are forwarded to
``zipfile.ZipFile``, ``gzip.GzipFile``,
``bz2.BZ2File``, ``zstandard.ZstdDecompressor``, ``lzma.LZMAFile`` or
``tarfile.TarFile``, respectively.
As an example, the following could be passed for Zstandard decompression using a
custom compression dictionary:
``compression={'method': 'zstd', 'dict_data': my_compression_dict}``.
.. versionadded:: 1.5.0
Added support for `.tar` files.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
are forwarded to ``urllib.request.Request`` as header options. For other
URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
details, and for more examples on storage options refer `here
<https://pandas.pydata.org/docs/user_guide/io.html?
highlight=storage_options#reading-writing-remote-files>`_.
Returns
-------
DataFrame or pandas.api.typing.StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
DataFrame.to_stata: Export Stata data files.
Notes
-----
Categorical variables read through an iterator may not have the same
categories and dtype. This occurs when a variable stored in a DTA
file is associated with an incomplete set of value labels that only
label a strict subset of the values.
Examples
--------
Creating a dummy Stata file for this example:
>>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon', 'parrot'],
... 'speed': [350, 18, 361, 15]}) # doctest: +SKIP
>>> df.to_stata('animals.dta') # doctest: +SKIP
Read a Stata dta file:
>>> df = pd.read_stata('animals.dta') # doctest: +SKIP
Read a Stata dta file in 10,000 line chunks:
>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8") # doctest: +SKIP
>>> df = pd.DataFrame(values, columns=["i"]) # doctest: +SKIP
>>> df.to_stata('filename.dta') # doctest: +SKIP
>>> with pd.read_stata('filename.dta', chunksize=10000) as itr:  # doctest: +SKIP
...     for chunk in itr:
...         # Operate on a single chunk, e.g., chunk.mean()
...         pass  # doctest: +SKIP
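Read only a subset of columns and set one of them as the index (a minimal sketch,
reusing the hypothetical 'animals.dta' file created above):
>>> df = pd.read_stata('animals.dta', columns=['animal', 'speed'],
...                    index_col='animal')  # doctest: +SKIP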
|
@Appender(_read_stata_doc)
def read_stata(
filepath_or_buffer: FilePath | ReadBuffer[bytes],
*,
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: str | None = None,
convert_missing: bool = False,
preserve_dtypes: bool = True,
columns: Sequence[str] | None = None,
order_categoricals: bool = True,
chunksize: int | None = None,
iterator: bool = False,
compression: CompressionOptions = "infer",
storage_options: StorageOptions | None = None,
) -> DataFrame | StataReader:
reader = StataReader(
filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index_col=index_col,
convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize,
storage_options=storage_options,
compression=compression,
)
if iterator or chunksize:
return reader
with reader:
return reader.read()
|
(filepath_or_buffer: 'FilePath | ReadBuffer[bytes]', *, convert_dates: 'bool' = True, convert_categoricals: 'bool' = True, index_col: 'str | None' = None, convert_missing: 'bool' = False, preserve_dtypes: 'bool' = True, columns: 'Sequence[str] | None' = None, order_categoricals: 'bool' = True, chunksize: 'int | None' = None, iterator: 'bool' = False, compression: 'CompressionOptions' = 'infer', storage_options: 'StorageOptions | None' = None) -> 'DataFrame | StataReader'
|
68,374 |
pandas.io.parsers.readers
|
read_table
|
Read general delimited file into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default '\\t' (tab-stop)
Character or regex pattern to treat as the delimiter. If ``sep=None``, the
C engine cannot automatically detect
the separator, but the Python parsing engine can: in that case the latter
will be used and will automatically detect the separator from only the first valid
row of the file using Python's builtin sniffer tool, ``csv.Sniffer``.
In addition, separators longer than 1 character and different from
``'\s+'`` will be interpreted as regular expressions and will also force
the use of the Python parsing engine. Note that regex delimiters are prone
to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, optional
Alias for ``sep``.
header : int, Sequence of int, 'infer' or None, default 'infer'
Row number(s) containing column labels and marking the start of the
data (zero-indexed). Default behavior is to infer the column names: if no ``names``
are passed the behavior is identical to ``header=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly to ``names`` then the behavior is identical to
``header=None``. Explicitly pass ``header=0`` to be able to
replace existing names. The header can be a list of integers that
specify row locations for a :class:`~pandas.MultiIndex` on the columns
e.g. ``[0, 1, 3]``. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header=0`` denotes the first line of
data rather than the first line of the file.
names : Sequence of Hashable, optional
Sequence of column labels to apply. If the file contains a header row,
then you should explicitly pass ``header=0`` to override the column names.
Duplicates in this list are not allowed.
index_col : Hashable, Sequence of Hashable or False, optional
Column(s) to use as row label(s), denoted either by column labels or column
indices. If a sequence of labels or indices is given, :class:`~pandas.MultiIndex`
will be formed for the row labels.
Note: ``index_col=False`` can be used to force pandas to *not* use the first
column as the index, e.g., when you have a malformed file with delimiters at
the end of each line.
usecols : Sequence of Hashable or Callable, optional
Subset of columns to select, denoted either by column labels or column indices.
If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in ``names`` or
inferred from the document header row(s). If ``names`` are given, the document
header row(s) are not taken into account. For example, a valid list-like
``usecols`` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a :class:`~pandas.DataFrame` from ``data`` with element order
preserved use ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]``
for columns in ``['foo', 'bar']`` order or
``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to ``True``. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
dtype : dtype or dict of {Hashable : dtype}, optional
Data type(s) to apply to either the whole dataset or individual columns.
E.g., ``{'a': np.float64, 'b': np.int32, 'c': 'Int64'}``
Use ``str`` or ``object`` together with suitable ``na_values`` settings
to preserve and not interpret ``dtype``.
If ``converters`` are specified, they will be applied INSTEAD
of ``dtype`` conversion.
.. versionadded:: 1.5.0
Support for ``defaultdict`` was added. Specify a ``defaultdict`` as input where
the default determines the ``dtype`` of the columns which are not explicitly
listed.
engine : {'c', 'python', 'pyarrow'}, optional
Parser engine to use. The C and pyarrow engines are faster, while the python engine
is currently more feature-complete. Multithreading is currently only supported by
the pyarrow engine.
.. versionadded:: 1.4.0
The 'pyarrow' engine was added as an *experimental* engine, and some features
are unsupported, or may not work correctly, with this engine.
converters : dict of {Hashable : Callable}, optional
Functions for converting values in specified columns. Keys can either
be column labels or column indices.
true_values : list, optional
Values to consider as ``True`` in addition to case-insensitive variants of 'True'.
false_values : list, optional
Values to consider as ``False`` in addition to case-insensitive variants of 'False'.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : int, list of int or Callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (``int``)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning ``True`` if the row should be skipped and ``False`` otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with ``engine='c'``).
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : Hashable, Iterable of Hashable or dict of {Hashable : Iterable}, optional
Additional strings to recognize as ``NA``/``NaN``. If ``dict`` passed, specific
per-column ``NA`` values. By default the following values are interpreted as
``NaN``: " ", "#N/A", "#N/A N/A", "#NA", "-1.#IND", "-1.#QNAN", "-NaN", "-nan",
"1.#IND", "1.#QNAN", "<NA>", "N/A", "NA", "NULL", "NaN", "None",
"n/a", "nan", "null ".
keep_default_na : bool, default True
Whether or not to include the default ``NaN`` values when parsing the data.
Depending on whether ``na_values`` is passed in, the behavior is as follows:
* If ``keep_default_na`` is ``True``, and ``na_values`` are specified, ``na_values``
is appended to the default ``NaN`` values used for parsing.
* If ``keep_default_na`` is ``True``, and ``na_values`` are not specified, only
the default ``NaN`` values are used for parsing.
* If ``keep_default_na`` is ``False``, and ``na_values`` are specified, only
the ``NaN`` values specified ``na_values`` are used for parsing.
* If ``keep_default_na`` is ``False``, and ``na_values`` are not specified, no
strings will be parsed as ``NaN``.
Note that if ``na_filter`` is passed in as ``False``, the ``keep_default_na`` and
``na_values`` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of ``na_values``). In
data without any ``NA`` values, passing ``na_filter=False`` can improve the
performance of reading a large file.
verbose : bool, default False
Indicate number of ``NA`` values placed in non-numeric columns.
.. deprecated:: 2.2.0
skip_blank_lines : bool, default True
If ``True``, skip over blank lines rather than interpreting as ``NaN`` values.
parse_dates : bool, list of Hashable, list of lists or dict of {Hashable : list}, default False
The behavior is as follows:
* ``bool``. If ``True`` -> try parsing the index. Note: Automatically set to
``True`` if ``date_format`` or ``date_parser`` arguments have been passed.
* ``list`` of ``int`` or names. e.g. If ``[1, 2, 3]`` -> try parsing columns 1, 2, 3
each as a separate date column.
* ``list`` of ``list``. e.g. If ``[[1, 3]]`` -> combine columns 1 and 3 and parse
as a single date column. Values are joined with a space before parsing.
* ``dict``, e.g. ``{'foo' : [1, 3]}`` -> parse columns 1, 3 as date and call
result 'foo'. Values are joined with a space before parsing.
If a column or index cannot be represented as an array of ``datetime``,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an ``object`` data type. For
non-standard ``datetime`` parsing, use :func:`~pandas.to_datetime` after
:func:`~pandas.read_csv`.
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : bool, default False
If ``True`` and ``parse_dates`` is enabled, pandas will attempt to infer the
format of the ``datetime`` strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
.. deprecated:: 2.0.0
A strict version of this argument is now the default; passing it has no effect.
keep_date_col : bool, default False
If ``True`` and ``parse_dates`` specifies combining multiple columns then
keep the original columns.
date_parser : Callable, optional
Function to use for converting a sequence of string columns to an array of
``datetime`` instances. The default uses ``dateutil.parser.parser`` to do the
conversion. pandas will try to call ``date_parser`` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by ``parse_dates``) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by ``parse_dates`` into a single array
and pass that; and 3) call ``date_parser`` once for each row using one or
more strings (corresponding to the columns defined by ``parse_dates``) as
arguments.
.. deprecated:: 2.0.0
Use ``date_format`` instead, or read in as ``object`` and then apply
:func:`~pandas.to_datetime` as-needed.
date_format : str or dict of column -> format, optional
Format to use for parsing dates when used in conjunction with ``parse_dates``.
The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See
`strftime documentation
<https://docs.python.org/3/library/datetime.html
#strftime-and-strptime-behavior>`_ for more information on choices, though
note that :const:`"%f"` will parse all the way up to nanoseconds.
You can also pass:
- "ISO8601", to parse any `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_
time string (not necessarily in exactly the same format);
- "mixed", to infer the format for each element individually. This is risky,
and you should probably use it along with `dayfirst`.
.. versionadded:: 2.0.0
dayfirst : bool, default False
DD/MM format dates, international and European format.
cache_dates : bool, default True
If ``True``, use a cache of unique, converted dates to apply the ``datetime``
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
iterator : bool, default False
Return ``TextFileReader`` object for iteration or getting chunks with
``get_chunk()``.
chunksize : int, optional
Number of lines to read from the file per chunk. Passing a value will cause the
function to return a ``TextFileReader`` object for iteration.
See the `IO Tools docs
<https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
compression : str or dict, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and 'filepath_or_buffer' is
path-like, then detect compression from the following extensions: '.gz',
'.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'
(otherwise no compression).
If using 'zip' or 'tar', the archive must contain only one data file to be read in.
Set to ``None`` for no decompression.
Can also be a dict with key ``'method'`` set
to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and
other key-value pairs are forwarded to
``zipfile.ZipFile``, ``gzip.GzipFile``,
``bz2.BZ2File``, ``zstandard.ZstdDecompressor``, ``lzma.LZMAFile`` or
``tarfile.TarFile``, respectively.
As an example, the following could be passed for Zstandard decompression using a
custom compression dictionary:
``compression={'method': 'zstd', 'dict_data': my_compression_dict}``.
.. versionadded:: 1.5.0
Added support for `.tar` files.
.. versionchanged:: 1.4.0 Zstandard support.
thousands : str (length 1), optional
Character acting as the thousands separator in numerical values.
decimal : str (length 1), default '.'
Character to recognize as decimal point (e.g., use ',' for European data).
lineterminator : str (length 1), optional
Character used to denote a line break. Only valid with C parser.
quotechar : str (length 1), optional
Character used to denote the start and end of a quoted item. Quoted
items can include the ``delimiter`` and it will be ignored.
quoting : {0 or csv.QUOTE_MINIMAL, 1 or csv.QUOTE_ALL, 2 or csv.QUOTE_NONNUMERIC, 3 or csv.QUOTE_NONE}, default csv.QUOTE_MINIMAL
Control field quoting behavior per ``csv.QUOTE_*`` constants. Default is
``csv.QUOTE_MINIMAL`` (i.e., 0) which implies that only fields containing special
characters are quoted (e.g., characters defined in ``quotechar``, ``delimiter``,
or ``lineterminator``).
doublequote : bool, default True
When ``quotechar`` is specified and ``quoting`` is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive ``quotechar`` elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), optional
Character used to escape other characters.
comment : str (length 1), optional
Character indicating that the remainder of line should not be parsed.
If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter ``header`` but not by
``skiprows``. For example, if ``comment='#'``, parsing
``#empty\na,b,c\n1,2,3`` with ``header=0`` will result in ``'a,b,c'`` being
treated as the header.
encoding : str, optional, default 'utf-8'
Encoding to use for UTF when reading/writing (ex. ``'utf-8'``). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
encoding_errors : str, optional, default 'strict'
How encoding errors are treated. `List of possible values
<https://docs.python.org/3/library/codecs.html#error-handlers>`_ .
.. versionadded:: 1.3.0
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: ``delimiter``, ``doublequote``, ``escapechar``,
``skipinitialspace``, ``quotechar``, and ``quoting``. If it is necessary to
override values, a ``ParserWarning`` will be issued. See ``csv.Dialect``
documentation for more details.
on_bad_lines : {'error', 'warn', 'skip'} or Callable, default 'error'
Specifies what to do upon encountering a bad line (a line with too many fields).
Allowed values are:
- ``'error'``, raise an Exception when a bad line is encountered.
- ``'warn'``, raise a warning when a bad line is encountered and skip that line.
- ``'skip'``, skip bad lines without raising or warning when they are encountered.
.. versionadded:: 1.3.0
.. versionadded:: 1.4.0
- Callable, function with signature
``(bad_line: list[str]) -> list[str] | None`` that will process a single
bad line. ``bad_line`` is a list of strings split by the ``sep``.
If the function returns ``None``, the bad line will be ignored.
If the function returns a new ``list`` of strings with more elements than
expected, a ``ParserWarning`` will be emitted while dropping extra elements.
Only supported when ``engine='python'``.
.. versionchanged:: 2.2.0
- Callable, function with signature
as described in `pyarrow documentation
<https://arrow.apache.org/docs/python/generated/pyarrow.csv.ParseOptions.html
#pyarrow.csv.ParseOptions.invalid_row_handler>`_ when ``engine='pyarrow'``
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the ``sep`` delimiter. Equivalent to setting ``sep='\s+'``. If this option
is set to ``True``, nothing should be passed in for the ``delimiter``
parameter.
.. deprecated:: 2.2.0
Use ``sep="\s+"`` instead.
low_memory : bool, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types, either set ``low_memory=False`` or specify the type with the ``dtype`` parameter.
Note that the entire file is read into a single :class:`~pandas.DataFrame`
regardless; use the ``chunksize`` or ``iterator`` parameter to return the data in
chunks. (Only valid with C parser).
memory_map : bool, default False
If a filepath is provided for ``filepath_or_buffer``, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
float_precision : {'high', 'legacy', 'round_trip'}, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or ``'high'`` for the ordinary converter,
``'legacy'`` for the original lower precision pandas converter, and
``'round_trip'`` for the round-trip converter.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
are forwarded to ``urllib.request.Request`` as header options. For other
URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
details, and for more examples on storage options refer `here
<https://pandas.pydata.org/docs/user_guide/io.html?
highlight=storage_options#reading-writing-remote-files>`_.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
DataFrame or TextFileReader
A comma-separated values (csv) file is returned as a two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
>>> pd.read_table('data.csv') # doctest: +SKIP
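Two additional sketches with inline data (the column names and values below are
made up for illustration). A callable ``usecols`` keeps only the columns for which
it returns ``True``, and ``na_values`` adds extra missing-value markers:
>>> from io import StringIO
>>> data = 'col1\tcol2\tcol3\n1\tfoo\t-999\n2\tbar\t3\n'
>>> pd.read_table(StringIO(data),
...               usecols=lambda name: name in {'col1', 'col3'},
...               na_values=['-999'])  # doctest: +SKIP
With ``engine='python'``, ``on_bad_lines`` may be a callable that receives the split
fields of a bad line and returns a repaired list (or ``None`` to drop the line); the
handler below is hypothetical and simply truncates the extra fields:
>>> bad = 'a\tb\tc\n1\t2\t3\n4\t5\t6\t7\n'
>>> pd.read_table(StringIO(bad), engine='python',
...               on_bad_lines=lambda fields: fields[:3])  # doctest: +SKIP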
|
@Appender(
_doc_read_csv_and_table.format(
func_name="read_table",
summary="Read general delimited file into DataFrame.",
see_also_func_name="read_csv",
see_also_func_summary=(
"Read a comma-separated values (csv) file into DataFrame."
),
_default_sep=r"'\\t' (tab-stop)",
storage_options=_shared_docs["storage_options"],
decompression_options=_shared_docs["decompression_options"]
% "filepath_or_buffer",
)
)
def read_table(
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
*,
sep: str | None | lib.NoDefault = lib.no_default,
delimiter: str | None | lib.NoDefault = None,
# Column and Index Locations and Names
header: int | Sequence[int] | None | Literal["infer"] = "infer",
names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default,
index_col: IndexLabel | Literal[False] | None = None,
usecols: UsecolsArgType = None,
# General Parsing Configuration
dtype: DtypeArg | None = None,
engine: CSVEngine | None = None,
converters: Mapping[Hashable, Callable] | None = None,
true_values: list | None = None,
false_values: list | None = None,
skipinitialspace: bool = False,
skiprows: list[int] | int | Callable[[Hashable], bool] | None = None,
skipfooter: int = 0,
nrows: int | None = None,
# NA and Missing Data Handling
na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None,
keep_default_na: bool = True,
na_filter: bool = True,
verbose: bool | lib.NoDefault = lib.no_default,
skip_blank_lines: bool = True,
# Datetime Handling
parse_dates: bool | Sequence[Hashable] = False,
infer_datetime_format: bool | lib.NoDefault = lib.no_default,
keep_date_col: bool | lib.NoDefault = lib.no_default,
date_parser: Callable | lib.NoDefault = lib.no_default,
date_format: str | dict[Hashable, str] | None = None,
dayfirst: bool = False,
cache_dates: bool = True,
# Iteration
iterator: bool = False,
chunksize: int | None = None,
# Quoting, Compression, and File Format
compression: CompressionOptions = "infer",
thousands: str | None = None,
decimal: str = ".",
lineterminator: str | None = None,
quotechar: str = '"',
quoting: int = csv.QUOTE_MINIMAL,
doublequote: bool = True,
escapechar: str | None = None,
comment: str | None = None,
encoding: str | None = None,
encoding_errors: str | None = "strict",
dialect: str | csv.Dialect | None = None,
# Error Handling
on_bad_lines: str = "error",
# Internal
delim_whitespace: bool | lib.NoDefault = lib.no_default,
low_memory: bool = _c_parser_defaults["low_memory"],
memory_map: bool = False,
float_precision: str | None = None,
storage_options: StorageOptions | None = None,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
) -> DataFrame | TextFileReader:
if keep_date_col is not lib.no_default:
# GH#55569
warnings.warn(
"The 'keep_date_col' keyword in pd.read_table is deprecated and "
"will be removed in a future version. Explicitly remove unwanted "
"columns after parsing instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
else:
keep_date_col = False
# error: Item "bool" of "bool | Sequence[Hashable]" has no attribute "__iter__"
if lib.is_list_like(parse_dates) and not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr]
# GH#55569
warnings.warn(
"Support for nested sequences for 'parse_dates' in pd.read_table "
"is deprecated. Combine the desired columns with pd.to_datetime "
"after parsing instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
if infer_datetime_format is not lib.no_default:
warnings.warn(
"The argument 'infer_datetime_format' is deprecated and will "
"be removed in a future version. "
"A strict version of it is now the default, see "
"https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
"You can safely remove this argument.",
FutureWarning,
stacklevel=find_stack_level(),
)
if delim_whitespace is not lib.no_default:
# GH#55569
warnings.warn(
"The 'delim_whitespace' keyword in pd.read_table is deprecated and "
"will be removed in a future version. Use ``sep='\\s+'`` instead",
FutureWarning,
stacklevel=find_stack_level(),
)
else:
delim_whitespace = False
if verbose is not lib.no_default:
# GH#55569
warnings.warn(
"The 'verbose' keyword in pd.read_table is deprecated and "
"will be removed in a future version.",
FutureWarning,
stacklevel=find_stack_level(),
)
else:
verbose = False
# locals() should never be modified
kwds = locals().copy()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect,
delimiter,
delim_whitespace,
engine,
sep,
on_bad_lines,
names,
defaults={"delimiter": "\t"},
dtype_backend=dtype_backend,
)
kwds.update(kwds_defaults)
return _read(filepath_or_buffer, kwds)
|
(filepath_or_buffer: 'FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str]', *, sep: 'str | None | lib.NoDefault' = <no_default>, delimiter: 'str | None | lib.NoDefault' = None, header: "int | Sequence[int] | None | Literal['infer']" = 'infer', names: 'Sequence[Hashable] | None | lib.NoDefault' = <no_default>, index_col: 'IndexLabel | Literal[False] | None' = None, usecols: 'UsecolsArgType' = None, dtype: 'DtypeArg | None' = None, engine: 'CSVEngine | None' = None, converters: 'Mapping[Hashable, Callable] | None' = None, true_values: 'list | None' = None, false_values: 'list | None' = None, skipinitialspace: 'bool' = False, skiprows: 'list[int] | int | Callable[[Hashable], bool] | None' = None, skipfooter: 'int' = 0, nrows: 'int | None' = None, na_values: 'Sequence[str] | Mapping[str, Sequence[str]] | None' = None, keep_default_na: 'bool' = True, na_filter: 'bool' = True, verbose: 'bool | lib.NoDefault' = <no_default>, skip_blank_lines: 'bool' = True, parse_dates: 'bool | Sequence[Hashable]' = False, infer_datetime_format: 'bool | lib.NoDefault' = <no_default>, keep_date_col: 'bool | lib.NoDefault' = <no_default>, date_parser: 'Callable | lib.NoDefault' = <no_default>, date_format: 'str | dict[Hashable, str] | None' = None, dayfirst: 'bool' = False, cache_dates: 'bool' = True, iterator: 'bool' = False, chunksize: 'int | None' = None, compression: 'CompressionOptions' = 'infer', thousands: 'str | None' = None, decimal: 'str' = '.', lineterminator: 'str | None' = None, quotechar: 'str' = '"', quoting: 'int' = 0, doublequote: 'bool' = True, escapechar: 'str | None' = None, comment: 'str | None' = None, encoding: 'str | None' = None, encoding_errors: 'str | None' = 'strict', dialect: 'str | csv.Dialect | None' = None, on_bad_lines: 'str' = 'error', delim_whitespace: 'bool | lib.NoDefault' = <no_default>, low_memory: 'bool' = True, memory_map: 'bool' = False, float_precision: 'str | None' = None, storage_options: 'StorageOptions | None' = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>) -> 'DataFrame | TextFileReader'
|
68,375 |
pandas.io.xml
|
read_xml
|
Read XML document into a :class:`~pandas.DataFrame` object.
.. versionadded:: 1.3.0
Parameters
----------
path_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a ``read()`` function. The string can be any valid XML
string or a path. The string can further be a URL. Valid URL schemes
include http, ftp, s3, and file.
.. deprecated:: 2.1.0
Passing xml literal strings is deprecated.
Wrap literal xml input in ``io.StringIO`` or ``io.BytesIO`` instead.
xpath : str, optional, default './\*'
The ``XPath`` to parse the required set of nodes for migration to
:class:`~pandas.DataFrame`. ``XPath`` should return a collection of elements
and not a single element. Note: The ``etree`` parser supports limited ``XPath``
expressions. For more complex ``XPath``, use ``lxml`` which requires
installation.
namespaces : dict, optional
The namespaces defined in XML document as dicts with key being
namespace prefix and value the URI. There is no need to include all
namespaces in XML, only the ones used in ``xpath`` expression.
Note: if the XML document uses a default namespace denoted as
`xmlns='<URI>'` without a prefix, you must assign a temporary
namespace prefix, such as 'doc', to the URI in order to parse
underlying nodes and/or attributes. For example, ::
namespaces = {"doc": "https://example.com"}
elems_only : bool, optional, default False
Parse only the child elements at the specified ``xpath``. By default,
all child elements and non-empty text nodes are returned.
attrs_only : bool, optional, default False
Parse only the attributes at the specified ``xpath``.
By default, all attributes are returned.
names : list-like, optional
Column names for DataFrame of parsed XML data. Use this parameter to
rename original element names and distinguish same named elements and
attributes.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32,
'c': 'Int64'}
Use `str` or `object` together with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
.. versionadded:: 1.5.0
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
.. versionadded:: 1.5.0
parse_dates : bool or list of int or names or list of lists or dict, default False
Identifiers to parse index or columns to datetime. The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call
result 'foo'
.. versionadded:: 1.5.0
encoding : str, optional, default 'utf-8'
Encoding of XML document.
parser : {'lxml','etree'}, default 'lxml'
Parser module to use for retrieval of data. Only 'lxml' and
'etree' are supported. With 'lxml' more complex ``XPath`` searches
and ability to use XSLT stylesheet are supported.
stylesheet : str, path object or file-like object
A URL, file-like object, or a raw string containing an XSLT script.
This stylesheet should flatten complex, deeply nested XML documents
for easier parsing. To use this feature you must have ``lxml`` module
installed and specify 'lxml' as ``parser``. The ``xpath`` must
reference nodes of transformed XML document generated after XSLT
transformation and not the original XML document. Only XSLT 1.0
scripts, and not later versions, are currently supported.
iterparse : dict, optional
The nodes or attributes to retrieve in iterparsing of XML document
as a dict with key being the name of repeating element and value being
list of elements or attribute names that are descendants of the repeated
element. Note: If this option is used, it will replace ``xpath`` parsing
and unlike ``xpath``, descendants do not need to relate to each other but can
exist anywhere in the document under the repeating element. This memory-
efficient method should be used for very large XML files (500MB, 1GB, or 5GB+).
For example, ::
iterparse = {"row_element": ["child_elem", "attr", "grandchild_elem"]}
.. versionadded:: 1.5.0
compression : str or dict, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and 'path_or_buffer' is
path-like, then detect compression from the following extensions: '.gz',
'.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'
(otherwise no compression).
If using 'zip' or 'tar', the archive must contain only one data file to be read in.
Set to ``None`` for no decompression.
Can also be a dict with key ``'method'`` set
to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and
other key-value pairs are forwarded to
``zipfile.ZipFile``, ``gzip.GzipFile``,
``bz2.BZ2File``, ``zstandard.ZstdDecompressor``, ``lzma.LZMAFile`` or
``tarfile.TarFile``, respectively.
As an example, the following could be passed for Zstandard decompression using a
custom compression dictionary:
``compression={'method': 'zstd', 'dict_data': my_compression_dict}``.
.. versionadded:: 1.5.0
Added support for `.tar` files.
.. versionchanged:: 1.4.0 Zstandard support.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
are forwarded to ``urllib.request.Request`` as header options. For other
URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
details, and for more examples on storage options refer `here
<https://pandas.pydata.org/docs/user_guide/io.html?
highlight=storage_options#reading-writing-remote-files>`_.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
df
A DataFrame.
See Also
--------
read_json : Convert a JSON string to pandas object.
read_html : Read HTML tables into a list of DataFrame objects.
Notes
-----
This method is best designed to import shallow XML documents in
the following format, which is the ideal fit for the two dimensions of a
``DataFrame`` (row by column). ::
<root>
<row>
<column1>data</column1>
<column2>data</column2>
<column3>data</column3>
...
</row>
<row>
...
</row>
...
</root>
As a file format, XML documents can be designed in any way, including
the layout of elements and attributes, as long as they conform to W3C
specifications. Therefore, this method is a convenience handler for
a specific flatter design and not all possible XML structures.
However, for more complex XML documents, ``stylesheet`` allows you to
temporarily redesign original document with XSLT (a special purpose
language) for a flatter version for migration to a DataFrame.
This function will *always* return a single :class:`DataFrame` or raise
exceptions due to issues with XML document, ``xpath``, or other
parameters.
See the :ref:`read_xml documentation in the IO section of the docs
<io.read_xml>` for more information on using this method to parse XML
files to DataFrames.
Examples
--------
>>> from io import StringIO
>>> xml = '''<?xml version='1.0' encoding='utf-8'?>
... <data xmlns="http://example.com">
... <row>
... <shape>square</shape>
... <degrees>360</degrees>
... <sides>4.0</sides>
... </row>
... <row>
... <shape>circle</shape>
... <degrees>360</degrees>
... <sides/>
... </row>
... <row>
... <shape>triangle</shape>
... <degrees>180</degrees>
... <sides>3.0</sides>
... </row>
... </data>'''
>>> df = pd.read_xml(StringIO(xml))
>>> df
shape degrees sides
0 square 360 4.0
1 circle 360 NaN
2 triangle 180 3.0
>>> xml = '''<?xml version='1.0' encoding='utf-8'?>
... <data>
... <row shape="square" degrees="360" sides="4.0"/>
... <row shape="circle" degrees="360"/>
... <row shape="triangle" degrees="180" sides="3.0"/>
... </data>'''
>>> df = pd.read_xml(StringIO(xml), xpath=".//row")
>>> df
shape degrees sides
0 square 360 4.0
1 circle 360 NaN
2 triangle 180 3.0
>>> xml = '''<?xml version='1.0' encoding='utf-8'?>
... <doc:data xmlns:doc="https://example.com">
... <doc:row>
... <doc:shape>square</doc:shape>
... <doc:degrees>360</doc:degrees>
... <doc:sides>4.0</doc:sides>
... </doc:row>
... <doc:row>
... <doc:shape>circle</doc:shape>
... <doc:degrees>360</doc:degrees>
... <doc:sides/>
... </doc:row>
... <doc:row>
... <doc:shape>triangle</doc:shape>
... <doc:degrees>180</doc:degrees>
... <doc:sides>3.0</doc:sides>
... </doc:row>
... </doc:data>'''
>>> df = pd.read_xml(StringIO(xml),
... xpath="//doc:row",
... namespaces={"doc": "https://example.com"})
>>> df
shape degrees sides
0 square 360 4.0
1 circle 360 NaN
2 triangle 180 3.0
>>> xml_data = '''
... <data>
... <row>
... <index>0</index>
... <a>1</a>
... <b>2.5</b>
... <c>True</c>
... <d>a</d>
... <e>2019-12-31 00:00:00</e>
... </row>
... <row>
... <index>1</index>
... <b>4.5</b>
... <c>False</c>
... <d>b</d>
... <e>2019-12-31 00:00:00</e>
... </row>
... </data>
... '''
>>> df = pd.read_xml(StringIO(xml_data),
... dtype_backend="numpy_nullable",
... parse_dates=["e"])
>>> df
index a b c d e
0 0 1 2.5 True a 2019-12-31
1 1 <NA> 4.5 False b 2019-12-31
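For very large documents, ``iterparse`` can be used instead of ``xpath`` to build the
frame without loading the whole tree into memory; it is intended for files on local
disk. A minimal sketch (assuming a local file ``books.xml`` whose repeating element is
``<book>`` with ``title`` and ``price`` children; the file name and element names are
illustrative only):
>>> df = pd.read_xml('books.xml',
...                  iterparse={'book': ['title', 'price']})  # doctest: +SKIP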
|
@doc(
storage_options=_shared_docs["storage_options"],
decompression_options=_shared_docs["decompression_options"] % "path_or_buffer",
)
def read_xml(
path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],
*,
xpath: str = "./*",
namespaces: dict[str, str] | None = None,
elems_only: bool = False,
attrs_only: bool = False,
names: Sequence[str] | None = None,
dtype: DtypeArg | None = None,
converters: ConvertersArg | None = None,
parse_dates: ParseDatesArg | None = None,
# encoding can not be None for lxml and StringIO input
encoding: str | None = "utf-8",
parser: XMLParsers = "lxml",
stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None = None,
iterparse: dict[str, list[str]] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions | None = None,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
) -> DataFrame:
r"""
Read XML document into a :class:`~pandas.DataFrame` object.
.. versionadded:: 1.3.0
Parameters
----------
path_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a ``read()`` function. The string can be any valid XML
string or a path. The string can further be a URL. Valid URL schemes
include http, ftp, s3, and file.
.. deprecated:: 2.1.0
Passing xml literal strings is deprecated.
Wrap literal xml input in ``io.StringIO`` or ``io.BytesIO`` instead.
xpath : str, optional, default './\*'
The ``XPath`` to parse the required set of nodes for migration to
:class:`~pandas.DataFrame`. ``XPath`` should return a collection of elements
and not a single element. Note: The ``etree`` parser supports limited ``XPath``
expressions. For more complex ``XPath``, use ``lxml`` which requires
installation.
namespaces : dict, optional
The namespaces defined in XML document as dicts with key being
namespace prefix and value the URI. There is no need to include all
namespaces in XML, only the ones used in ``xpath`` expression.
Note: if the XML document uses a default namespace denoted as
`xmlns='<URI>'` without a prefix, you must assign a temporary
namespace prefix, such as 'doc', to the URI in order to parse
underlying nodes and/or attributes. For example, ::
namespaces = {{"doc": "https://example.com"}}
elems_only : bool, optional, default False
Parse only the child elements at the specified ``xpath``. By default,
all child elements and non-empty text nodes are returned.
attrs_only : bool, optional, default False
Parse only the attributes at the specified ``xpath``.
By default, all attributes are returned.
names : list-like, optional
Column names for DataFrame of parsed XML data. Use this parameter to
rename original element names and distinguish same named elements and
attributes.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` together with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
.. versionadded:: 1.5.0
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
.. versionadded:: 1.5.0
parse_dates : bool or list of int or names or list of lists or dict, default False
Identifiers to parse index or columns to datetime. The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
.. versionadded:: 1.5.0
encoding : str, optional, default 'utf-8'
Encoding of XML document.
parser : {{'lxml','etree'}}, default 'lxml'
Parser module to use for retrieval of data. Only 'lxml' and
'etree' are supported. With 'lxml' more complex ``XPath`` searches
and ability to use XSLT stylesheet are supported.
stylesheet : str, path object or file-like object
A URL, file-like object, or a raw string containing an XSLT script.
This stylesheet should flatten complex, deeply nested XML documents
for easier parsing. To use this feature you must have ``lxml`` module
installed and specify 'lxml' as ``parser``. The ``xpath`` must
reference nodes of transformed XML document generated after XSLT
transformation and not the original XML document. Only XSLT 1.0
scripts, and not later versions, are currently supported.
iterparse : dict, optional
The nodes or attributes to retrieve in iterparsing of XML document
as a dict with key being the name of repeating element and value being
list of elements or attribute names that are descendants of the repeated
element. Note: If this option is used, it will replace ``xpath`` parsing
and unlike ``xpath``, descendants do not need to relate to each other but can
exist anywhere in the document under the repeating element. This memory-
efficient method should be used for very large XML files (500MB, 1GB, or 5GB+).
For example, ::
iterparse = {{"row_element": ["child_elem", "attr", "grandchild_elem"]}}
.. versionadded:: 1.5.0
{decompression_options}
.. versionchanged:: 1.4.0 Zstandard support.
{storage_options}
dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
df
A DataFrame.
See Also
--------
read_json : Convert a JSON string to pandas object.
read_html : Read HTML tables into a list of DataFrame objects.
Notes
-----
This method is best designed to import shallow XML documents in
the following format, which is the ideal fit for the two dimensions of a
``DataFrame`` (row by column). ::
<root>
<row>
<column1>data</column1>
<column2>data</column2>
<column3>data</column3>
...
</row>
<row>
...
</row>
...
</root>
As a file format, XML documents can be designed in any way, including
the layout of elements and attributes, as long as they conform to W3C
specifications. Therefore, this method is a convenience handler for
a specific flatter design and not all possible XML structures.
However, for more complex XML documents, ``stylesheet`` allows you to
temporarily redesign original document with XSLT (a special purpose
language) for a flatter version for migration to a DataFrame.
This function will *always* return a single :class:`DataFrame` or raise
exceptions due to issues with XML document, ``xpath``, or other
parameters.
See the :ref:`read_xml documentation in the IO section of the docs
<io.read_xml>` for more information on using this method to parse XML
files to DataFrames.
Examples
--------
>>> from io import StringIO
>>> xml = '''<?xml version='1.0' encoding='utf-8'?>
... <data xmlns="http://example.com">
... <row>
... <shape>square</shape>
... <degrees>360</degrees>
... <sides>4.0</sides>
... </row>
... <row>
... <shape>circle</shape>
... <degrees>360</degrees>
... <sides/>
... </row>
... <row>
... <shape>triangle</shape>
... <degrees>180</degrees>
... <sides>3.0</sides>
... </row>
... </data>'''
>>> df = pd.read_xml(StringIO(xml))
>>> df
shape degrees sides
0 square 360 4.0
1 circle 360 NaN
2 triangle 180 3.0
>>> xml = '''<?xml version='1.0' encoding='utf-8'?>
... <data>
... <row shape="square" degrees="360" sides="4.0"/>
... <row shape="circle" degrees="360"/>
... <row shape="triangle" degrees="180" sides="3.0"/>
... </data>'''
>>> df = pd.read_xml(StringIO(xml), xpath=".//row")
>>> df
shape degrees sides
0 square 360 4.0
1 circle 360 NaN
2 triangle 180 3.0
>>> xml = '''<?xml version='1.0' encoding='utf-8'?>
... <doc:data xmlns:doc="https://example.com">
... <doc:row>
... <doc:shape>square</doc:shape>
... <doc:degrees>360</doc:degrees>
... <doc:sides>4.0</doc:sides>
... </doc:row>
... <doc:row>
... <doc:shape>circle</doc:shape>
... <doc:degrees>360</doc:degrees>
... <doc:sides/>
... </doc:row>
... <doc:row>
... <doc:shape>triangle</doc:shape>
... <doc:degrees>180</doc:degrees>
... <doc:sides>3.0</doc:sides>
... </doc:row>
... </doc:data>'''
>>> df = pd.read_xml(StringIO(xml),
... xpath="//doc:row",
... namespaces={{"doc": "https://example.com"}})
>>> df
shape degrees sides
0 square 360 4.0
1 circle 360 NaN
2 triangle 180 3.0
>>> xml_data = '''
... <data>
... <row>
... <index>0</index>
... <a>1</a>
... <b>2.5</b>
... <c>True</c>
... <d>a</d>
... <e>2019-12-31 00:00:00</e>
... </row>
... <row>
... <index>1</index>
... <b>4.5</b>
... <c>False</c>
... <d>b</d>
... <e>2019-12-31 00:00:00</e>
... </row>
... </data>
... '''
>>> df = pd.read_xml(StringIO(xml_data),
... dtype_backend="numpy_nullable",
... parse_dates=["e"])
>>> df
index a b c d e
0 0 1 2.5 True a 2019-12-31
1 1 <NA> 4.5 False b 2019-12-31
"""
check_dtype_backend(dtype_backend)
return _parse(
path_or_buffer=path_or_buffer,
xpath=xpath,
namespaces=namespaces,
elems_only=elems_only,
attrs_only=attrs_only,
names=names,
dtype=dtype,
converters=converters,
parse_dates=parse_dates,
encoding=encoding,
parser=parser,
stylesheet=stylesheet,
iterparse=iterparse,
compression=compression,
storage_options=storage_options,
dtype_backend=dtype_backend,
)
|
(path_or_buffer: 'FilePath | ReadBuffer[bytes] | ReadBuffer[str]', *, xpath: 'str' = './*', namespaces: 'dict[str, str] | None' = None, elems_only: 'bool' = False, attrs_only: 'bool' = False, names: 'Sequence[str] | None' = None, dtype: 'DtypeArg | None' = None, converters: 'ConvertersArg | None' = None, parse_dates: 'ParseDatesArg | None' = None, encoding: 'str | None' = 'utf-8', parser: 'XMLParsers' = 'lxml', stylesheet: 'FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None' = None, iterparse: 'dict[str, list[str]] | None' = None, compression: 'CompressionOptions' = 'infer', storage_options: 'StorageOptions | None' = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>) -> 'DataFrame'
|
68,376 |
pandas.io.formats.format
|
set_eng_float_format
|
Format float representation in DataFrame with SI notation.
Parameters
----------
accuracy : int, default 3
Number of decimal digits after the floating point.
use_eng_prefix : bool, default False
Whether to represent a value with SI prefixes.
Returns
-------
None
Examples
--------
>>> df = pd.DataFrame([1e-9, 1e-3, 1, 1e3, 1e6])
>>> df
0
0 1.000000e-09
1 1.000000e-03
2 1.000000e+00
3 1.000000e+03
4 1.000000e+06
>>> pd.set_eng_float_format(accuracy=1)
>>> df
0
0 1.0E-09
1 1.0E-03
2 1.0E+00
3 1.0E+03
4 1.0E+06
>>> pd.set_eng_float_format(use_eng_prefix=True)
>>> df
0
0 1.000n
1 1.000m
2 1.000
3 1.000k
4 1.000M
>>> pd.set_eng_float_format(accuracy=1, use_eng_prefix=True)
>>> df
0
0 1.0n
1 1.0m
2 1.0
3 1.0k
4 1.0M
>>> pd.set_option("display.float_format", None) # unset option
|
def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None:
"""
Format float representation in DataFrame with SI notation.
Parameters
----------
accuracy : int, default 3
Number of decimal digits after the floating point.
use_eng_prefix : bool, default False
Whether to represent a value with SI prefixes.
Returns
-------
None
Examples
--------
>>> df = pd.DataFrame([1e-9, 1e-3, 1, 1e3, 1e6])
>>> df
0
0 1.000000e-09
1 1.000000e-03
2 1.000000e+00
3 1.000000e+03
4 1.000000e+06
>>> pd.set_eng_float_format(accuracy=1)
>>> df
0
0 1.0E-09
1 1.0E-03
2 1.0E+00
3 1.0E+03
4 1.0E+06
>>> pd.set_eng_float_format(use_eng_prefix=True)
>>> df
0
0 1.000n
1 1.000m
2 1.000
3 1.000k
4 1.000M
>>> pd.set_eng_float_format(accuracy=1, use_eng_prefix=True)
>>> df
0
0 1.0n
1 1.0m
2 1.0
3 1.0k
4 1.0M
>>> pd.set_option("display.float_format", None) # unset option
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
|
(accuracy: int = 3, use_eng_prefix: bool = False) -> NoneType
|
68,377 |
pandas.util._print_versions
|
show_versions
|
Provide useful information, important for bug reports.
It comprises info about the host operating system, the pandas version,
and the versions of other installed related packages.
Parameters
----------
as_json : str or bool, default False
* If False, outputs info in a human readable form to the console.
* If str, it will be considered as a path to a file.
Info will be written to that file in JSON format.
* If True, outputs info in JSON format to the console.
Examples
--------
>>> pd.show_versions() # doctest: +SKIP
Your output may look something like this:
INSTALLED VERSIONS
------------------
commit : 37ea63d540fd27274cad6585082c91b1283f963d
python : 3.10.6.final.0
python-bits : 64
OS : Linux
OS-release : 5.10.102.1-microsoft-standard-WSL2
Version : #1 SMP Wed Mar 2 00:30:59 UTC 2022
machine : x86_64
processor : x86_64
byteorder : little
LC_ALL : None
LANG : en_GB.UTF-8
LOCALE : en_GB.UTF-8
pandas : 2.0.1
numpy : 1.24.3
...
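If ``as_json`` is a string it is treated as a file path and the same information is
written there in JSON format (the path below is hypothetical):
>>> pd.show_versions(as_json='pandas_versions.json')  # doctest: +SKIP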
|
def show_versions(as_json: str | bool = False) -> None:
"""
Provide useful information, important for bug reports.
It comprises info about the host operating system, the pandas version,
and the versions of other installed related packages.
Parameters
----------
as_json : str or bool, default False
* If False, outputs info in a human readable form to the console.
* If str, it will be considered as a path to a file.
Info will be written to that file in JSON format.
* If True, outputs info in JSON format to the console.
Examples
--------
>>> pd.show_versions() # doctest: +SKIP
Your output may look something like this:
INSTALLED VERSIONS
------------------
commit : 37ea63d540fd27274cad6585082c91b1283f963d
python : 3.10.6.final.0
python-bits : 64
OS : Linux
OS-release : 5.10.102.1-microsoft-standard-WSL2
Version : #1 SMP Wed Mar 2 00:30:59 UTC 2022
machine : x86_64
processor : x86_64
byteorder : little
LC_ALL : None
LANG : en_GB.UTF-8
LOCALE : en_GB.UTF-8
pandas : 2.0.1
numpy : 1.24.3
...
"""
sys_info = _get_sys_info()
deps = _get_dependency_info()
if as_json:
j = {"system": sys_info, "dependencies": deps}
if as_json is True:
sys.stdout.writelines(json.dumps(j, indent=2))
else:
assert isinstance(as_json, str) # needed for mypy
with codecs.open(as_json, "wb", encoding="utf8") as f:
json.dump(j, f, indent=2)
else:
assert isinstance(sys_info["LOCALE"], dict) # needed for mypy
language_code = sys_info["LOCALE"]["language-code"]
encoding = sys_info["LOCALE"]["encoding"]
sys_info["LOCALE"] = f"{language_code}.{encoding}"
maxlen = max(len(x) for x in deps)
print("\nINSTALLED VERSIONS")
print("------------------")
for k, v in sys_info.items():
print(f"{k:<{maxlen}}: {v}")
print("")
for k, v in deps.items():
print(f"{k:<{maxlen}}: {v}")
|
(as_json: str | bool = False) -> NoneType
|
68,378 |
pandas.util._tester
|
test
|
Run the pandas test suite using pytest.
By default, runs with the marks -m "not slow and not network and not db"
Parameters
----------
extra_args : list[str], default None
Extra arguments passed to pytest; if given, they replace the default mark selection.
run_doctests : bool, default False
Whether to only run the Python and Cython doctests. If you would like to run
both doctests/regular tests, just append "--doctest-modules"/"--doctest-cython"
to extra_args.
Examples
--------
>>> pd.test() # doctest: +SKIP
running: pytest...
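Passing ``extra_args`` replaces the default mark selection, and ``run_doctests=True``
restricts the run to doctests (both sketches launch pytest and are skipped here):
>>> pd.test(extra_args=['-m not slow'])  # doctest: +SKIP
>>> pd.test(run_doctests=True)  # doctest: +SKIP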
|
def test(extra_args: list[str] | None = None, run_doctests: bool = False) -> None:
"""
Run the pandas test suite using pytest.
By default, runs with the marks -m "not slow and not network and not db"
Parameters
----------
extra_args : list[str], default None
Extra arguments passed to pytest; if given, they replace the default mark selection.
run_doctests : bool, default False
Whether to only run the Python and Cython doctests. If you would like to run
both doctests/regular tests, just append "--doctest-modules"/"--doctest-cython"
to extra_args.
Examples
--------
>>> pd.test() # doctest: +SKIP
running: pytest...
"""
pytest = import_optional_dependency("pytest")
import_optional_dependency("hypothesis")
cmd = ["-m not slow and not network and not db"]
if extra_args:
if not isinstance(extra_args, list):
extra_args = [extra_args]
cmd = extra_args
if run_doctests:
cmd = [
"--doctest-modules",
"--doctest-cython",
f"--ignore={os.path.join(PKG, 'tests')}",
]
cmd += [PKG]
joined = " ".join(cmd)
print(f"running: pytest {joined}")
sys.exit(pytest.main(cmd))
|
(extra_args: Optional[list[str]] = None, run_doctests: bool = False) -> NoneType
|
68,380 |
pandas.core.indexes.timedeltas
|
timedelta_range
|
Return a fixed frequency TimedeltaIndex with day as the default frequency.
Parameters
----------
start : str or timedelta-like, default None
Left bound for generating timedeltas.
end : str or timedelta-like, default None
Right bound for generating timedeltas.
periods : int, default None
Number of periods to generate.
freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5h'.
name : str, default None
Name of the resulting TimedeltaIndex.
closed : str, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None).
unit : str, default None
Specify the desired resolution of the result.
.. versionadded:: 2.0.0
Returns
-------
TimedeltaIndex
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.timedelta_range(start='1 day', periods=4)
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``closed`` parameter specifies which endpoint is included. The default
behavior is to include both endpoints.
>>> pd.timedelta_range(start='1 day', periods=4, closed='right')
TimedeltaIndex(['2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
Only fixed frequencies can be passed; non-fixed frequencies such as
'M' (month end) will raise.
>>> pd.timedelta_range(start='1 day', end='2 days', freq='6h')
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq='6h')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
'5 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
**Specify a unit**
>>> pd.timedelta_range("1 Day", periods=3, freq="100000D", unit="s")
TimedeltaIndex(['1 days', '100001 days', '200001 days'],
dtype='timedelta64[s]', freq='100000D')
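As a companion to the ``closed='right'`` example above, ``closed='left'`` excludes
the right endpoint instead (output shown in the same style as the examples above):
>>> pd.timedelta_range(start='1 day', periods=4, closed='left')
TimedeltaIndex(['1 days', '2 days', '3 days'],
               dtype='timedelta64[ns]', freq='D')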
|
def timedelta_range(
start=None,
end=None,
periods: int | None = None,
freq=None,
name=None,
closed=None,
*,
unit: str | None = None,
) -> TimedeltaIndex:
"""
Return a fixed frequency TimedeltaIndex with day as the default frequency.
Parameters
----------
start : str or timedelta-like, default None
Left bound for generating timedeltas.
end : str or timedelta-like, default None
Right bound for generating timedeltas.
periods : int, default None
Number of periods to generate.
freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5h'.
name : str, default None
Name of the resulting TimedeltaIndex.
closed : str, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None).
unit : str, default None
Specify the desired resolution of the result.
.. versionadded:: 2.0.0
Returns
-------
TimedeltaIndex
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.timedelta_range(start='1 day', periods=4)
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``closed`` parameter specifies which endpoint is included. The default
behavior is to include both endpoints.
>>> pd.timedelta_range(start='1 day', periods=4, closed='right')
TimedeltaIndex(['2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
Only fixed frequencies can be passed; non-fixed frequencies such as
'M' (month end) will raise.
>>> pd.timedelta_range(start='1 day', end='2 days', freq='6h')
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq='6h')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
'5 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
**Specify a unit**
>>> pd.timedelta_range("1 Day", periods=3, freq="100000D", unit="s")
TimedeltaIndex(['1 days', '100001 days', '200001 days'],
dtype='timedelta64[s]', freq='100000D')
"""
if freq is None and com.any_none(periods, start, end):
freq = "D"
freq = to_offset(freq)
tdarr = TimedeltaArray._generate_range(
start, end, periods, freq, closed=closed, unit=unit
)
return TimedeltaIndex._simple_new(tdarr, name=name)
|
(start=None, end=None, periods: Optional[int] = None, freq=None, name=None, closed=None, *, unit: Optional[str] = None) -> pandas.core.indexes.timedeltas.TimedeltaIndex
|
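A short usage sketch for ``timedelta_range`` summarising the three-of-four parameter rule described in the record above; the expected contents follow the doctests in the docstring.
import pandas as pd

# start + periods, default daily frequency
daily = pd.timedelta_range(start="1 day", periods=4)        # 1..4 days, freq='D'

# start + end + freq: fixed 6-hour steps
six_hourly = pd.timedelta_range(start="1 day", end="2 days", freq="6h")

# start + end + periods: freq is omitted, so points are linearly spaced
linear = pd.timedelta_range(start="1 day", end="5 days", periods=4)
assert linear.freq is None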
68,381 |
pandas.core.tools.datetimes
|
to_datetime
|
Convert argument to datetime.
This function converts a scalar, array-like, :class:`Series` or
:class:`DataFrame`/dict-like to a pandas datetime object.
Parameters
----------
arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like
The object to convert to a datetime. If a :class:`DataFrame` is provided, the
method expects minimally the following columns: :const:`"year"`,
:const:`"month"`, :const:`"day"`. The column "year"
must be specified in 4-digit format.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If :const:`'raise'`, then invalid parsing will raise an exception.
- If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`.
- If :const:`'ignore'`, then invalid parsing will return the input.
dayfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
If :const:`True`, parses dates with the day first, e.g. :const:`"10/11/12"`
is parsed as :const:`2012-11-10`.
.. warning::
``dayfirst=True`` is not strict, but will prefer to parse
with day first.
yearfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
- If :const:`True` parses dates with the year first, e.g.
:const:`"10/11/12"` is parsed as :const:`2010-11-12`.
- If both `dayfirst` and `yearfirst` are :const:`True`, `yearfirst` is
preceded (same as :mod:`dateutil`).
.. warning::
``yearfirst=True`` is not strict, but will prefer to parse
with year first.
utc : bool, default False
Control timezone-related parsing, localization and conversion.
- If :const:`True`, the function *always* returns a timezone-aware
UTC-localized :class:`Timestamp`, :class:`Series` or
:class:`DatetimeIndex`. To do this, timezone-naive inputs are
*localized* as UTC, while timezone-aware inputs are *converted* to UTC.
- If :const:`False` (default), inputs will not be coerced to UTC.
Timezone-naive inputs will remain naive, while timezone-aware ones
will keep their time offsets. Limitations exist for mixed
offsets (typically, daylight savings), see :ref:`Examples
<to_datetime_tz_examples>` section for details.
.. warning::
In a future version of pandas, parsing datetimes with mixed time
zones will raise an error unless `utc=True`.
Please specify `utc=True` to opt in to the new behaviour
and silence this warning. To create a `Series` with mixed offsets and
`object` dtype, please use `apply` and `datetime.datetime.strptime`.
See also: pandas general documentation about `timezone conversion and
localization
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#time-zone-handling>`_.
format : str, default None
The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See
`strftime documentation
<https://docs.python.org/3/library/datetime.html
#strftime-and-strptime-behavior>`_ for more information on choices, though
note that :const:`"%f"` will parse all the way up to nanoseconds.
You can also pass:
- "ISO8601", to parse any `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_
time string (not necessarily in exactly the same format);
- "mixed", to infer the format for each element individually. This is risky,
and you should probably use it along with `dayfirst`.
.. note::
If a :class:`DataFrame` is passed, then `format` has no effect.
exact : bool, default True
Control how `format` is used:
- If :const:`True`, require an exact `format` match.
- If :const:`False`, allow the `format` to match anywhere in the target
string.
Cannot be used alongside ``format='ISO8601'`` or ``format='mixed'``.
unit : str, default 'ns'
The unit of the arg (D, s, ms, us, ns) when `arg` is given as an integer
or float number. The conversion is relative to the `origin`: for example,
with ``unit='ms'`` and ``origin='unix'``, this would calculate the number
of milliseconds from the unix epoch start.
infer_datetime_format : bool, default False
If :const:`True` and no `format` is given, attempt to infer the format
of the datetime strings based on the first non-NaN element,
and if it can be inferred, switch to a faster method of parsing them.
In some cases this can increase the parsing speed by ~5-10x.
.. deprecated:: 2.0.0
A strict version of this argument is now the default, passing it has
no effect.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If :const:`'unix'` (or POSIX) time; origin is set to 1970-01-01.
- If :const:`'julian'`, unit must be :const:`'D'`, and origin is set to
beginning of Julian Calendar. Julian day number :const:`0` is assigned
to the day starting at noon on January 1, 4713 BC.
- If Timestamp convertible (Timestamp, dt.datetime, np.datetime64 or date
string), origin is set to Timestamp identified by origin.
- If a float or integer, origin is the difference
(in units determined by the ``unit`` argument) relative to 1970-01-01.
cache : bool, default True
If :const:`True`, use a cache of unique, converted dates to apply the
datetime conversion. May produce significant speed-up when parsing
duplicate date strings, especially ones with timezone offsets. The cache
is only used when there are at least 50 values. The presence of
out-of-bounds values will render the cache unusable and may slow down
parsing.
Returns
-------
datetime
If parsing succeeded.
Return type depends on input (types in parenthesis correspond to
fallback in case of unsuccessful timezone or out-of-range timestamp
parsing):
- scalar: :class:`Timestamp` (or :class:`datetime.datetime`)
- array-like: :class:`DatetimeIndex` (or :class:`Series` with
:class:`object` dtype containing :class:`datetime.datetime`)
- Series: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
- DataFrame: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
Raises
------
ParserError
When parsing a date from string fails.
ValueError
When another datetime conversion error happens. For example when one
of 'year', 'month', or 'day' columns is missing in a :class:`DataFrame`, or
when a Timezone-aware :class:`datetime.datetime` is found in an array-like
of mixed time offsets, and ``utc=False``.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_timedelta : Convert argument to timedelta.
convert_dtypes : Convert dtypes.
Notes
-----
Many input types are supported, and lead to different output types:
- **scalars** can be int, float, str, datetime object (from stdlib :mod:`datetime`
module or :mod:`numpy`). They are converted to :class:`Timestamp` when
possible, otherwise they are converted to :class:`datetime.datetime`.
None/NaN/null scalars are converted to :const:`NaT`.
- **array-like** can contain int, float, str, datetime objects. They are
converted to :class:`DatetimeIndex` when possible, otherwise they are
converted to :class:`Index` with :class:`object` dtype, containing
:class:`datetime.datetime`. None/NaN/null entries are converted to
:const:`NaT` in both cases.
- **Series** are converted to :class:`Series` with :class:`datetime64`
dtype when possible, otherwise they are converted to :class:`Series` with
:class:`object` dtype, containing :class:`datetime.datetime`. None/NaN/null
entries are converted to :const:`NaT` in both cases.
- **DataFrame/dict-like** are converted to :class:`Series` with
:class:`datetime64` dtype. For each row a datetime is created from assembling
the various dataframe columns. Column keys can be common abbreviations
like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns'] or
plurals of the same.
The following causes are responsible for :class:`datetime.datetime` objects
being returned (possibly inside an :class:`Index` or a :class:`Series` with
:class:`object` dtype) instead of a proper pandas designated type
(:class:`Timestamp`, :class:`DatetimeIndex` or :class:`Series`
with :class:`datetime64` dtype):
- when any input element is before :const:`Timestamp.min` or after
:const:`Timestamp.max`, see `timestamp limitations
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#timeseries-timestamp-limits>`_.
- when ``utc=False`` (default) and the input is an array-like or
:class:`Series` containing mixed naive/aware datetime, or aware with mixed
time offsets. Note that this happens in the (quite frequent) situation when
the timezone has a daylight savings policy. In that case you may wish to
use ``utc=True``.
Examples
--------
**Handling various input formats**
Assembling a datetime from multiple columns of a :class:`DataFrame`. The keys
can be common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns'] or plurals of the same.
>>> df = pd.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
Using a unix epoch time
>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
unexpected behavior use a fixed-width exact type.
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
... origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'],
dtype='datetime64[ns]', freq=None)
**Differences with strptime behavior**
:const:`"%f"` will parse all the way up to nanoseconds.
>>> pd.to_datetime('2018-10-26 12:00:00.0000000011',
... format='%Y-%m-%d %H:%M:%S.%f')
Timestamp('2018-10-26 12:00:00.000000001')
**Non-convertible date/times**
Passing ``errors='coerce'`` will force an out-of-bounds date to :const:`NaT`,
in addition to forcing non-dates (or non-parseable dates) to :const:`NaT`.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
.. _to_datetime_tz_examples:
**Timezones and time offsets**
The default behaviour (``utc=False``) is as follows:
- Timezone-naive inputs are converted to timezone-naive :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00:00', '2018-10-26 13:00:15'])
DatetimeIndex(['2018-10-26 12:00:00', '2018-10-26 13:00:15'],
dtype='datetime64[ns]', freq=None)
- Timezone-aware inputs *with constant time offset* are converted to
timezone-aware :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00 -0500', '2018-10-26 13:00 -0500'])
DatetimeIndex(['2018-10-26 12:00:00-05:00', '2018-10-26 13:00:00-05:00'],
dtype='datetime64[ns, UTC-05:00]', freq=None)
- However, timezone-aware inputs *with mixed time offsets* (for example
issued from a timezone with daylight savings, such as Europe/Paris)
are **not successfully converted** to a :class:`DatetimeIndex`.
Parsing datetimes with mixed time zones will show a warning unless
`utc=True`. If you specify `utc=False` the warning below will be shown
and a simple :class:`Index` containing :class:`datetime.datetime`
objects will be returned:
>>> pd.to_datetime(['2020-10-25 02:00 +0200',
... '2020-10-25 04:00 +0100']) # doctest: +SKIP
FutureWarning: In a future version of pandas, parsing datetimes with mixed
time zones will raise an error unless `utc=True`. Please specify `utc=True`
to opt in to the new behaviour and silence this warning. To create a `Series`
with mixed offsets and `object` dtype, please use `apply` and
`datetime.datetime.strptime`.
Index([2020-10-25 02:00:00+02:00, 2020-10-25 04:00:00+01:00],
dtype='object')
- A mix of timezone-aware and timezone-naive inputs is also converted to
a simple :class:`Index` containing :class:`datetime.datetime` objects:
>>> from datetime import datetime
>>> pd.to_datetime(["2020-01-01 01:00:00-01:00",
... datetime(2020, 1, 1, 3, 0)]) # doctest: +SKIP
FutureWarning: In a future version of pandas, parsing datetimes with mixed
time zones will raise an error unless `utc=True`. Please specify `utc=True`
to opt in to the new behaviour and silence this warning. To create a `Series`
with mixed offsets and `object` dtype, please use `apply` and
`datetime.datetime.strptime`.
Index([2020-01-01 01:00:00-01:00, 2020-01-01 03:00:00], dtype='object')
Setting ``utc=True`` solves most of the above issues:
- Timezone-naive inputs are *localized* as UTC
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00'], utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 13:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Timezone-aware inputs are *converted* to UTC (the output represents the
exact same datetime, but viewed from the UTC time offset `+00:00`).
>>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500'],
... utc=True)
DatetimeIndex(['2018-10-26 17:30:00+00:00', '2018-10-26 17:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Inputs can contain both string or datetime, the above
rules still apply
>>> pd.to_datetime(['2018-10-26 12:00', datetime(2020, 1, 1, 18)], utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2020-01-01 18:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
|
def to_datetime(
arg: DatetimeScalarOrArrayConvertible | DictConvertible,
errors: DateTimeErrorChoices = "raise",
dayfirst: bool = False,
yearfirst: bool = False,
utc: bool = False,
format: str | None = None,
exact: bool | lib.NoDefault = lib.no_default,
unit: str | None = None,
infer_datetime_format: lib.NoDefault | bool = lib.no_default,
origin: str = "unix",
cache: bool = True,
) -> DatetimeIndex | Series | DatetimeScalar | NaTType | None:
"""
Convert argument to datetime.
This function converts a scalar, array-like, :class:`Series` or
:class:`DataFrame`/dict-like to a pandas datetime object.
Parameters
----------
arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like
The object to convert to a datetime. If a :class:`DataFrame` is provided, the
method expects minimally the following columns: :const:`"year"`,
:const:`"month"`, :const:`"day"`. The column "year"
must be specified in 4-digit format.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If :const:`'raise'`, then invalid parsing will raise an exception.
- If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`.
- If :const:`'ignore'`, then invalid parsing will return the input.
dayfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
If :const:`True`, parses dates with the day first, e.g. :const:`"10/11/12"`
is parsed as :const:`2012-11-10`.
.. warning::
``dayfirst=True`` is not strict, but will prefer to parse
with day first.
yearfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
- If :const:`True` parses dates with the year first, e.g.
:const:`"10/11/12"` is parsed as :const:`2010-11-12`.
- If both `dayfirst` and `yearfirst` are :const:`True`, `yearfirst` is
preceded (same as :mod:`dateutil`).
.. warning::
``yearfirst=True`` is not strict, but will prefer to parse
with year first.
utc : bool, default False
Control timezone-related parsing, localization and conversion.
- If :const:`True`, the function *always* returns a timezone-aware
UTC-localized :class:`Timestamp`, :class:`Series` or
:class:`DatetimeIndex`. To do this, timezone-naive inputs are
*localized* as UTC, while timezone-aware inputs are *converted* to UTC.
- If :const:`False` (default), inputs will not be coerced to UTC.
Timezone-naive inputs will remain naive, while timezone-aware ones
will keep their time offsets. Limitations exist for mixed
offsets (typically, daylight savings), see :ref:`Examples
<to_datetime_tz_examples>` section for details.
.. warning::
In a future version of pandas, parsing datetimes with mixed time
zones will raise an error unless `utc=True`.
Please specify `utc=True` to opt in to the new behaviour
and silence this warning. To create a `Series` with mixed offsets and
`object` dtype, please use `apply` and `datetime.datetime.strptime`.
See also: pandas general documentation about `timezone conversion and
localization
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#time-zone-handling>`_.
format : str, default None
The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See
`strftime documentation
<https://docs.python.org/3/library/datetime.html
#strftime-and-strptime-behavior>`_ for more information on choices, though
note that :const:`"%f"` will parse all the way up to nanoseconds.
You can also pass:
- "ISO8601", to parse any `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_
time string (not necessarily in exactly the same format);
- "mixed", to infer the format for each element individually. This is risky,
and you should probably use it along with `dayfirst`.
.. note::
If a :class:`DataFrame` is passed, then `format` has no effect.
exact : bool, default True
Control how `format` is used:
- If :const:`True`, require an exact `format` match.
- If :const:`False`, allow the `format` to match anywhere in the target
string.
Cannot be used alongside ``format='ISO8601'`` or ``format='mixed'``.
unit : str, default 'ns'
The unit of the arg (D, s, ms, us, ns) when `arg` is given as an integer
or float number. The conversion is relative to the `origin`: for example,
with ``unit='ms'`` and ``origin='unix'``, this would calculate the number
of milliseconds from the unix epoch start.
infer_datetime_format : bool, default False
If :const:`True` and no `format` is given, attempt to infer the format
of the datetime strings based on the first non-NaN element,
and if it can be inferred, switch to a faster method of parsing them.
In some cases this can increase the parsing speed by ~5-10x.
.. deprecated:: 2.0.0
A strict version of this argument is now the default, passing it has
no effect.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If :const:`'unix'` (or POSIX) time; origin is set to 1970-01-01.
- If :const:`'julian'`, unit must be :const:`'D'`, and origin is set to
beginning of Julian Calendar. Julian day number :const:`0` is assigned
to the day starting at noon on January 1, 4713 BC.
- If Timestamp convertible (Timestamp, dt.datetime, np.datetime64 or date
string), origin is set to Timestamp identified by origin.
- If a float or integer, origin is the difference
(in units determined by the ``unit`` argument) relative to 1970-01-01.
cache : bool, default True
If :const:`True`, use a cache of unique, converted dates to apply the
datetime conversion. May produce significant speed-up when parsing
duplicate date strings, especially ones with timezone offsets. The cache
is only used when there are at least 50 values. The presence of
out-of-bounds values will render the cache unusable and may slow down
parsing.
Returns
-------
datetime
If parsing succeeded.
Return type depends on input (types in parenthesis correspond to
fallback in case of unsuccessful timezone or out-of-range timestamp
parsing):
- scalar: :class:`Timestamp` (or :class:`datetime.datetime`)
- array-like: :class:`DatetimeIndex` (or :class:`Series` with
:class:`object` dtype containing :class:`datetime.datetime`)
- Series: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
- DataFrame: :class:`Series` of :class:`datetime64` dtype (or
:class:`Series` of :class:`object` dtype containing
:class:`datetime.datetime`)
Raises
------
ParserError
When parsing a date from string fails.
ValueError
When another datetime conversion error happens. For example when one
of 'year', 'month', or 'day' columns is missing in a :class:`DataFrame`, or
when a Timezone-aware :class:`datetime.datetime` is found in an array-like
of mixed time offsets, and ``utc=False``.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_timedelta : Convert argument to timedelta.
convert_dtypes : Convert dtypes.
Notes
-----
Many input types are supported, and lead to different output types:
- **scalars** can be int, float, str, datetime object (from stdlib :mod:`datetime`
module or :mod:`numpy`). They are converted to :class:`Timestamp` when
possible, otherwise they are converted to :class:`datetime.datetime`.
None/NaN/null scalars are converted to :const:`NaT`.
- **array-like** can contain int, float, str, datetime objects. They are
converted to :class:`DatetimeIndex` when possible, otherwise they are
converted to :class:`Index` with :class:`object` dtype, containing
:class:`datetime.datetime`. None/NaN/null entries are converted to
:const:`NaT` in both cases.
- **Series** are converted to :class:`Series` with :class:`datetime64`
dtype when possible, otherwise they are converted to :class:`Series` with
:class:`object` dtype, containing :class:`datetime.datetime`. None/NaN/null
entries are converted to :const:`NaT` in both cases.
- **DataFrame/dict-like** are converted to :class:`Series` with
:class:`datetime64` dtype. For each row a datetime is created from assembling
the various dataframe columns. Column keys can be common abbreviations
like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns'] or
plurals of the same.
The following causes are responsible for :class:`datetime.datetime` objects
being returned (possibly inside an :class:`Index` or a :class:`Series` with
:class:`object` dtype) instead of a proper pandas designated type
(:class:`Timestamp`, :class:`DatetimeIndex` or :class:`Series`
with :class:`datetime64` dtype):
- when any input element is before :const:`Timestamp.min` or after
:const:`Timestamp.max`, see `timestamp limitations
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#timeseries-timestamp-limits>`_.
- when ``utc=False`` (default) and the input is an array-like or
:class:`Series` containing mixed naive/aware datetime, or aware with mixed
time offsets. Note that this happens in the (quite frequent) situation when
the timezone has a daylight savings policy. In that case you may wish to
use ``utc=True``.
Examples
--------
**Handling various input formats**
Assembling a datetime from multiple columns of a :class:`DataFrame`. The keys
can be common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns'] or plurals of the same.
>>> df = pd.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
Using a unix epoch time
>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
unexpected behavior use a fixed-width exact type.
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
... origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'],
dtype='datetime64[ns]', freq=None)
**Differences with strptime behavior**
:const:`"%f"` will parse all the way up to nanoseconds.
>>> pd.to_datetime('2018-10-26 12:00:00.0000000011',
... format='%Y-%m-%d %H:%M:%S.%f')
Timestamp('2018-10-26 12:00:00.000000001')
**Non-convertible date/times**
Passing ``errors='coerce'`` will force an out-of-bounds date to :const:`NaT`,
in addition to forcing non-dates (or non-parseable dates) to :const:`NaT`.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
.. _to_datetime_tz_examples:
**Timezones and time offsets**
The default behaviour (``utc=False``) is as follows:
- Timezone-naive inputs are converted to timezone-naive :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00:00', '2018-10-26 13:00:15'])
DatetimeIndex(['2018-10-26 12:00:00', '2018-10-26 13:00:15'],
dtype='datetime64[ns]', freq=None)
- Timezone-aware inputs *with constant time offset* are converted to
timezone-aware :class:`DatetimeIndex`:
>>> pd.to_datetime(['2018-10-26 12:00 -0500', '2018-10-26 13:00 -0500'])
DatetimeIndex(['2018-10-26 12:00:00-05:00', '2018-10-26 13:00:00-05:00'],
dtype='datetime64[ns, UTC-05:00]', freq=None)
- However, timezone-aware inputs *with mixed time offsets* (for example
issued from a timezone with daylight savings, such as Europe/Paris)
are **not successfully converted** to a :class:`DatetimeIndex`.
Parsing datetimes with mixed time zones will show a warning unless
`utc=True`. If you specify `utc=False` the warning below will be shown
and a simple :class:`Index` containing :class:`datetime.datetime`
objects will be returned:
>>> pd.to_datetime(['2020-10-25 02:00 +0200',
... '2020-10-25 04:00 +0100']) # doctest: +SKIP
FutureWarning: In a future version of pandas, parsing datetimes with mixed
time zones will raise an error unless `utc=True`. Please specify `utc=True`
to opt in to the new behaviour and silence this warning. To create a `Series`
with mixed offsets and `object` dtype, please use `apply` and
`datetime.datetime.strptime`.
Index([2020-10-25 02:00:00+02:00, 2020-10-25 04:00:00+01:00],
dtype='object')
- A mix of timezone-aware and timezone-naive inputs is also converted to
a simple :class:`Index` containing :class:`datetime.datetime` objects:
>>> from datetime import datetime
>>> pd.to_datetime(["2020-01-01 01:00:00-01:00",
... datetime(2020, 1, 1, 3, 0)]) # doctest: +SKIP
FutureWarning: In a future version of pandas, parsing datetimes with mixed
time zones will raise an error unless `utc=True`. Please specify `utc=True`
to opt in to the new behaviour and silence this warning. To create a `Series`
with mixed offsets and `object` dtype, please use `apply` and
`datetime.datetime.strptime`.
Index([2020-01-01 01:00:00-01:00, 2020-01-01 03:00:00], dtype='object')
Setting ``utc=True`` solves most of the above issues:
- Timezone-naive inputs are *localized* as UTC
>>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00'], utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 13:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Timezone-aware inputs are *converted* to UTC (the output represents the
exact same datetime, but viewed from the UTC time offset `+00:00`).
>>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500'],
... utc=True)
DatetimeIndex(['2018-10-26 17:30:00+00:00', '2018-10-26 17:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
- Inputs can contain both string or datetime, the above
rules still apply
>>> pd.to_datetime(['2018-10-26 12:00', datetime(2020, 1, 1, 18)], utc=True)
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2020-01-01 18:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
"""
if exact is not lib.no_default and format in {"mixed", "ISO8601"}:
raise ValueError("Cannot use 'exact' when 'format' is 'mixed' or 'ISO8601'")
if infer_datetime_format is not lib.no_default:
warnings.warn(
"The argument 'infer_datetime_format' is deprecated and will "
"be removed in a future version. "
"A strict version of it is now the default, see "
"https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
"You can safely remove this argument.",
stacklevel=find_stack_level(),
)
if errors == "ignore":
# GH#54467
warnings.warn(
"errors='ignore' is deprecated and will raise in a future version. "
"Use to_datetime without passing `errors` and catch exceptions "
"explicitly instead",
FutureWarning,
stacklevel=find_stack_level(),
)
if arg is None:
return None
if origin != "unix":
arg = _adjust_to_origin(arg, origin, unit)
convert_listlike = partial(
_convert_listlike_datetimes,
utc=utc,
unit=unit,
dayfirst=dayfirst,
yearfirst=yearfirst,
errors=errors,
exact=exact,
)
# pylint: disable-next=used-before-assignment
result: Timestamp | NaTType | Series | Index
if isinstance(arg, Timestamp):
result = arg
if utc:
if arg.tz is not None:
result = arg.tz_convert("utc")
else:
result = arg.tz_localize("utc")
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = arg.map(cache_array)
else:
values = convert_listlike(arg._values, format)
result = arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors, utc)
elif isinstance(arg, Index):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array, name=arg.name)
else:
result = convert_listlike(arg, format, name=arg.name)
elif is_list_like(arg):
try:
# error: Argument 1 to "_maybe_cache" has incompatible type
# "Union[float, str, datetime, List[Any], Tuple[Any, ...], ExtensionArray,
# ndarray[Any, Any], Series]"; expected "Union[List[Any], Tuple[Any, ...],
# Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series], Series]"
argc = cast(
Union[list, tuple, ExtensionArray, np.ndarray, "Series", Index], arg
)
cache_array = _maybe_cache(argc, format, cache, convert_listlike)
except OutOfBoundsDatetime:
# caching attempts to create a DatetimeIndex, which may raise
# an OOB. If that's the desired behavior, then just reraise...
if errors == "raise":
raise
# ... otherwise, continue without the cache.
from pandas import Series
cache_array = Series([], dtype=object) # just an empty array
if not cache_array.empty:
result = _convert_and_box_cache(argc, cache_array)
else:
result = convert_listlike(argc, format)
else:
result = convert_listlike(np.array([arg]), format)[0]
if isinstance(arg, bool) and isinstance(result, np.bool_):
result = bool(result) # TODO: avoid this kludge.
# error: Incompatible return value type (got "Union[Timestamp, NaTType,
# Series, Index]", expected "Union[DatetimeIndex, Series, float, str,
# NaTType, None]")
return result # type: ignore[return-value]
|
(arg: 'DatetimeScalarOrArrayConvertible | DictConvertible', errors: 'DateTimeErrorChoices' = 'raise', dayfirst: 'bool' = False, yearfirst: 'bool' = False, utc: 'bool' = False, format: 'str | None' = None, exact: 'bool | lib.NoDefault' = <no_default>, unit: 'str | None' = None, infer_datetime_format: 'lib.NoDefault | bool' = <no_default>, origin: 'str' = 'unix', cache: 'bool' = True) -> 'DatetimeIndex | Series | DatetimeScalar | NaTType | None'
|
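A compact sketch pulling together the conversion rules documented in the record above: DataFrame assembly, explicit formats, coercion of unparseable values, and ``utc=True`` for mixed offsets. Variable names and the example dates are illustrative.
import pandas as pd
from datetime import datetime

# Assemble datetimes from 'year'/'month'/'day' columns of a DataFrame
df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
assembled = pd.to_datetime(df)                                      # datetime64[ns] Series

# Explicit strftime format; out-of-bounds dates become NaT under errors='coerce'
ts = pd.to_datetime("26/10/2018 12:00", format="%d/%m/%Y %H:%M")
oob = pd.to_datetime("13000101", format="%Y%m%d", errors="coerce")  # NaT

# Mixed offsets / mixed naive-aware input: utc=True yields a tz-aware UTC index
idx = pd.to_datetime(
    ["2020-10-25 02:00 +0200", datetime(2020, 1, 1, 3, 0)], utc=True
)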
68,382 |
pandas.core.tools.numeric
|
to_numeric
|
Convert argument to a numeric type.
The default return dtype is `float64` or `int64`
depending on the data supplied. Use the `downcast` parameter
to obtain other dtypes.
Please note that precision loss may occur if really large numbers
are passed in. Due to the internal limitations of `ndarray`, if
numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min)
or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are
passed in, it is very likely they will be converted to float so that
they can be stored in an `ndarray`. These warnings apply similarly to
`Series` since it internally leverages `ndarray`.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
Argument to be converted.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaN.
- If 'ignore', then invalid parsing will return the input.
.. versionchanged:: 2.2
"ignore" is deprecated. Catch exceptions explicitly instead.
downcast : str, default None
Can be 'integer', 'signed', 'unsigned', or 'float'.
If not None, and if the data has been successfully cast to a
numerical dtype (or if the data was numeric to begin with),
downcast that resulting data to the smallest numerical dtype
possible according to the following rules:
- 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
- 'unsigned': smallest unsigned int dtype (min.: np.uint8)
- 'float': smallest float dtype (min.: np.float32)
As this behaviour is separate from the core conversion to
numeric values, any errors raised during the downcasting
will be surfaced regardless of the value of the 'errors' input.
In addition, downcasting will only occur if the size
of the resulting data's dtype is strictly larger than
the dtype it is to be cast to, so if none of the dtypes
checked satisfy that specification, no downcasting will be
performed on the data.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
ret
Numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
DataFrame.convert_dtypes : Convert dtypes.
Examples
--------
Take separate series and convert to numeric, coercing when told to
>>> s = pd.Series(['1.0', '2', -3])
>>> pd.to_numeric(s)
0 1.0
1 2.0
2 -3.0
dtype: float64
>>> pd.to_numeric(s, downcast='float')
0 1.0
1 2.0
2 -3.0
dtype: float32
>>> pd.to_numeric(s, downcast='signed')
0 1
1 2
2 -3
dtype: int8
>>> s = pd.Series(['apple', '1.0', '2', -3])
>>> pd.to_numeric(s, errors='coerce')
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float64
Downcasting of nullable integer and floating dtypes is supported:
>>> s = pd.Series([1, 2, 3], dtype="Int64")
>>> pd.to_numeric(s, downcast="integer")
0 1
1 2
2 3
dtype: Int8
>>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64")
>>> pd.to_numeric(s, downcast="float")
0 1.0
1 2.1
2 3.0
dtype: Float32
|
def to_numeric(
arg,
errors: DateTimeErrorChoices = "raise",
downcast: Literal["integer", "signed", "unsigned", "float"] | None = None,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
):
"""
Convert argument to a numeric type.
The default return dtype is `float64` or `int64`
depending on the data supplied. Use the `downcast` parameter
to obtain other dtypes.
Please note that precision loss may occur if really large numbers
are passed in. Due to the internal limitations of `ndarray`, if
numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min)
or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are
passed in, it is very likely they will be converted to float so that
they can be stored in an `ndarray`. These warnings apply similarly to
`Series` since it internally leverages `ndarray`.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
Argument to be converted.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaN.
- If 'ignore', then invalid parsing will return the input.
.. versionchanged:: 2.2
"ignore" is deprecated. Catch exceptions explicitly instead.
downcast : str, default None
Can be 'integer', 'signed', 'unsigned', or 'float'.
If not None, and if the data has been successfully cast to a
numerical dtype (or if the data was numeric to begin with),
downcast that resulting data to the smallest numerical dtype
possible according to the following rules:
- 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
- 'unsigned': smallest unsigned int dtype (min.: np.uint8)
- 'float': smallest float dtype (min.: np.float32)
As this behaviour is separate from the core conversion to
numeric values, any errors raised during the downcasting
will be surfaced regardless of the value of the 'errors' input.
In addition, downcasting will only occur if the size
of the resulting data's dtype is strictly larger than
the dtype it is to be cast to, so if none of the dtypes
checked satisfy that specification, no downcasting will be
performed on the data.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). Behaviour is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
(default).
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
DataFrame.
.. versionadded:: 2.0
Returns
-------
ret
Numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
DataFrame.convert_dtypes : Convert dtypes.
Examples
--------
Take separate series and convert to numeric, coercing when told to
>>> s = pd.Series(['1.0', '2', -3])
>>> pd.to_numeric(s)
0 1.0
1 2.0
2 -3.0
dtype: float64
>>> pd.to_numeric(s, downcast='float')
0 1.0
1 2.0
2 -3.0
dtype: float32
>>> pd.to_numeric(s, downcast='signed')
0 1
1 2
2 -3
dtype: int8
>>> s = pd.Series(['apple', '1.0', '2', -3])
>>> pd.to_numeric(s, errors='coerce')
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float64
Downcasting of nullable integer and floating dtypes is supported:
>>> s = pd.Series([1, 2, 3], dtype="Int64")
>>> pd.to_numeric(s, downcast="integer")
0 1
1 2
2 3
dtype: Int8
>>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64")
>>> pd.to_numeric(s, downcast="float")
0 1.0
1 2.1
2 3.0
dtype: Float32
"""
if downcast not in (None, "integer", "signed", "unsigned", "float"):
raise ValueError("invalid downcasting method provided")
if errors not in ("ignore", "raise", "coerce"):
raise ValueError("invalid error value specified")
if errors == "ignore":
# GH#54467
warnings.warn(
"errors='ignore' is deprecated and will raise in a future version. "
"Use to_numeric without passing `errors` and catch exceptions "
"explicitly instead",
FutureWarning,
stacklevel=find_stack_level(),
)
check_dtype_backend(dtype_backend)
is_series = False
is_index = False
is_scalars = False
if isinstance(arg, ABCSeries):
is_series = True
values = arg.values
elif isinstance(arg, ABCIndex):
is_index = True
if needs_i8_conversion(arg.dtype):
values = arg.view("i8")
else:
values = arg.values
elif isinstance(arg, (list, tuple)):
values = np.array(arg, dtype="O")
elif is_scalar(arg):
if is_decimal(arg):
return float(arg)
if is_number(arg):
return arg
is_scalars = True
values = np.array([arg], dtype="O")
elif getattr(arg, "ndim", 1) > 1:
raise TypeError("arg must be a list, tuple, 1-d array, or Series")
else:
values = arg
orig_values = values
# GH33013: for IntegerArray & FloatingArray extract non-null values for casting
# save mask to reconstruct the full array after casting
mask: npt.NDArray[np.bool_] | None = None
if isinstance(values, BaseMaskedArray):
mask = values._mask
values = values._data[~mask]
values_dtype = getattr(values, "dtype", None)
if isinstance(values_dtype, ArrowDtype):
mask = values.isna()
values = values.dropna().to_numpy()
new_mask: np.ndarray | None = None
if is_numeric_dtype(values_dtype):
pass
elif lib.is_np_dtype(values_dtype, "mM"):
values = values.view(np.int64)
else:
values = ensure_object(values)
coerce_numeric = errors not in ("ignore", "raise")
try:
values, new_mask = lib.maybe_convert_numeric( # type: ignore[call-overload]
values,
set(),
coerce_numeric=coerce_numeric,
convert_to_masked_nullable=dtype_backend is not lib.no_default
or isinstance(values_dtype, StringDtype)
and not values_dtype.storage == "pyarrow_numpy",
)
except (ValueError, TypeError):
if errors == "raise":
raise
values = orig_values
if new_mask is not None:
# Remove unnecessary values, is expected later anyway and enables
# downcasting
values = values[~new_mask]
elif (
dtype_backend is not lib.no_default
and new_mask is None
or isinstance(values_dtype, StringDtype)
and not values_dtype.storage == "pyarrow_numpy"
):
new_mask = np.zeros(values.shape, dtype=np.bool_)
# attempt downcast only if the data has been successfully converted
# to a numerical dtype and if a downcast method has been specified
if downcast is not None and is_numeric_dtype(values.dtype):
typecodes: str | None = None
if downcast in ("integer", "signed"):
typecodes = np.typecodes["Integer"]
elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0):
typecodes = np.typecodes["UnsignedInteger"]
elif downcast == "float":
typecodes = np.typecodes["Float"]
# pandas support goes only to np.float32,
# as float dtypes smaller than that are
# extremely rare and not well supported
float_32_char = np.dtype(np.float32).char
float_32_ind = typecodes.index(float_32_char)
typecodes = typecodes[float_32_ind:]
if typecodes is not None:
# from smallest to largest
for typecode in typecodes:
dtype = np.dtype(typecode)
if dtype.itemsize <= values.dtype.itemsize:
values = maybe_downcast_numeric(values, dtype)
# successful conversion
if values.dtype == dtype:
break
# GH33013: for IntegerArray, BooleanArray & FloatingArray need to reconstruct
# masked array
if (mask is not None or new_mask is not None) and not is_string_dtype(values.dtype):
if mask is None or (new_mask is not None and new_mask.shape == mask.shape):
# GH 52588
mask = new_mask
else:
mask = mask.copy()
assert isinstance(mask, np.ndarray)
data = np.zeros(mask.shape, dtype=values.dtype)
data[~mask] = values
from pandas.core.arrays import (
ArrowExtensionArray,
BooleanArray,
FloatingArray,
IntegerArray,
)
klass: type[IntegerArray | BooleanArray | FloatingArray]
if is_integer_dtype(data.dtype):
klass = IntegerArray
elif is_bool_dtype(data.dtype):
klass = BooleanArray
else:
klass = FloatingArray
values = klass(data, mask)
if dtype_backend == "pyarrow" or isinstance(values_dtype, ArrowDtype):
values = ArrowExtensionArray(values.__arrow_array__())
if is_series:
return arg._constructor(values, index=arg.index, name=arg.name)
elif is_index:
# because we want to coerce to numeric if possible,
# do not use _shallow_copy
from pandas import Index
return Index(values, name=arg.name)
elif is_scalars:
return values[0]
else:
return values
|
(arg, errors: 'DateTimeErrorChoices' = 'raise', downcast: "Literal['integer', 'signed', 'unsigned', 'float'] | None" = None, dtype_backend: 'DtypeBackend | lib.NoDefault' = <no_default>)
|
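A brief sketch of the coercion and downcasting behaviour described in the record above; the dtypes noted in the comments follow the doctests.
import pandas as pd

s = pd.Series(["1.0", "2", -3])
as_float = pd.to_numeric(s)                        # float64
as_int8 = pd.to_numeric(s, downcast="signed")      # int8: smallest signed dtype that fits

# Unparseable entries become NaN when errors='coerce'
mixed = pd.Series(["apple", "1.0", "2", -3])
coerced = pd.to_numeric(mixed, errors="coerce")    # NaN, 1.0, 2.0, -3.0

# Nullable dtypes stay nullable and are downcast within their family
nullable = pd.Series([1, 2, 3], dtype="Int64")
small = pd.to_numeric(nullable, downcast="integer")    # Int8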
68,383 |
pandas.io.pickle
|
to_pickle
|
Pickle (serialize) object to file.
Parameters
----------
obj : any object
Any python object.
filepath_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function.
Also accepts URL. URL has to be of S3 or GCS.
compression : str or dict, default 'infer'
For on-the-fly compression of the output data. If 'infer' and 'filepath_or_buffer' is
path-like, then detect compression from the following extensions: '.gz',
'.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'
(otherwise no compression).
Set to ``None`` for no compression.
Can also be a dict with key ``'method'`` set
to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and
other key-value pairs are forwarded to
``zipfile.ZipFile``, ``gzip.GzipFile``,
``bz2.BZ2File``, ``zstandard.ZstdCompressor``, ``lzma.LZMAFile`` or
``tarfile.TarFile``, respectively.
As an example, the following could be passed for faster compression and to create
a reproducible gzip archive:
``compression={'method': 'gzip', 'compresslevel': 1, 'mtime': 1}``.
.. versionadded:: 1.5.0
Added support for `.tar` files.
.. versionchanged:: 1.4.0 Zstandard support.
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
values for this parameter depend on the version of Python. For Python
2.x, possible values are 0, 1, 2. For Python >= 3.0, 3 is a valid value.
For Python >= 3.4, 4 is a valid value. A negative value for the
protocol parameter is equivalent to setting its value to
HIGHEST_PROTOCOL.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
are forwarded to ``urllib.request.Request`` as header options. For other
URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
details, and for more examples on storage options refer `here
<https://pandas.pydata.org/docs/user_guide/io.html?
highlight=storage_options#reading-writing-remote-files>`_.
.. [1] https://docs.python.org/3/library/pickle.html
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) # doctest: +SKIP
>>> original_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP
>>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP
>>> unpickled_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
|
@doc(
storage_options=_shared_docs["storage_options"],
compression_options=_shared_docs["compression_options"] % "filepath_or_buffer",
)
def to_pickle(
obj: Any,
filepath_or_buffer: FilePath | WriteBuffer[bytes],
compression: CompressionOptions = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
storage_options: StorageOptions | None = None,
) -> None:
"""
Pickle (serialize) object to file.
Parameters
----------
obj : any object
Any python object.
filepath_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function.
Also accepts URL. URL has to be of S3 or GCS.
{compression_options}
.. versionchanged:: 1.4.0 Zstandard support.
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
values for this parameter depend on the version of Python. For Python
2.x, possible values are 0, 1, 2. For Python >= 3.0, 3 is a valid value.
For Python >= 3.4, 4 is a valid value. A negative value for the
protocol parameter is equivalent to setting its value to
HIGHEST_PROTOCOL.
{storage_options}
.. [1] https://docs.python.org/3/library/pickle.html
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP
>>> original_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP
>>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP
>>> unpickled_df # doctest: +SKIP
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
""" # noqa: E501
if protocol < 0:
protocol = pickle.HIGHEST_PROTOCOL
with get_handle(
filepath_or_buffer,
"wb",
compression=compression,
is_text=False,
storage_options=storage_options,
) as handles:
# letting pickle write directly to the buffer is more memory-efficient
pickle.dump(obj, handles.handle, protocol=protocol)
|
(obj: 'Any', filepath_or_buffer: 'FilePath | WriteBuffer[bytes]', compression: 'CompressionOptions' = 'infer', protocol: 'int' = 5, storage_options: 'StorageOptions | None' = None) -> 'None'
|
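A round-trip sketch for ``to_pickle``/``read_pickle``. The file name is illustrative only, and the dict form of ``compression`` forwards its extra keys to ``gzip.GzipFile`` as described in the record above.
import pandas as pd

df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})

# Compression method comes from the dict; compresslevel/mtime make the archive
# fast to write and byte-for-byte reproducible.
pd.to_pickle(
    df,
    "dummy.pkl.gz",
    compression={"method": "gzip", "compresslevel": 1, "mtime": 1},
)

roundtrip = pd.read_pickle("dummy.pkl.gz")   # compression inferred from '.gz'
assert roundtrip.equals(df)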
68,384 |
pandas.core.tools.timedeltas
|
to_timedelta
|
Convert argument to timedelta.
Timedeltas are absolute differences in times, expressed in different
units (e.g. days, hours, minutes, seconds). This method converts
an argument from a recognized timedelta format / value into
a Timedelta type.
Parameters
----------
arg : str, timedelta, list-like or Series
The data to be converted to timedelta.
.. versionchanged:: 2.0
Strings with units 'M', 'Y' and 'y' do not represent
unambiguous timedelta values and will raise an exception.
unit : str, optional
Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``.
Possible values:
* 'W'
* 'D' / 'days' / 'day'
* 'hours' / 'hour' / 'hr' / 'h' / 'H'
* 'm' / 'minute' / 'min' / 'minutes' / 'T'
* 's' / 'seconds' / 'sec' / 'second' / 'S'
* 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L'
* 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U'
* 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N'
Must not be specified when `arg` contains strings and ``errors="raise"``.
.. deprecated:: 2.2.0
Units 'H', 'T', 'S', 'L', 'U' and 'N' are deprecated and will be removed
in a future version. Please use 'h', 'min', 's', 'ms', 'us', and 'ns'
instead of 'H', 'T', 'S', 'L', 'U' and 'N'.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaT.
- If 'ignore', then invalid parsing will return the input.
Returns
-------
timedelta
If parsing succeeded.
Return type depends on input:
- list-like: TimedeltaIndex of timedelta64 dtype
- Series: Series of timedelta64 dtype
- scalar: Timedelta
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
convert_dtypes : Convert dtypes.
Notes
-----
If the precision is higher than nanoseconds, the precision of the duration is
truncated to nanoseconds for string inputs.
Examples
--------
Parsing a single string to a Timedelta:
>>> pd.to_timedelta('1 days 06:05:01.00003')
Timedelta('1 days 06:05:01.000030')
>>> pd.to_timedelta('15.5us')
Timedelta('0 days 00:00:00.000015500')
Parsing a list or array of strings:
>>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],
dtype='timedelta64[ns]', freq=None)
Converting numbers by specifying the `unit` keyword argument:
>>> pd.to_timedelta(np.arange(5), unit='s')
TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',
'0 days 00:00:03', '0 days 00:00:04'],
dtype='timedelta64[ns]', freq=None)
>>> pd.to_timedelta(np.arange(5), unit='d')
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
|
def to_timedelta(
arg: str
| int
| float
| timedelta
| list
| tuple
| range
| ArrayLike
| Index
| Series,
unit: UnitChoices | None = None,
errors: DateTimeErrorChoices = "raise",
) -> Timedelta | TimedeltaIndex | Series:
"""
Convert argument to timedelta.
Timedeltas are absolute differences in times, expressed in different
units (e.g. days, hours, minutes, seconds). This method converts
an argument from a recognized timedelta format / value into
a Timedelta type.
Parameters
----------
arg : str, timedelta, list-like or Series
The data to be converted to timedelta.
.. versionchanged:: 2.0
Strings with units 'M', 'Y' and 'y' do not represent
unambiguous timedelta values and will raise an exception.
unit : str, optional
Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``.
Possible values:
* 'W'
* 'D' / 'days' / 'day'
* 'hours' / 'hour' / 'hr' / 'h' / 'H'
* 'm' / 'minute' / 'min' / 'minutes' / 'T'
* 's' / 'seconds' / 'sec' / 'second' / 'S'
* 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L'
* 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U'
* 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N'
Must not be specified when `arg` contains strings and ``errors="raise"``.
.. deprecated:: 2.2.0
Units 'H', 'T', 'S', 'L', 'U' and 'N' are deprecated and will be removed
in a future version. Please use 'h', 'min', 's', 'ms', 'us', and 'ns'
instead of 'H', 'T', 'S', 'L', 'U' and 'N'.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaT.
- If 'ignore', then invalid parsing will return the input.
Returns
-------
timedelta
If parsing succeeded.
Return type depends on input:
- list-like: TimedeltaIndex of timedelta64 dtype
- Series: Series of timedelta64 dtype
- scalar: Timedelta
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
convert_dtypes : Convert dtypes.
Notes
-----
If the precision is higher than nanoseconds, the precision of the duration is
truncated to nanoseconds for string inputs.
Examples
--------
Parsing a single string to a Timedelta:
>>> pd.to_timedelta('1 days 06:05:01.00003')
Timedelta('1 days 06:05:01.000030')
>>> pd.to_timedelta('15.5us')
Timedelta('0 days 00:00:00.000015500')
Parsing a list or array of strings:
>>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],
dtype='timedelta64[ns]', freq=None)
Converting numbers by specifying the `unit` keyword argument:
>>> pd.to_timedelta(np.arange(5), unit='s')
TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',
'0 days 00:00:03', '0 days 00:00:04'],
dtype='timedelta64[ns]', freq=None)
>>> pd.to_timedelta(np.arange(5), unit='d')
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
"""
if unit is not None:
unit = parse_timedelta_unit(unit)
disallow_ambiguous_unit(unit)
if errors not in ("ignore", "raise", "coerce"):
raise ValueError("errors must be one of 'ignore', 'raise', or 'coerce'.")
if errors == "ignore":
# GH#54467
warnings.warn(
"errors='ignore' is deprecated and will raise in a future version. "
"Use to_timedelta without passing `errors` and catch exceptions "
"explicitly instead",
FutureWarning,
stacklevel=find_stack_level(),
)
if arg is None:
return arg
elif isinstance(arg, ABCSeries):
values = _convert_listlike(arg._values, unit=unit, errors=errors)
return arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndex):
return _convert_listlike(arg, unit=unit, errors=errors, name=arg.name)
elif isinstance(arg, np.ndarray) and arg.ndim == 0:
# extract array scalar and process below
# error: Incompatible types in assignment (expression has type "object",
# variable has type "Union[str, int, float, timedelta, List[Any],
# Tuple[Any, ...], Union[Union[ExtensionArray, ndarray[Any, Any]], Index,
# Series]]") [assignment]
arg = lib.item_from_zerodim(arg) # type: ignore[assignment]
elif is_list_like(arg) and getattr(arg, "ndim", 1) == 1:
return _convert_listlike(arg, unit=unit, errors=errors)
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
"arg must be a string, timedelta, list, tuple, 1-d array, or Series"
)
if isinstance(arg, str) and unit is not None:
raise ValueError("unit must not be specified if the input is/contains a str")
# ...so it must be a scalar value. Return scalar.
return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors)
|
(arg: 'str | int | float | timedelta | list | tuple | range | ArrayLike | Index | Series', unit: 'UnitChoices | None' = None, errors: 'DateTimeErrorChoices' = 'raise') -> 'Timedelta | TimedeltaIndex | Series'
|
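The deprecation warning above recommends dropping ``errors='ignore'`` and catching exceptions explicitly instead. A minimal sketch of that replacement pattern (the input values are hypothetical):
import pandas as pd
raw = ["1 days", "not a timedelta"]
try:
    result = pd.to_timedelta(raw)  # errors defaults to 'raise'
except ValueError:
    result = raw  # keep the original input, mirroring the old errors='ignore' behaviour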
68,386 |
pandas.core.algorithms
|
unique
|
Return unique values based on a hash table.
Uniques are returned in order of appearance. This does NOT sort.
Significantly faster than numpy.unique for long enough sequences.
Includes NA values.
Parameters
----------
values : 1d array-like
Returns
-------
numpy.ndarray or ExtensionArray
The return can be:
* Index : when the input is an Index
* Categorical : when the input is a Categorical dtype
* ndarray : when the input is a Series/ndarray
Return numpy.ndarray or ExtensionArray.
See Also
--------
Index.unique : Return unique values from an Index.
Series.unique : Return unique values of Series object.
Examples
--------
>>> pd.unique(pd.Series([2, 1, 3, 3]))
array([2, 1, 3])
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
>>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.unique(
... pd.Series(
... [
... pd.Timestamp("20160101", tz="US/Eastern"),
... pd.Timestamp("20160101", tz="US/Eastern"),
... ]
... )
... )
<DatetimeArray>
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[ns, US/Eastern]
>>> pd.unique(
... pd.Index(
... [
... pd.Timestamp("20160101", tz="US/Eastern"),
... pd.Timestamp("20160101", tz="US/Eastern"),
... ]
... )
... )
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
>>> pd.unique(np.array(list("baabc"), dtype="O"))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
>>> pd.unique(pd.Series(pd.Categorical(list("baabc"))))
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
>>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc"))))
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
An ordered Categorical preserves the category ordering.
>>> pd.unique(
... pd.Series(
... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
... )
... )
['b', 'a', 'c']
Categories (3, object): ['a' < 'b' < 'c']
An array of tuples
>>> pd.unique(pd.Series([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")]).values)
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
|
def unique(values):
"""
Return unique values based on a hash table.
Uniques are returned in order of appearance. This does NOT sort.
Significantly faster than numpy.unique for long enough sequences.
Includes NA values.
Parameters
----------
values : 1d array-like
Returns
-------
numpy.ndarray or ExtensionArray
The return can be:
* Index : when the input is an Index
* Categorical : when the input is a Categorical dtype
* ndarray : when the input is a Series/ndarray
Return numpy.ndarray or ExtensionArray.
See Also
--------
Index.unique : Return unique values from an Index.
Series.unique : Return unique values of Series object.
Examples
--------
>>> pd.unique(pd.Series([2, 1, 3, 3]))
array([2, 1, 3])
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
>>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.unique(
... pd.Series(
... [
... pd.Timestamp("20160101", tz="US/Eastern"),
... pd.Timestamp("20160101", tz="US/Eastern"),
... ]
... )
... )
<DatetimeArray>
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[ns, US/Eastern]
>>> pd.unique(
... pd.Index(
... [
... pd.Timestamp("20160101", tz="US/Eastern"),
... pd.Timestamp("20160101", tz="US/Eastern"),
... ]
... )
... )
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
>>> pd.unique(np.array(list("baabc"), dtype="O"))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
>>> pd.unique(pd.Series(pd.Categorical(list("baabc"))))
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
>>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc"))))
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
An ordered Categorical preserves the category ordering.
>>> pd.unique(
... pd.Series(
... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
... )
... )
['b', 'a', 'c']
Categories (3, object): ['a' < 'b' < 'c']
An array of tuples
>>> pd.unique(pd.Series([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")]).values)
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
"""
return unique_with_mask(values)
|
(values)
|
68,388 |
pandas.core.algorithms
|
value_counts
|
Compute a histogram of the counts of non-null values.
Parameters
----------
values : ndarray (1-d)
sort : bool, default True
Sort by values
ascending : bool, default False
Sort in ascending order
normalize : bool, default False
If True then compute a relative histogram
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for pd.cut; only works with numeric data
dropna : bool, default True
Don't include counts of NaN
Returns
-------
Series
|
def value_counts(
values,
sort: bool = True,
ascending: bool = False,
normalize: bool = False,
bins=None,
dropna: bool = True,
) -> Series:
"""
Compute a histogram of the counts of non-null values.
Parameters
----------
values : ndarray (1-d)
sort : bool, default True
Sort by values
ascending : bool, default False
Sort in ascending order
normalize : bool, default False
If True then compute a relative histogram
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for pd.cut; only works with numeric data
dropna : bool, default True
Don't include counts of NaN
Returns
-------
Series
"""
warnings.warn(
# GH#53493
"pandas.value_counts is deprecated and will be removed in a "
"future version. Use pd.Series(obj).value_counts() instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return value_counts_internal(
values,
sort=sort,
ascending=ascending,
normalize=normalize,
bins=bins,
dropna=dropna,
)
|
(values, sort: 'bool' = True, ascending: 'bool' = False, normalize: 'bool' = False, bins=None, dropna: 'bool' = True) -> 'Series'
|
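As the warning above notes, the top-level function is deprecated in favour of the Series method it quotes. A minimal sketch of the replacement (data values are hypothetical):
import pandas as pd
values = ["a", "b", "a", "a"]
counts = pd.Series(values).value_counts()                 # replaces pd.value_counts(values)
shares = pd.Series(values).value_counts(normalize=True)   # relative frequencies, like normalize=True above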
68,389 |
pandas.core.reshape.melt
|
wide_to_long
|
Unpivot a DataFrame from wide to long format.
Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
group of columns with format
A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...
You specify what you want to call this suffix in the resulting long format
with `j` (for example `j='year'`)
Each row of these wide variables is assumed to be uniquely identified by
`i` (can be a single column name or a list of column names)
All remaining variables in the data frame are left intact.
Parameters
----------
df : DataFrame
The wide-format DataFrame.
stubnames : str or list-like
The stub name(s). The wide format variables are assumed to
start with the stub names.
i : str or list-like
Column(s) to use as id variable(s).
j : str
The name of the sub-observation variable. What you wish to name your
suffix in the long format.
sep : str, default ""
A character indicating the separation of the variable names
in the wide format, to be stripped from the names in the long format.
For example, if your column names are A-suffix1, A-suffix2, you
can strip the hyphen by specifying `sep='-'`.
suffix : str, default '\\d+'
A regular expression capturing the wanted suffixes. '\\d+' captures
numeric suffixes. Suffixes with no numbers could be specified with the
negated character class '\\D+'. You can also further disambiguate
suffixes, for example, if your wide variables are of the form A-one,
B-two, ..., and you have an unrelated column A-rating, you can ignore the
last one by specifying `suffix='(one|two)'`. When all suffixes are
numeric, they are cast to int64/float64.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable, with new index
(i, j).
See Also
--------
melt : Unpivot a DataFrame from wide to long format, optionally leaving
identifiers set.
pivot : Create a spreadsheet-style pivot table as a DataFrame.
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
Notes
-----
All extra variables are left untouched. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
in a typical case.
Examples
--------
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")
... # doctest: +NORMALIZE_WHITESPACE
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
With multiple id columns
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 1 2.8
2 3.4
2 1 2.9
2 3.8
3 1 2.2
2 2.9
2 1 1 2.0
2 3.2
2 1 1.8
2 2.8
3 1 1.9
2 2.4
3 1 1 2.2
2 3.3
2 1 2.3
2 3.4
3 1 2.1
2 2.9
Going from long back to wide just takes some creative use of `unstack`
>>> w = l.unstack()
>>> w.columns = w.columns.map('{0[0]}{0[1]}'.format)
>>> w.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
Less wieldy column names are also handled
>>> np.random.seed(0)
>>> df = pd.DataFrame({'A(weekly)-2010': np.random.rand(3),
... 'A(weekly)-2011': np.random.rand(3),
... 'B(weekly)-2010': np.random.rand(3),
... 'B(weekly)-2011': np.random.rand(3),
... 'X' : np.random.randint(3, size=3)})
>>> df['id'] = df.index
>>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id
0 0.548814 0.544883 0.437587 0.383442 0 0
1 0.715189 0.423655 0.891773 0.791725 1 1
2 0.602763 0.645894 0.963663 0.528895 1 2
>>> pd.wide_to_long(df, ['A(weekly)', 'B(weekly)'], i='id',
... j='year', sep='-')
... # doctest: +NORMALIZE_WHITESPACE
X A(weekly) B(weekly)
id year
0 2010 0 0.548814 0.437587
1 2010 1 0.715189 0.891773
2 2010 1 0.602763 0.963663
0 2011 0 0.544883 0.383442
1 2011 1 0.423655 0.791725
2 2011 1 0.645894 0.528895
If we have many columns, we could also use a regex to find our
stubnames and pass that list on to wide_to_long
>>> stubnames = sorted(
... set([match[0] for match in df.columns.str.findall(
... r'[A-B]\(.*\)').values if match != []])
... )
>>> list(stubnames)
['A(weekly)', 'B(weekly)']
All of the above examples have integers as suffixes. It is possible to
have non-integers as suffixes.
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
famid birth ht_one ht_two
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',
... sep='_', suffix=r'\w+')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 one 2.8
two 3.4
2 one 2.9
two 3.8
3 one 2.2
two 2.9
2 1 one 2.0
two 3.2
2 one 1.8
two 2.8
3 one 1.9
two 2.4
3 1 one 2.2
two 3.3
2 one 2.3
two 3.4
3 one 2.1
two 2.9
|
def wide_to_long(
df: DataFrame, stubnames, i, j, sep: str = "", suffix: str = r"\d+"
) -> DataFrame:
r"""
Unpivot a DataFrame from wide to long format.
Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
group of columns with format
A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...
You specify what you want to call this suffix in the resulting long format
with `j` (for example `j='year'`)
Each row of these wide variables is assumed to be uniquely identified by
`i` (can be a single column name or a list of column names)
All remaining variables in the data frame are left intact.
Parameters
----------
df : DataFrame
The wide-format DataFrame.
stubnames : str or list-like
The stub name(s). The wide format variables are assumed to
start with the stub names.
i : str or list-like
Column(s) to use as id variable(s).
j : str
The name of the sub-observation variable. What you wish to name your
suffix in the long format.
sep : str, default ""
A character indicating the separation of the variable names
in the wide format, to be stripped from the names in the long format.
For example, if your column names are A-suffix1, A-suffix2, you
can strip the hyphen by specifying `sep='-'`.
suffix : str, default '\\d+'
A regular expression capturing the wanted suffixes. '\\d+' captures
numeric suffixes. Suffixes with no numbers could be specified with the
negated character class '\\D+'. You can also further disambiguate
suffixes, for example, if your wide variables are of the form A-one,
B-two, ..., and you have an unrelated column A-rating, you can ignore the
last one by specifying `suffix='(one|two)'`. When all suffixes are
numeric, they are cast to int64/float64.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable, with new index
(i, j).
See Also
--------
melt : Unpivot a DataFrame from wide to long format, optionally leaving
identifiers set.
pivot : Create a spreadsheet-style pivot table as a DataFrame.
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
Notes
-----
All extra variables are left untouched. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
in a typical case.
Examples
--------
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")
... # doctest: +NORMALIZE_WHITESPACE
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
With multiple id columns
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 1 2.8
2 3.4
2 1 2.9
2 3.8
3 1 2.2
2 2.9
2 1 1 2.0
2 3.2
2 1 1.8
2 2.8
3 1 1.9
2 2.4
3 1 1 2.2
2 3.3
2 1 2.3
2 3.4
3 1 2.1
2 2.9
Going from long back to wide just takes some creative use of `unstack`
>>> w = l.unstack()
>>> w.columns = w.columns.map('{0[0]}{0[1]}'.format)
>>> w.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
Less wieldy column names are also handled
>>> np.random.seed(0)
>>> df = pd.DataFrame({'A(weekly)-2010': np.random.rand(3),
... 'A(weekly)-2011': np.random.rand(3),
... 'B(weekly)-2010': np.random.rand(3),
... 'B(weekly)-2011': np.random.rand(3),
... 'X' : np.random.randint(3, size=3)})
>>> df['id'] = df.index
>>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id
0 0.548814 0.544883 0.437587 0.383442 0 0
1 0.715189 0.423655 0.891773 0.791725 1 1
2 0.602763 0.645894 0.963663 0.528895 1 2
>>> pd.wide_to_long(df, ['A(weekly)', 'B(weekly)'], i='id',
... j='year', sep='-')
... # doctest: +NORMALIZE_WHITESPACE
X A(weekly) B(weekly)
id year
0 2010 0 0.548814 0.437587
1 2010 1 0.715189 0.891773
2 2010 1 0.602763 0.963663
0 2011 0 0.544883 0.383442
1 2011 1 0.423655 0.791725
2 2011 1 0.645894 0.528895
If we have many columns, we could also use a regex to find our
stubnames and pass that list on to wide_to_long
>>> stubnames = sorted(
... set([match[0] for match in df.columns.str.findall(
... r'[A-B]\(.*\)').values if match != []])
... )
>>> list(stubnames)
['A(weekly)', 'B(weekly)']
All of the above examples have integers as suffixes. It is possible to
have non-integers as suffixes.
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
famid birth ht_one ht_two
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',
... sep='_', suffix=r'\w+')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 one 2.8
two 3.4
2 one 2.9
two 3.8
3 one 2.2
two 2.9
2 1 one 2.0
two 3.2
2 one 1.8
two 2.8
3 one 1.9
two 2.4
3 1 one 2.2
two 3.3
2 one 2.3
two 3.4
3 one 2.1
two 2.9
"""
def get_var_names(df, stub: str, sep: str, suffix: str):
regex = rf"^{re.escape(stub)}{re.escape(sep)}{suffix}$"
return df.columns[df.columns.str.match(regex)]
def melt_stub(df, stub: str, i, j, value_vars, sep: str):
newdf = melt(
df,
id_vars=i,
value_vars=value_vars,
value_name=stub.rstrip(sep),
var_name=j,
)
newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "", regex=True)
# GH17627 Cast numerics suffixes to int/float
try:
newdf[j] = to_numeric(newdf[j])
except (TypeError, ValueError, OverflowError):
# TODO: anything else to catch?
pass
return newdf.set_index(i + [j])
if not is_list_like(stubnames):
stubnames = [stubnames]
else:
stubnames = list(stubnames)
if df.columns.isin(stubnames).any():
raise ValueError("stubname can't be identical to a column name")
if not is_list_like(i):
i = [i]
else:
i = list(i)
if df[i].duplicated().any():
raise ValueError("the id variables need to uniquely identify each row")
_melted = []
value_vars_flattened = []
for stub in stubnames:
value_var = get_var_names(df, stub, sep, suffix)
value_vars_flattened.extend(value_var)
_melted.append(melt_stub(df, stub, i, j, value_var, sep))
melted = concat(_melted, axis=1)
id_vars = df.columns.difference(value_vars_flattened)
new = df[id_vars]
if len(i) == 1:
return new.set_index(i).join(melted)
else:
return new.merge(melted.reset_index(), on=i).set_index(i + [j])
|
(df: 'DataFrame', stubnames, i, j, sep: 'str' = '', suffix: 'str' = '\\d+') -> 'DataFrame'
|
68,390 |
recommonmark
|
setup
|
Initialize Sphinx extension.
|
def setup(app):
"""Initialize Sphinx extension."""
import sphinx
from .parser import CommonMarkParser
if sphinx.version_info >= (1, 8):
app.add_source_suffix('.md', 'markdown')
app.add_source_parser(CommonMarkParser)
elif sphinx.version_info >= (1, 4):
app.add_source_parser('.md', CommonMarkParser)
return {'version': __version__, 'parallel_read_safe': True}
|
(app)
|
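The ``setup()`` hook above is invoked by Sphinx when the extension is enabled in ``conf.py``. A hedged configuration sketch (project-specific values are placeholders); on Sphinx >= 1.8 the ``.md`` suffix is registered by ``setup()`` itself, so the explicit ``source_suffix`` mapping is optional:
# conf.py (sketch)
extensions = [
    'recommonmark',   # calls recommonmark.setup(app) shown above
]
source_suffix = {
    '.rst': 'restructuredtext',
    '.md': 'markdown',
}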
68,391 |
svgwrite.drawing
|
Drawing
|
This is the SVG drawing represented by the top level *svg* element.
A drawing consists of any number of SVG elements contained within the drawing
element, stored in the *elements* attribute.
A drawing can range from an empty drawing (i.e., no content inside of the drawing),
to a very simple drawing containing a single SVG element such as a *rect*,
to a complex, deeply nested collection of container elements and graphics elements.
|
class Drawing(SVG, ElementFactory):
""" This is the SVG drawing represented by the top level *svg* element.
A drawing consists of any number of SVG elements contained within the drawing
element, stored in the *elements* attribute.
A drawing can range from an empty drawing (i.e., no content inside of the drawing),
to a very simple drawing containing a single SVG element such as a *rect*,
to a complex, deeply nested collection of container elements and graphics elements.
"""
def __init__(self, filename="noname.svg", size=('100%', '100%'), **extra):
"""
:param string filename: filesystem filename valid for :func:`open`
:param 2-tuple size: width, height
:param keywords extra: additional svg-attributes for the *SVG* object
Important (and not SVG Attributes) **extra** parameters:
:param string profile: ``'tiny | full'`` - define the SVG baseProfile
:param bool debug: switch validation on/off
"""
super(Drawing, self).__init__(size=size, **extra)
self.filename = filename
self._stylesheets = [] # list of stylesheets appended
def get_xml(self):
""" Get the XML representation as `ElementTree` object.
:return: XML `ElementTree` of this object and all its subelements
"""
profile = self.profile
version = self.version
self.attribs['xmlns'] = "http://www.w3.org/2000/svg"
self.attribs['xmlns:xlink'] = "http://www.w3.org/1999/xlink"
self.attribs['xmlns:ev'] = "http://www.w3.org/2001/xml-events"
self.attribs['baseProfile'] = profile
self.attribs['version'] = version
return super(Drawing, self).get_xml()
def add_stylesheet(self, href, title, alternate="no", media="screen"):
""" Add a stylesheet reference.
:param string href: link to stylesheet <URI>
:param string title: name of stylesheet
:param string alternate: ``'yes'|'no'``
:param string media: ``'all | aural | braille | embossed | handheld | print | projection | screen | tty | tv'``
"""
self._stylesheets.append((href, title, alternate, media))
def write(self, fileobj, pretty=False, indent=2):
""" Write XML string to `fileobj`.
:param fileobj: a file-like object
:param pretty: True for easy readable output
:param indent: how much to indent if pretty is enabled, by default 2 spaces
Python 3.x - set encoding at the open command::
open('filename', 'w', encoding='utf-8')
"""
# write xml header
fileobj.write('<?xml version="1.0" encoding="utf-8" ?>\n')
# don't use DOCTYPE. It's useless. see also:
# http://tech.groups.yahoo.com/group/svg-developers/message/48562
# write stylesheets
stylesheet_template = '<?xml-stylesheet href="%s" type="text/css" ' \
'title="%s" alternate="%s" media="%s"?>\n'
# removed map(), does not work with Python 3
for stylesheet in self._stylesheets:
fileobj.write(stylesheet_template % stylesheet)
xml_string = self.tostring()
if pretty: # write easy readable XML file
xml_string = pretty_xml(xml_string, indent=indent)
fileobj.write(xml_string)
def save(self, pretty=False, indent=2):
""" Write the XML string to `self.filename`.
:param pretty: True for easy readable output
:param indent: how much to indent if pretty is enabled, by default 2 spaces
"""
fileobj = io.open(self.filename, mode='w', encoding='utf-8')
self.write(fileobj, pretty=pretty, indent=indent)
fileobj.close()
def saveas(self, filename, pretty=False, indent=2):
""" Write the XML string to `filename`.
:param string filename: filesystem filename valid for :func:`open`
:param pretty: True for easy readable output
:param indent: how much to indent if pretty is enabled, by default 2 spaces
"""
self.filename = filename
self.save(pretty=pretty, indent=indent)
def _repr_svg_(self):
""" Show SVG in IPython, Jupyter Notebook, and Jupyter Lab
:return: unicode XML string of this object and all its subelements
"""
return self.tostring()
|
(filename='noname.svg', size=('100%', '100%'), **extra)
|
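A typical end-to-end use of ``Drawing``, sketched under the assumption that the ``ElementFactory`` mixin (see ``__getattr__`` below) exposes factory methods such as ``rect`` and ``line``; the filename is hypothetical:
import svgwrite
dwg = svgwrite.Drawing('example.svg', size=('100mm', '60mm'))
dwg.viewbox(0, 0, 100, 60)   # map user-space coordinates onto the viewport
dwg.add(dwg.rect(insert=(10, 10), size=(30, 20), fill='blue'))
dwg.add(dwg.line(start=(0, 0), end=(100, 60), stroke='black'))
dwg.save(pretty=True)        # writes the XML to dwg.filename via write()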
68,392 |
svgwrite.elementfactory
|
__getattr__
| null |
def __getattr__(self, name):
if name in factoryelements:
return ElementBuilder(factoryelements[name], self)
else:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__.__name__, name))
|
(self, name)
|
68,393 |
svgwrite.base
|
__getitem__
|
Get SVG attribute by `key`.
:param string key: SVG attribute name
:return: SVG attribute value
|
def __getitem__(self, key):
""" Get SVG attribute by `key`.
:param string key: SVG attribute name
:return: SVG attribute value
"""
return self.attribs[key]
|
(self, key)
|
68,394 |
svgwrite.drawing
|
__init__
|
:param string filename: filesystem filename valid for :func:`open`
:param 2-tuple size: width, height
:param keywords extra: additional svg-attributes for the *SVG* object
Important (and not SVG Attributes) **extra** parameters:
:param string profile: ``'tiny | full'`` - define the SVG baseProfile
:param bool debug: switch validation on/off
|
def __init__(self, filename="noname.svg", size=('100%', '100%'), **extra):
"""
:param string filename: filesystem filename valid for :func:`open`
:param 2-tuple size: width, height
:param keywords extra: additional svg-attributes for the *SVG* object
Important (and not SVG Attributes) **extra** parameters:
:param string profile: ``'tiny | full'`` - define the SVG baseProfile
:param bool debug: switch validation on/off
"""
super(Drawing, self).__init__(size=size, **extra)
self.filename = filename
self._stylesheets = [] # list of stylesheets appended
|
(self, filename='noname.svg', size=('100%', '100%'), **extra)
|
68,395 |
svgwrite.base
|
__setitem__
|
Set SVG attribute by `key` to `value`.
:param string key: SVG attribute name
:param object value: SVG attribute value
|
def __setitem__(self, key, value):
""" Set SVG attribute by `key` to `value`.
:param string key: SVG attribute name
:param object value: SVG attribute value
"""
# Attribute checking is only done by using the __setitem__() method or
# by self['attribute'] = value
if self.debug:
self.validator.check_svg_attribute_value(self.elementname, key, value)
self.attribs[key] = value
|
(self, key, value)
|
68,396 |
svgwrite.container
|
_embed_font_data
| null |
def _embed_font_data(self, name, data, mimetype):
content = FONT_TEMPLATE.format(name=name, data=base64_data(data, mimetype))
self.embed_stylesheet(content)
|
(self, name, data, mimetype)
|
68,397 |
svgwrite.drawing
|
_repr_svg_
|
Show SVG in IPython, Jupyter Notebook, and Jupyter Lab
:return: unicode XML string of this object and all its subelements
|
def _repr_svg_(self):
""" Show SVG in IPython, Jupyter Notebook, and Jupyter Lab
:return: unicode XML string of this object and all its subelements
"""
return self.tostring()
|
(self)
|
68,398 |
svgwrite.base
|
add
|
Add an SVG element as subelement.
:param element: append this SVG element
:returns: the added element
|
def add(self, element):
""" Add an SVG element as subelement.
:param element: append this SVG element
:returns: the added element
"""
if self.debug:
self.validator.check_valid_children(self.elementname, element.elementname)
self.elements.append(element)
return element
|
(self, element)
|
68,399 |
svgwrite.drawing
|
add_stylesheet
|
Add a stylesheet reference.
:param string href: link to stylesheet <URI>
:param string title: name of stylesheet
:param string alternate: ``'yes'|'no'``
:param string media: ``'all | aural | braille | embossed | handheld | print | projection | screen | tty | tv'``
|
def add_stylesheet(self, href, title, alternate="no", media="screen"):
""" Add a stylesheet reference.
:param string href: link to stylesheet <URI>
:param string title: name of stylesheet
:param string alternate: ``'yes'|'no'``
:param string media: ``'all | aural | braille | embossed | handheld | print | projection | screen | tty | tv'``
"""
self._stylesheets.append((href, title, alternate, media))
|
(self, href, title, alternate='no', media='screen')
|
68,400 |
svgwrite.mixins
|
clip_rect
|
Set SVG Property **clip**.
|
def clip_rect(self, top='auto', right='auto', bottom='auto', left='auto'):
"""
Set SVG Property **clip**.
"""
self['clip'] = "rect(%s,%s,%s,%s)" % (top, right, bottom, left)
|
(self, top='auto', right='auto', bottom='auto', left='auto')
|
68,401 |
svgwrite.base
|
copy
| null |
def copy(self):
newobj = copy.copy(self) # shallow copy of object
newobj.attribs = copy.copy(self.attribs) # shallow copy of attributes
newobj.elements = copy.copy(self.elements) # shallow copy of subelements
if 'id' in newobj.attribs: # create a new 'id'
newobj['id'] = newobj.next_id()
return newobj
|
(self)
|
68,402 |
svgwrite.mixins
|
dasharray
|
Set SVG Properties **stroke-dashoffset** and **stroke-dasharray**.
Where *dasharray* specifies the lengths of alternating dashes and gaps as
<list> of <int> or <float> values or a <string> of comma and/or white
space separated <lengths> or <percentages>. (e.g. as <list> dasharray=[1, 0.5]
or as <string> dasharray='1 0.5')
|
def dasharray(self, dasharray=None, offset=None):
"""
Set SVG Properties **stroke-dashoffset** and **stroke-dasharray**.
Where *dasharray* specifies the lengths of alternating dashes and gaps as
<list> of <int> or <float> values or a <string> of comma and/or white
space separated <lengths> or <percentages>. (e.g. as <list> dasharray=[1, 0.5]
or as <string> dasharray='1 0.5')
"""
if dasharray is not None:
self['stroke-dasharray'] = strlist(dasharray, ' ')
if offset is not None:
self['stroke-dashoffset'] = offset
return self
|
(self, dasharray=None, offset=None)
|
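A short sketch of ``dasharray()``, assuming a ``Drawing`` instance ``dwg`` as in the earlier example; with ``strlist`` joining on spaces, the list form becomes a space-separated attribute value:
line = dwg.add(dwg.line(start=(0, 0), end=(50, 0), stroke='black'))
line.dasharray([5, 2], offset=3)   # sets stroke-dasharray="5 2" and stroke-dashoffset="3"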
68,403 |
svgwrite.container
|
embed_font
|
Embed font as base64 encoded data from font file.
:param name: font name
:param filename: file name of a locally stored font
|
def embed_font(self, name, filename):
""" Embed font as base64 encoded data from font file.
:param name: font name
:param filename: file name of a locally stored font
"""
data = open(filename, 'rb').read()
self._embed_font_data(name, data, font_mimetype(filename))
|
(self, name, filename)
|
68,404 |
svgwrite.container
|
embed_google_web_font
|
Embed font as base64 encoded data acquired from google fonts.
:param name: font name
:param uri: google fonts request uri like 'http://fonts.googleapis.com/css?family=Indie+Flower'
|
def embed_google_web_font(self, name, uri):
""" Embed font as base64 encoded data acquired from google fonts.
:param name: font name
:param uri: google fonts request uri like 'http://fonts.googleapis.com/css?family=Indie+Flower'
"""
font_info = urlopen(uri).read()
font_url = find_first_url(font_info.decode())
if font_url is None:
raise ValueError("Got no font data from uri: '{}'".format(uri))
else:
data = urlopen(font_url).read()
self._embed_font_data(name, data, font_mimetype(font_url))
|
(self, name, uri)
|
68,405 |
svgwrite.container
|
embed_stylesheet
|
Add <style> tag to the defs section.
:param content: style sheet content as string
:return: :class:`~svgwrite.container.Style` object
|
def embed_stylesheet(self, content):
""" Add <style> tag to the defs section.
:param content: style sheet content as string
:return: :class:`~svgwrite.container.Style` object
"""
return self.defs.add(Style(content))
|
(self, content)
|
68,406 |
svgwrite.mixins
|
fill
|
Set SVG Properties **fill**, **fill-rule** and **fill-opacity**.
|
def fill(self, color=None, rule=None, opacity=None):
"""
Set SVG Properties **fill**, **fill-rule** and **fill-opacity**.
"""
if color is not None:
if is_string(color):
self['fill'] = color
else:
self['fill'] = color.get_paint_server()
if rule is not None:
self['fill-rule'] = rule
if opacity is not None:
self['fill-opacity'] = opacity
return self
|
(self, color=None, rule=None, opacity=None)
|
68,407 |
svgwrite.mixins
|
fit
|
Set the **preserveAspectRatio** attribute.
:param string horiz: horizontal alignment ``'left | center | right'``
:param string vert: vertical alignment ``'top | middle | bottom'``
:param string scale: scale method ``'meet | slice'``
============= =======================================================
Scale methods Description
============= =======================================================
``'meet'`` preserve aspect ratio and zoom to limits of viewBox
``'slice'`` preserve aspect ratio; viewBox touches the viewport on
all bounds and will extend beyond the bounds of
the viewport
============= =======================================================
|
def fit(self, horiz="center", vert="middle", scale="meet"):
""" Set the **preserveAspectRatio** attribute.
:param string horiz: horizontal alignment ``'left | center | right'``
:param string vert: vertical alignment ``'top | middle | bottom'``
:param string scale: scale method ``'meet | slice'``
============= =======================================================
Scale methods Description
============= =======================================================
``'meet'`` preserve aspect ratio and zoom to limits of viewBox
``'slice'`` preserve aspect ratio; viewBox touches the viewport on
all bounds and will extend beyond the bounds of
the viewport
============= =======================================================
"""
if self.debug and scale not in ('meet', 'slice'):
raise ValueError("Invalid scale parameter '%s'" % scale)
self['preserveAspectRatio'] = "%s%s %s" % (_horiz[horiz], _vert[vert], scale)
|
(self, horiz='center', vert='middle', scale='meet')
|
68,408 |
svgwrite.base
|
get_funciri
|
Get the `FuncIRI` reference string of the object. (i.e. ``'url(#id)'``).
:returns: `string`
|
def get_funciri(self):
"""
Get the `FuncIRI` reference string of the object. (i.e. ``'url(#id)'``).
:returns: `string`
"""
return "url(%s)" % self.get_iri()
|
(self)
|
68,409 |
svgwrite.base
|
get_id
|
Get the object `id` string, if the object does not have an `id`,
a new `id` will be created.
:returns: `string`
|
def get_id(self):
""" Get the object `id` string, if the object does not have an `id`,
a new `id` will be created.
:returns: `string`
"""
if 'id' not in self.attribs:
self.attribs['id'] = self.next_id()
return self.attribs['id']
|
(self)
|
68,410 |
svgwrite.base
|
get_iri
|
Get the `IRI` reference string of the object. (i.e., ``'#id'``).
:returns: `string`
|
def get_iri(self):
"""
Get the `IRI` reference string of the object. (i.e., ``'#id'``).
:returns: `string`
"""
return "#%s" % self.get_id()
|
(self)
|
68,411 |
svgwrite.drawing
|
get_xml
|
Get the XML representation as `ElementTree` object.
:return: XML `ElementTree` of this object and all its subelements
|
def get_xml(self):
""" Get the XML representation as `ElementTree` object.
:return: XML `ElementTree` of this object and all its subelements
"""
profile = self.profile
version = self.version
self.attribs['xmlns'] = "http://www.w3.org/2000/svg"
self.attribs['xmlns:xlink'] = "http://www.w3.org/1999/xlink"
self.attribs['xmlns:ev'] = "http://www.w3.org/2001/xml-events"
self.attribs['baseProfile'] = profile
self.attribs['version'] = version
return super(Drawing, self).get_xml()
|
(self)
|
68,412 |
svgwrite.base
|
next_id
| null |
def next_id(self, value=None):
return AutoID.next_id(value)
|
(self, value=None)
|
68,413 |
svgwrite.drawing
|
save
|
Write the XML string to `self.filename`.
:param pretty: True for easy readable output
:param indent: how much to indent if pretty is enabled, by default 2 spaces
|
def save(self, pretty=False, indent=2):
""" Write the XML string to `self.filename`.
:param pretty: True for easy readable output
:param indent: how much to indent if pretty is enabled, by default 2 spaces
"""
fileobj = io.open(self.filename, mode='w', encoding='utf-8')
self.write(fileobj, pretty=pretty, indent=indent)
fileobj.close()
|
(self, pretty=False, indent=2)
|
68,414 |
svgwrite.drawing
|
saveas
|
Write the XML string to `filename`.
:param string filename: filesystem filename valid for :func:`open`
:param pretty: True for easy readable output
:param indent: how much to indent if pretty is enabled, by default 2 spaces
|
def saveas(self, filename, pretty=False, indent=2):
""" Write the XML string to `filename`.
:param string filename: filesystem filename valid for :func:`open`
:param pretty: True for easy readable output
:param indent: how much to indent if pretty is enabled, by default 2 spaces
"""
self.filename = filename
self.save(pretty=pretty, indent=indent)
|
(self, filename, pretty=False, indent=2)
|
68,415 |
svgwrite.base
|
set_desc
|
Insert a **title** and/or a **desc** element as first subelement.
|
def set_desc(self, title=None, desc=None):
""" Insert a **title** and/or a **desc** element as first subelement.
"""
if desc is not None:
self.elements.insert(0, Desc(desc))
if title is not None:
self.elements.insert(0, Title(title))
|
(self, title=None, desc=None)
|
68,416 |
svgwrite.base
|
set_metadata
|
:param xmldata: an xml.etree.ElementTree - Element() object.
|
def set_metadata(self, xmldata):
"""
:param xmldata: an xml.etree.ElementTree - Element() object.
"""
metadata = Metadata(xmldata)
if len(self.elements) == 0:
self.elements.append(metadata)
else:
pos = 0
while self.elements[pos].elementname in ('title', 'desc'):
pos += 1
if pos == len(self.elements):
self.elements.append(metadata)
return
if self.elements[pos].elementname == 'metadata':
self.elements[pos].xml.append(xmldata)
else:
self.elements.insert(pos, metadata)
|
(self, xmldata)
|
68,417 |
svgwrite.base
|
set_parameter
| null |
def set_parameter(self, parameter):
self._parameter = parameter
|
(self, parameter)
|
68,418 |
svgwrite.mixins
|
stretch
|
Stretch viewBox in x and y direction to fill the viewport; does not
preserve aspect ratio.
|
def stretch(self):
""" Stretch viewBox in x and y direction to fill viewport, does not
preserve aspect ratio.
"""
self['preserveAspectRatio'] = 'none'
|
(self)
|
68,419 |
svgwrite.mixins
|
stroke
|
Set SVG Properties **stroke**, **stroke-width**, **stroke-opacity**,
**stroke-linecap** and **stroke-miterlimit**.
|
def stroke(self, color=None, width=None, opacity=None, linecap=None,
linejoin=None, miterlimit=None):
"""
Set SVG Properties **stroke**, **stroke-width**, **stroke-opacity**,
**stroke-linecap** and **stroke-miterlimit**.
"""
if color is not None:
if is_string(color):
self['stroke'] = color
else:
self['stroke'] = color.get_paint_server()
if width is not None:
self['stroke-width'] = width
if opacity is not None:
self['stroke-opacity'] = opacity
if linecap is not None:
self['stroke-linecap'] = linecap
if linejoin is not None:
self['stroke-linejoin'] = linejoin
if miterlimit is not None:
self['stroke-miterlimit'] = miterlimit
return self
|
(self, color=None, width=None, opacity=None, linecap=None, linejoin=None, miterlimit=None)
|
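Because ``fill()`` and ``stroke()`` both return ``self``, presentation properties can be chained; a sketch assuming a ``Drawing`` instance ``dwg`` with a ``circle`` factory method:
circle = dwg.add(dwg.circle(center=(30, 30), r=10))
circle.fill('red', opacity=0.5).stroke('black', width=2, linecap='round')
# resulting attributes: fill, fill-opacity, stroke, stroke-width, stroke-linecap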
68,420 |
svgwrite.base
|
tostring
|
Get the XML representation as unicode `string`.
:return: unicode XML string of this object and all its subelements
|
def tostring(self):
""" Get the XML representation as unicode `string`.
:return: unicode XML string of this object and all its subelements
"""
xml = self.get_xml()
# required for Python 2 support
xml_utf8_str = etree.tostring(xml, encoding='utf-8')
return xml_utf8_str.decode('utf-8')
# just Python 3: return etree.tostring(xml, encoding='unicode')
|
(self)
|
68,421 |
svgwrite.base
|
update
|
Update SVG Attributes from `dict` attribs.
Rules for keys:
1. trailing '_' will be removed (``'class_'`` -> ``'class'``)
2. inner '_' will be replaced by '-' (``'stroke_width'`` -> ``'stroke-width'``)
|
def update(self, attribs):
""" Update SVG Attributes from `dict` attribs.
Rules for keys:
1. trailing '_' will be removed (``'class_'`` -> ``'class'``)
2. inner '_' will be replaced by '-' (``'stroke_width'`` -> ``'stroke-width'``)
"""
for key, value in attribs.items():
# remove trailing underscores
# and replace inner underscores
key = key.rstrip('_').replace('_', '-')
self.__setitem__(key, value)
|
(self, attribs)
|
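The key-mangling rules above make SVG attribute names that are not valid Python keywords easy to pass as a dict; a sketch assuming an element created from a ``Drawing`` instance ``dwg`` as in the earlier example:
elem = dwg.rect(insert=(0, 0), size=(10, 10))
elem.update({'class_': 'highlight', 'stroke_width': 2})
# equivalent to elem['class'] = 'highlight' and elem['stroke-width'] = 2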
68,422 |
svgwrite.base
|
value_to_string
|
Converts *value* into a <string>; includes a value check, depending
on :attr:`self.debug` and :attr:`self.profile`.
|
def value_to_string(self, value):
"""
Converts *value* into a <string>; includes a value check, depending
on :attr:`self.debug` and :attr:`self.profile`.
"""
if isinstance(value, (int, float)):
if self.debug:
self.validator.check_svg_type(value, 'number')
if isinstance(value, float) and self.profile == 'tiny':
value = round(value, 4)
return str(value)
|
(self, value)
|
68,423 |
svgwrite.mixins
|
viewbox
|
Specify a rectangle in **user space** (no units allowed) which
should be mapped to the bounds of the viewport established by the
given element.
:param number minx: left border of the viewBox
:param number miny: top border of the viewBox
:param number width: width of the viewBox
:param number height: height of the viewBox
|
def viewbox(self, minx=0, miny=0, width=0, height=0):
""" Specify a rectangle in **user space** (no units allowed) which
should be mapped to the bounds of the viewport established by the
given element.
:param number minx: left border of the viewBox
:param number miny: top border of the viewBox
:param number width: width of the viewBox
:param number height: height of the viewBox
"""
self['viewBox'] = strlist([minx, miny, width, height])
|
(self, minx=0, miny=0, width=0, height=0)
|
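``viewbox()`` together with ``fit()`` or ``stretch()`` (shown earlier) controls how the user-space rectangle is mapped onto the viewport; a sketch with a hypothetical filename:
import svgwrite
dwg = svgwrite.Drawing('scaled.svg', size=('4cm', '3cm'))
dwg.viewbox(0, 0, 400, 300)                            # user-space rectangle for the 4cm x 3cm viewport
dwg.fit(horiz='center', vert='middle', scale='meet')   # keep aspect ratio
# dwg.stretch()                                        # alternative: fill the viewport, ignoring aspect ratio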
68,424 |
svgwrite.drawing
|
write
|
Write XML string to `fileobj`.
:param fileobj: a file-like object
:param pretty: True for easy readable output
:param indent: how much to indent if pretty is enabled, by default 2 spaces
Python 3.x - set encoding at the open command::
open('filename', 'w', encoding='utf-8')
|
def write(self, fileobj, pretty=False, indent=2):
""" Write XML string to `fileobj`.
:param fileobj: a file-like object
:param pretty: True for easy readable output
:param indent: how much to indent if pretty is enabled, by default 2 spaces
Python 3.x - set encoding at the open command::
open('filename', 'w', encoding='utf-8')
"""
# write xml header
fileobj.write('<?xml version="1.0" encoding="utf-8" ?>\n')
# don't use DOCTYPE. It's useless. see also:
# http://tech.groups.yahoo.com/group/svg-developers/message/48562
# write stylesheets
stylesheet_template = '<?xml-stylesheet href="%s" type="text/css" ' \
'title="%s" alternate="%s" media="%s"?>\n'
# removed map(), does not work with Python 3
for stylesheet in self._stylesheets:
fileobj.write(stylesheet_template % stylesheet)
xml_string = self.tostring()
if pretty: # write easy readable XML file
xml_string = pretty_xml(xml_string, indent=indent)
fileobj.write(xml_string)
|
(self, fileobj, pretty=False, indent=2)
|
68,425 |
svgwrite
|
Unit
|
Add units to values.
|
class Unit(object):
""" Add units to values.
"""
def __init__(self, unit='cm'):
""" Unit constructor
:param str unit: specify the unit string
"""
self._unit = unit
def __rmul__(self, other):
""" add unit-string to 'other'. (e.g. 5*cm => '5cm') """
return "%s%s" % (other, self._unit)
def __call__(self, *args):
""" Add unit-strings to all arguments.
:param args: list of values
e.g.: cm(1,2,3) => '1cm,2cm,3cm'
"""
return ','.join(["%s%s" % (arg, self._unit) for arg in args])
|
(unit='cm')
|
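Usage follows directly from the docstrings above; ``Unit`` lives in the top-level ``svgwrite`` package (which, as an assumption based on common usage, also ships ready-made instances such as ``cm`` and ``mm``):
from svgwrite import Unit
cm = Unit('cm')
print(3 * cm)        # '3cm'          (__rmul__)
print(cm(1, 2, 3))   # '1cm,2cm,3cm'  (__call__)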
68,426 |
svgwrite
|
__call__
|
Add unit-strings to all arguments.
:param args: list of values
e.g.: cm(1,2,3) => '1cm,2cm,3cm'
|
def __call__(self, *args):
""" Add unit-strings to all arguments.
:param args: list of values
e.g.: cm(1,2,3) => '1cm,2cm,3cm'
"""
return ','.join(["%s%s" % (arg, self._unit) for arg in args])
|
(self, *args)
|
68,427 |
svgwrite
|
__init__
|
Unit constructor
:param str unit: specify the unit string
|
def __init__(self, unit='cm'):
""" Unit constructor
:param str unit: specify the unit string
"""
self._unit = unit
|
(self, unit='cm')
|
68,428 |
svgwrite
|
__rmul__
|
add unit-string to 'other'. (e.g. 5*cm => '5cm')
|
def __rmul__(self, other):
""" add unit-string to 'other'. (e.g. 5*cm => '5cm') """
return "%s%s" % (other, self._unit)
|
(self, other)
|
68,444 |
svgwrite.utils
|
rgb
|
Convert **r**, **g**, **b** values to a `string`.
:param r: red part
:param g: green part
:param b: blue part
:param string mode: ``'RGB | %'``
:rtype: string
========= =============================================================
mode Description
========= =============================================================
``'RGB'`` returns a rgb-string format: ``'rgb(r, g, b)'``
``'%'`` returns percent-values as rgb-string format: ``'rgb(r%, g%, b%)'``
========= =============================================================
|
def rgb(r=0, g=0, b=0, mode='RGB'):
"""
Convert **r**, **g**, **b** values to a `string`.
:param r: red part
:param g: green part
:param b: blue part
:param string mode: ``'RGB | %'``
:rtype: string
========= =============================================================
mode Description
========= =============================================================
``'RGB'`` returns a rgb-string format: ``'rgb(r, g, b)'``
``'%'`` returns percent-values as rgb-string format: ``'rgb(r%, g%, b%)'``
========= =============================================================
"""
def percent(value):
value = float(value)
if value < 0:
value = 0
if value > 100:
value = 100
return value
if mode.upper() == 'RGB':
return "rgb(%d,%d,%d)" % (int(r) & 255, int(g) & 255, int(b) & 255)
elif mode == "%":
# see http://www.w3.org/TR/SVG11/types.html#DataTypeColor
# percentage is an 'number' value
return "rgb(%d%%,%d%%,%d%%)" % (percent(r), percent(g), percent(b))
else:
raise ValueError("Invalid mode '%s'" % mode)
|
(r=0, g=0, b=0, mode='RGB')
|
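A few worked calls of ``rgb()``, following the two modes described above; note that integer channels are masked with ``& 255`` in 'RGB' mode:
from svgwrite.utils import rgb
rgb(255, 0, 0)             # 'rgb(255,0,0)'
rgb(100, 50, 0, mode='%')  # 'rgb(100%,50%,0%)'
rgb(300, 0, 0)             # 'rgb(44,0,0)'  because 300 & 255 == 44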
68,453 |
heroku3.core
|
from_key
|
Returns an authenticated Heroku instance, via API Key.
|
def from_key(api_key, session=None, **kwargs):
"""Returns an authenticated Heroku instance, via API Key."""
if not session:
session = requests.session()
# If I'm being passed an API key then I should use only this API key;
# if trust_env=True then requests will silently fall back to netrc authentication
session.trust_env = False
h = Heroku(session=session, **kwargs)
# Login.
h.authenticate(api_key)
return h
|
(api_key, session=None, **kwargs)
|
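A sketch of the intended use of ``from_key()``, assuming it is re-exported at the package level and that the returned ``Heroku`` object exposes an ``apps()`` accessor; the key is a placeholder:
import heroku3
heroku_conn = heroku3.from_key('YOUR_HEROKU_API_KEY')   # trust_env=False, so no netrc fallback
apps = heroku_conn.apps()                               # assumed accessor on the Heroku client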
68,459 |
detect_secrets.core.secrets_collection
|
SecretsCollection
| null |
class SecretsCollection:
def __init__(self, root: str = '') -> None:
"""
:param root: if specified, will scan as if the root was the value provided,
rather than the current working directory. We still store results as if
relative to root, since we're running as if it was in a different directory,
rather than scanning a different directory.
"""
self.data: Dict[str, Set[PotentialSecret]] = defaultdict(set)
self.root = root
@classmethod
def load_from_baseline(cls, baseline: Dict[str, Any]) -> 'SecretsCollection':
output = cls()
for filename in baseline['results']:
for item in baseline['results'][filename]:
secret = PotentialSecret.load_secret_from_dict({'filename': filename, **item})
output[convert_local_os_path(filename)].add(secret)
return output
@property
def files(self) -> Set[str]:
return set(self.data.keys())
def scan_files(self, *filenames: str, num_processors: Optional[int] = None) -> None:
"""Just like scan_file, but optimized through parallel processing."""
if len(filenames) == 1:
self.scan_file(filenames[0])
return
if not num_processors:
num_processors = mp.cpu_count()
child_process_settings = get_settings().json()
with mp.Pool(
processes=num_processors,
initializer=configure_settings_from_baseline,
initargs=(child_process_settings,),
) as p:
for secrets in p.imap_unordered(
_scan_file_and_serialize,
[os.path.join(self.root, filename) for filename in filenames],
):
for secret in secrets:
self[os.path.relpath(secret.filename, self.root)].add(secret)
def scan_file(self, filename: str) -> None:
for secret in scan.scan_file(os.path.join(self.root, convert_local_os_path(filename))):
self[convert_local_os_path(filename)].add(secret)
def scan_diff(self, diff: str) -> None:
"""
:raises: UnidiffParseError
"""
try:
for secret in scan.scan_diff(diff):
self[secret.filename].add(secret)
except ImportError: # pragma: no cover
raise NotImplementedError(
'SecretsCollection.scan_diff requires `unidiff` to work. Try pip '
'installing that package, and try again.',
)
def merge(self, old_results: 'SecretsCollection') -> None:
"""
We operate under an assumption that the latest results are always more accurate,
assuming that the baseline is created on the same repository. However, we cannot
merely discard the old results in favor of the new, since there is valuable information
that ought to be preserved: verification of secrets, both automated and manual.
Therefore, this function serves to extract this information from the old results,
and amend the new results with it.
"""
for filename in old_results.files:
if filename not in self.files:
continue
# This allows us to obtain the same secret, by accessing the hash.
mapping = {
secret: secret
for secret in self.data[filename]
}
for old_secret in old_results.data[filename]:
if old_secret not in mapping:
continue
# Only override if there's no newer value.
if mapping[old_secret].is_secret is None:
mapping[old_secret].is_secret = old_secret.is_secret
# If the old value is false, it won't make a difference.
if not mapping[old_secret].is_verified:
mapping[old_secret].is_verified = old_secret.is_verified
def trim(
self,
scanned_results: Optional['SecretsCollection'] = None,
filelist: Optional[List[str]] = None,
) -> None:
"""
Removes invalid entries in the current SecretsCollection.
This behaves *kinda* like set intersection and left-join. That is, for matching files,
a set intersection is performed. For non-matching files, only the files in `self` will
be kept.
This is because we may not be constructing the other SecretsCollection with the same
information as we are with the current SecretsCollection, and we cannot infer based on
incomplete information. As such, we will keep the status quo.
Assumptions:
1. Both `scanned_results` and the current SecretsCollection are constructed using
the same settings (otherwise, we can't determine whether a missing secret is due
to newly filtered secrets, or actually removed).
:param scanned_results: if None, will just clear out non-existent files.
:param filelist: files without secrets are not present in `scanned_results`. Therefore,
by supplying this additional filelist, we can assert that if an entry is missing in
`scanned_results`, it must not have secrets in it.
"""
if scanned_results is None:
scanned_results = SecretsCollection()
filelist = [
filename
for filename in self.files
if not os.path.exists(filename)
]
if not filelist:
fileset = set()
else:
fileset = set(filelist)
# Unfortunately, we can't merely do a set intersection since we want to update the line
# numbers (if applicable). Therefore, this does it manually.
result: Dict[str, Set[PotentialSecret]] = defaultdict(set)
for filename in scanned_results.files:
if filename not in self.files:
continue
# We construct this so we can get O(1) retrieval of secrets.
existing_secret_map = {secret: secret for secret in self[filename]}
for secret in scanned_results[filename]:
if secret not in existing_secret_map:
continue
# Currently, we assume that the `scanned_results` have no labelled data, so
# we only want to obtain the latest line number from it.
existing_secret = existing_secret_map[secret]
if existing_secret.line_number:
# Only update line numbers if we're tracking them.
existing_secret.line_number = secret.line_number
result[filename].add(existing_secret)
for filename in self.files:
# If this is already populated by scanned_results, then the set intersection
# is already completed.
if filename in result:
continue
# All secrets relating to that file were removed.
# We know this because:
# 1. It's a file that was scanned (in filelist)
# 2. It would have been in the baseline, if there were secrets...
# 3. ...but it isn't.
if filename in fileset:
continue
result[filename] = self[filename]
self.data = result
def json(self) -> Dict[str, Any]:
"""Custom JSON encoder"""
output = defaultdict(list)
for filename, secret in self:
output[filename].append(secret.json())
return dict(output)
def exactly_equals(self, other: Any) -> bool:
return self.__eq__(other, strict=True) # type: ignore
def __getitem__(self, filename: str) -> Set[PotentialSecret]:
return self.data[filename]
def __setitem__(self, filename: str, value: Set[PotentialSecret]) -> None:
self.data[filename] = value
def __iter__(self) -> Generator[Tuple[str, PotentialSecret], None, None]:
for filename in sorted(self.files):
secrets = self[filename]
# NOTE: If line numbers aren't supplied, they are supposed to default to 0.
for secret in sorted(
secrets,
key=lambda secret: (
getattr(secret, 'line_number', 0),
secret.secret_hash,
secret.type,
),
):
yield filename, secret
def __bool__(self) -> bool:
# This checks whether there are secrets, rather than just empty files.
# Empty files can occur with SecretsCollection subtraction.
return bool(list(self))
def __eq__(self, other: Any, strict: bool = False) -> bool:
"""
:param strict: if strict, will return False even if secrets match
(e.g. if line numbers are different)
"""
if not isinstance(other, SecretsCollection):
raise NotImplementedError
if self.files != other.files:
return False
for filename in self.files:
self_mapping = {
(secret.secret_hash, secret.type): secret for secret in self[filename]
}
other_mapping = {
(secret.secret_hash, secret.type): secret for secret in other[filename]
}
# Since PotentialSecret is hashable, we compare their identities through this.
if set(self_mapping.values()) != set(other_mapping.values()):
return False
if not strict:
continue
for secretA in self_mapping.values():
secretB = other_mapping[(secretA.secret_hash, secretA.type)]
valuesA = vars(secretA)
valuesA.pop('secret_value')
valuesB = vars(secretB)
valuesB.pop('secret_value')
if valuesA['line_number'] == 0 or valuesB['line_number'] == 0:
# If line numbers are not provided (for either one), then don't compare
# line numbers.
valuesA.pop('line_number')
valuesB.pop('line_number')
if valuesA != valuesB:
return False
return True
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __sub__(self, other: Any) -> 'SecretsCollection':
"""This behaves like set subtraction."""
if not isinstance(other, SecretsCollection):
raise NotImplementedError
# We want to create a copy to follow convention and adhere to the principle
# of least surprise.
output = SecretsCollection()
for filename in other.files:
if filename not in self.files:
continue
output[filename] = self[filename] - other[filename]
for filename in self.files:
if filename in other.files:
continue
output[filename] = self[filename]
return output
|
(root: str = '') -> None
|
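Scanning requires detector settings to be configured (``scan_files`` forwards ``get_settings().json()`` to its worker processes). A hedged library-usage sketch, assuming ``detect_secrets.settings.default_settings`` is available as a context manager and using a hypothetical path:
from detect_secrets.core.secrets_collection import SecretsCollection
from detect_secrets.settings import default_settings
secrets = SecretsCollection()
with default_settings():
    secrets.scan_file('config/production.env')
print(secrets.json())   # {filename: [serialized PotentialSecret entries], ...}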
68,460 |
detect_secrets.core.secrets_collection
|
__bool__
| null |
def __bool__(self) -> bool:
# This checks whether there are secrets, rather than just empty files.
# Empty files can occur with SecretsCollection subtraction.
return bool(list(self))
|
(self) -> bool
|
68,461 |
detect_secrets.core.secrets_collection
|
__eq__
|
:param strict: if strict, will return False even if secrets match
(e.g. if line numbers are different)
|
def __eq__(self, other: Any, strict: bool = False) -> bool:
"""
:param strict: if strict, will return False even if secrets match
(e.g. if line numbers are different)
"""
if not isinstance(other, SecretsCollection):
raise NotImplementedError
if self.files != other.files:
return False
for filename in self.files:
self_mapping = {
(secret.secret_hash, secret.type): secret for secret in self[filename]
}
other_mapping = {
(secret.secret_hash, secret.type): secret for secret in other[filename]
}
# Since PotentialSecret is hashable, we compare their identities through this.
if set(self_mapping.values()) != set(other_mapping.values()):
return False
if not strict:
continue
for secretA in self_mapping.values():
secretB = other_mapping[(secretA.secret_hash, secretA.type)]
valuesA = vars(secretA)
valuesA.pop('secret_value')
valuesB = vars(secretB)
valuesB.pop('secret_value')
if valuesA['line_number'] == 0 or valuesB['line_number'] == 0:
# If line numbers are not provided (for either one), then don't compare
# line numbers.
valuesA.pop('line_number')
valuesB.pop('line_number')
if valuesA != valuesB:
return False
return True
|
(self, other: Any, strict: bool = False) -> bool
|
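A brief sketch of strict versus non-strict comparison, assuming baseline and rescan are two populated SecretsCollection instances that found the same secrets at shifted (non-zero) line numbers:
baseline == rescan                  # True: per-file secret hashes and types match
baseline.exactly_equals(rescan)     # False: strict mode also compares fields such as line_number
baseline != rescan                  # __ne__ simply negates __eq__, so this is False here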
68,462 |
detect_secrets.core.secrets_collection
|
__getitem__
| null |
def __getitem__(self, filename: str) -> Set[PotentialSecret]:
return self.data[filename]
|
(self, filename: str) -> Set[detect_secrets.core.potential_secret.PotentialSecret]
|
68,463 |
detect_secrets.core.secrets_collection
|
__init__
|
:param root: if specified, will scan as if the root was the value provided,
rather than the current working directory. We still store results as if
relative to root, since we're running as if it was in a different directory,
rather than scanning a different directory.
|
def __init__(self, root: str = '') -> None:
"""
:param root: if specified, will scan as if the root was the value provided,
rather than the current working directory. We still store results as if
relative to root, since we're running as if it was in a different directory,
rather than scanning a different directory.
"""
self.data: Dict[str, Set[PotentialSecret]] = defaultdict(set)
self.root = root
|
(self, root: str = '') -> NoneType
|
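The root parameter only changes where files are read from; results stay keyed by the relative path. A small sketch with illustrative paths:
secrets = SecretsCollection(root='services/api')
with default_settings():
    secrets.scan_file('handlers/auth.py')   # reads services/api/handlers/auth.py
# Findings are stored under the relative key 'handlers/auth.py', not the joined path.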
68,464 |
detect_secrets.core.secrets_collection
|
__iter__
| null |
def __iter__(self) -> Generator[Tuple[str, PotentialSecret], None, None]:
for filename in sorted(self.files):
secrets = self[filename]
# NOTE: If line numbers aren't supplied, they are supposed to default to 0.
for secret in sorted(
secrets,
key=lambda secret: (
getattr(secret, 'line_number', 0),
secret.secret_hash,
secret.type,
),
):
yield filename, secret
|
(self) -> Generator[Tuple[str, detect_secrets.core.potential_secret.PotentialSecret], NoneType, NoneType]
|
68,466 |
detect_secrets.core.secrets_collection
|
__setitem__
| null |
def __setitem__(self, filename: str, value: Set[PotentialSecret]) -> None:
self.data[filename] = value
|
(self, filename: str, value: Set[detect_secrets.core.potential_secret.PotentialSecret]) -> NoneType
|
68,467 |
detect_secrets.core.secrets_collection
|
__sub__
|
This behaves like set subtraction.
|
def __sub__(self, other: Any) -> 'SecretsCollection':
"""This behaves like set subtraction."""
if not isinstance(other, SecretsCollection):
raise NotImplementedError
# We want to create a copy to follow convention and adhere to the principle
# of least surprise.
output = SecretsCollection()
for filename in other.files:
if filename not in self.files:
continue
output[filename] = self[filename] - other[filename]
for filename in self.files:
if filename in other.files:
continue
output[filename] = self[filename]
return output
|
(self, other: Any) -> detect_secrets.core.secrets_collection.SecretsCollection
|
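Subtraction is typically used to report only new findings relative to a baseline; a sketch assuming both collections are already populated:
new_findings = current_scan - baseline      # per-file set subtraction, returns a new collection
if new_findings:                            # __bool__ checks for secrets, not just empty file keys
    for filename, secret in new_findings:
        print(f'new {secret.type} in {filename}')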
68,468 |
detect_secrets.core.secrets_collection
|
exactly_equals
| null |
def exactly_equals(self, other: Any) -> bool:
return self.__eq__(other, strict=True) # type: ignore
|
(self, other: Any) -> bool
|
68,469 |
detect_secrets.core.secrets_collection
|
json
|
Custom JSON encoder
|
def json(self) -> Dict[str, Any]:
"""Custom JSON encoder"""
output = defaultdict(list)
for filename, secret in self:
output[filename].append(secret.json())
return dict(output)
|
(self) -> Dict[str, Any]
|
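The encoder returns a plain dict keyed by filename; a minimal sketch of persisting it (the output path is an assumption, and this is not the full baseline format written by the CLI):
import json

with open('partial-results.json', 'w') as f:    # hypothetical output path
    json.dump(secrets.json(), f, indent=2)      # {filename: [secret.json(), ...]}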
68,470 |
detect_secrets.core.secrets_collection
|
merge
|
We operate under an assumption that the latest results are always more accurate,
assuming that the baseline is created on the same repository. However, we cannot
merely discard the old results in favor of the new, since there is valuable information
that ought to be preserved: verification of secrets, both automated and manual.
Therefore, this function serves to extract this information from the old results,
and amend the new results with it.
|
def merge(self, old_results: 'SecretsCollection') -> None:
"""
We operate under an assumption that the latest results are always more accurate,
assuming that the baseline is created on the same repository. However, we cannot
merely discard the old results in favor of the new, since there is valuable information
that ought to be preserved: verification of secrets, both automated and manual.
Therefore, this function serves to extract this information from the old results,
and amend the new results with it.
"""
for filename in old_results.files:
if filename not in self.files:
continue
# This allows us to obtain the same secret, by accessing the hash.
mapping = {
secret: secret
for secret in self.data[filename]
}
for old_secret in old_results.data[filename]:
if old_secret not in mapping:
continue
# Only override if there's no newer value.
if mapping[old_secret].is_secret is None:
mapping[old_secret].is_secret = old_secret.is_secret
# If the old value is false, it won't make a difference.
if not mapping[old_secret].is_verified:
mapping[old_secret].is_verified = old_secret.is_verified
|
(self, old_results: detect_secrets.core.secrets_collection.SecretsCollection) -> NoneType
|
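A sketch of the intended merge flow: a fresh scan keeps its own findings but inherits audit labels and verification flags from the previous baseline of the same repository (paths and names are illustrative):
fresh = SecretsCollection()
with default_settings():
    fresh.scan_file('config/app.cfg')       # hypothetical path, same repo as the old baseline
fresh.merge(old_baseline)                   # copies is_secret / is_verified where not already set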
68,471 |
detect_secrets.core.secrets_collection
|
scan_diff
|
:raises: UnidiffParseError
|
def scan_diff(self, diff: str) -> None:
"""
:raises: UnidiffParseError
"""
try:
for secret in scan.scan_diff(diff):
self[secret.filename].add(secret)
except ImportError: # pragma: no cover
raise NotImplementedError(
'SecretsCollection.scan_diff requires `unidiff` to work. Try pip '
'installing that package, and try again.',
)
|
(self, diff: str) -> NoneType
|
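scan_diff takes a unified-diff string and needs the optional unidiff dependency; a hedged sketch where the diff comes from git (the subprocess call is illustrative, not part of the library):
import subprocess

diff = subprocess.check_output(['git', 'diff', 'HEAD~1'], text=True)   # any unified diff works
secrets = SecretsCollection()
with default_settings():
    secrets.scan_diff(diff)     # may raise UnidiffParseError on malformed input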
68,472 |
detect_secrets.core.secrets_collection
|
scan_file
| null |
def scan_file(self, filename: str) -> None:
for secret in scan.scan_file(os.path.join(self.root, convert_local_os_path(filename))):
self[convert_local_os_path(filename)].add(secret)
|
(self, filename: str) -> NoneType
|
68,473 |
detect_secrets.core.secrets_collection
|
scan_files
|
Just like scan_file, but optimized through parallel processing.
|
def scan_files(self, *filenames: str, num_processors: Optional[int] = None) -> None:
"""Just like scan_file, but optimized through parallel processing."""
if len(filenames) == 1:
self.scan_file(filenames[0])
return
if not num_processors:
num_processors = mp.cpu_count()
child_process_settings = get_settings().json()
with mp.Pool(
processes=num_processors,
initializer=configure_settings_from_baseline,
initargs=(child_process_settings,),
) as p:
for secrets in p.imap_unordered(
_scan_file_and_serialize,
[os.path.join(self.root, filename) for filename in filenames],
):
for secret in secrets:
self[os.path.relpath(secret.filename, self.root)].add(secret)
|
(self, *filenames: str, num_processors: Optional[int] = None) -> NoneType
|
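A parallel-scanning sketch; num_processors defaults to mp.cpu_count() as shown above, and the file names are placeholders:
secrets = SecretsCollection(root='src')
with default_settings():
    secrets.scan_files('app.py', 'settings.py', 'db/models.py', num_processors=4)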
68,474 |
detect_secrets.core.secrets_collection
|
trim
|
Removes invalid entries in the current SecretsCollection.
This behaves *kinda* like set intersection and left-join. That is, for matching files,
a set intersection is performed. For non-matching files, only the files in `self` will
be kept.
This is because we may not be constructing the other SecretsCollection with the same
information as we are with the current SecretsCollection, and we cannot infer based on
incomplete information. As such, we will keep the status quo.
Assumptions:
1. Both `scanned_results` and the current SecretsCollection are constructed using
the same settings (otherwise, we can't determine whether a missing secret is due
to newly filtered secrets, or actually removed).
:param scanned_results: if None, will just clear out non-existent files.
:param filelist: files without secrets are not present in `scanned_results`. Therefore,
by supplying this additional filelist, we can assert that if an entry is missing in
`scanned_results`, it must not have secrets in it.
|
def trim(
self,
scanned_results: Optional['SecretsCollection'] = None,
filelist: Optional[List[str]] = None,
) -> None:
"""
Removes invalid entries in the current SecretsCollection.
This behaves *kinda* like set intersection and left-join. That is, for matching files,
a set intersection is performed. For non-matching files, only the files in `self` will
be kept.
This is because we may not be constructing the other SecretsCollection with the same
information as we are with the current SecretsCollection, and we cannot infer based on
incomplete information. As such, we will keep the status quo.
Assumptions:
1. Both `scanned_results` and the current SecretsCollection are constructed using
the same settings (otherwise, we can't determine whether a missing secret is due
to newly filtered secrets, or actually removed).
:param scanned_results: if None, will just clear out non-existent files.
:param filelist: files without secrets are not present in `scanned_results`. Therefore,
by supplying this additional filelist, we can assert that if an entry is missing in
`scanned_results`, it must not have secrets in it.
"""
if scanned_results is None:
scanned_results = SecretsCollection()
filelist = [
filename
for filename in self.files
if not os.path.exists(filename)
]
if not filelist:
fileset = set()
else:
fileset = set(filelist)
# Unfortunately, we can't merely do a set intersection since we want to update the line
# numbers (if applicable). Therefore, this does it manually.
result: Dict[str, Set[PotentialSecret]] = defaultdict(set)
for filename in scanned_results.files:
if filename not in self.files:
continue
# We construct this so we can get O(1) retrieval of secrets.
existing_secret_map = {secret: secret for secret in self[filename]}
for secret in scanned_results[filename]:
if secret not in existing_secret_map:
continue
# Currently, we assume that the `scanned_results` have no labelled data, so
# we only want to obtain the latest line number from it.
existing_secret = existing_secret_map[secret]
if existing_secret.line_number:
# Only update line numbers if we're tracking them.
existing_secret.line_number = secret.line_number
result[filename].add(existing_secret)
for filename in self.files:
# If this is already populated by scanned_results, then the set intersection
# is already completed.
if filename in result:
continue
        # All secrets relating to that file were removed.
# We know this because:
# 1. It's a file that was scanned (in filelist)
# 2. It would have been in the baseline, if there were secrets...
# 3. ...but it isn't.
if filename in fileset:
continue
result[filename] = self[filename]
self.data = result
|
(self, scanned_results: Optional[detect_secrets.core.secrets_collection.SecretsCollection] = None, filelist: Optional[List[str]] = None) -> NoneType
|
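A sketch of refreshing a baseline with trim: rescan a subset of files, then drop baseline entries that the rescan (plus the list of files actually scanned) proves are gone. Variable names are illustrative:
# `scanned_filenames` is every file that was rescanned; files present there but absent
# from `rescan` are therefore known to contain no secrets any more.
baseline.trim(scanned_results=rescan, filelist=scanned_filenames)

# With no arguments, trim only clears entries for files that no longer exist on disk.
baseline.trim()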
68,484 |
exe_kg_lib.classes.exe_kg
|
ExeKG
| null |
class ExeKG:
def __init__(self, input_exe_kg_path: str = None):
"""
Args:
input_exe_kg_path: path of KG to be executed
acts as switch for KG execution mode (if filled, mode is on)
"""
self.top_level_schema = KGSchema.from_schema_info(KG_SCHEMAS["Data Science"]) # top-level KG schema
self.bottom_level_schemata = {}
# top-level KG schema entities
self.atomic_task = Entity(self.top_level_schema.namespace.AtomicTask)
self.atomic_method = Entity(self.top_level_schema.namespace.AtomicMethod)
self.data_entity = Entity(self.top_level_schema.namespace.DataEntity)
self.pipeline = Entity(self.top_level_schema.namespace.Pipeline)
self.data = Entity(self.top_level_schema.namespace.Data)
self.data_semantics = Entity(self.top_level_schema.namespace.DataSemantics)
self.data_structure = Entity(self.top_level_schema.namespace.DataStructure)
# self.input_kg: KG eventually filled with 3 KG schemas and the input executable KG in case of KG execution
self.input_kg = Graph(bind_namespaces="rdflib")
if input_exe_kg_path: # KG execution mode
self.input_kg.parse(input_exe_kg_path, format="n3") # parse input executable KG
check_kg_executability(self.input_kg)
all_ns = [n for n in self.input_kg.namespace_manager.namespaces()]
bottom_level_schema_info_set = False # flag indicating that a bottom-level schema was found
for schema_name, schema_info in KG_SCHEMAS.items(): # search for used bottom-level schema
if (
schema_name == "Data Science" # or schema_name == "Visualization"
): # skip top-level KG schema and Visualization schema that is always used
continue
if (schema_info["namespace_prefix"], URIRef(schema_info["namespace"])) in all_ns:
# bottom-level schema found
self.bottom_level_schemata[schema_info["namespace_prefix"]] = KGSchema.from_schema_info(schema_info)
bottom_level_schema_info_set = True
if not bottom_level_schema_info_set: # no bottom-level schema found, input executable KG is invalid
print("Input executable KG did not have any bottom level KG schemas")
exit(1)
else: # KG construction mode
for schema_name, schema_info in KG_SCHEMAS.items(): # search for used bottom-level schema
if (
schema_name == "Data Science" # or schema_name == "Visualization"
): # skip top-level KG schema and Visualization schema that is always used
continue
self.bottom_level_schemata[schema_info["namespace_prefix"]] = KGSchema.from_schema_info(schema_info)
bottom_level_schemata_kgs = [kg_schema.kg for kg_schema in self.bottom_level_schemata.values()]
self.input_kg += self.top_level_schema.kg # + self.visu_schema.kg # combine all KG schemas in input KG
for bottom_level_schema_kg in bottom_level_schemata_kgs:
self.input_kg += bottom_level_schema_kg
self.output_kg = Graph(bind_namespaces="rdflib") # KG to be filled while constructing executable KG
self._bind_used_namespaces([self.input_kg, self.output_kg])
# below variables are filled in self._parse_kgs()
self.task_type_dict = {} # dict for uniquely naming each new pipeline task
self.method_type_dict = {} # dict for uniquely naming each new pipeline method
self.atomic_task_list = [] # list for storing the available sub-classes of ds:AtomicTask
self.atomic_method_list = [] # list for storing the available sub-classes of ds:AtomicMethod
self.data_type_list = [] # list for storing the available sub-classes of ds:DataEntity
self.data_semantics_list = [] # list for storing the available sub-classes of ds:DataSemantics
self.data_structure_list = [] # list for storing the available sub-classes of ds:DataStructure
self.existing_data_entity_list = (
[]
) # contains existing data entities that are output entities of previous tasks during KG construction
self.last_created_task = (
None # last created pipeline task, for connecting consecutive pipeline tasks during KG construction
)
self.canvas_task_created = False # indicates if canvas task was created during KG construction, and used for hiding the other Visualization tasks in CLI
self._parse_kgs()
def _bind_used_namespaces(self, kgs: List[Graph]):
"""
Binds top-level, bottom-level and Visualization KG schemas' namespaces with their prefixes
Adds these bindings to the Graphs of kgs list
Args:
kgs: list of Graph objects to which the namespace bindings are added
"""
for kg in kgs:
kg.bind(self.top_level_schema.namespace_prefix, self.top_level_schema.namespace)
for bottom_level_kg_schema in self.bottom_level_schemata.values():
kg.bind(
bottom_level_kg_schema.namespace_prefix,
bottom_level_kg_schema.namespace,
)
def _parse_kgs(self) -> None:
"""
Fills lists with subclasses of top-level KG schema classes and initializes dicts used for unique naming
"""
atomic_task_subclasses = get_subclasses_of(self.atomic_task.iri, self.input_kg)
for t in list(atomic_task_subclasses):
task = Entity(t[0], self.atomic_task)
self.atomic_task_list.append(task)
self.task_type_dict[task.name] = 1
atomic_method_subclasses = get_subclasses_of(self.atomic_method.iri, self.input_kg)
for m in list(atomic_method_subclasses):
method = Entity(m[0], self.atomic_method)
self.atomic_method_list.append(method)
self.method_type_dict[method.name] = 1
data_type_subclasses = get_subclasses_of(self.data_entity.iri, self.input_kg)
for d in list(data_type_subclasses):
data_type = Entity(d[0], self.data_entity)
self.data_type_list.append(data_type)
data_semantics_subclasses = get_subclasses_of(self.data_semantics.iri, self.top_level_schema.kg)
for d in list(data_semantics_subclasses):
if d[0] == self.data_entity.iri:
continue
data_semantics = Entity(d[0], self.data_semantics)
self.data_semantics_list.append(data_semantics)
data_structure_subclasses = get_subclasses_of(self.data_structure.iri, self.top_level_schema.kg)
for d in list(data_structure_subclasses):
if d[0] == self.data_entity.iri:
continue
data_structure = Entity(d[0], self.data_structure)
self.data_structure_list.append(data_structure)
def create_pipeline_task(self, pipeline_name: str, input_data_path: str) -> Task:
"""
Instantiates and adds a new pipeline task entity to self.output_kg
Args:
pipeline_name: name for the pipeline
input_data_path: path for the input data to be used by the pipeline's tasks
Returns:
Task: created pipeline
"""
pipeline = create_pipeline_task(
self.top_level_schema.namespace,
self.pipeline,
self.output_kg,
pipeline_name,
input_data_path,
)
self.last_created_task = pipeline
return pipeline
def create_data_entity(
self,
name: str,
source_value: str,
data_semantics_name: str,
data_structure_name: str,
) -> DataEntity:
"""
Creates a DataEntity object
Args:
name: name of the data entity
source_value: name of the data source corresponding to a column of the data
data_semantics_name: name of the data semantics entity
data_structure_name: name of the data structure entity
Returns:
DataEntity: object initialized with the given parameter values
"""
return DataEntity(
self.top_level_schema.namespace + name,
self.data_entity,
source_value,
self.top_level_schema.namespace + data_semantics_name,
self.top_level_schema.namespace + data_structure_name,
)
def add_task(
self,
kg_schema_short: str,
task: str,
input_data_entity_dict: Dict[str, List[DataEntity]],
method: str,
properties_dict: Dict[str, Union[str, int, float]],
) -> Task:
"""
Instantiates and adds a new task entity to self.output_kg
Components attached to the task during creation: input and output data entities, and a method with properties
Args:
kg_schema_short: abbreviated name of the KG schema in which the task and method belong
task: task name
input_data_entity_dict: keys -> input names of the specified task
values -> lists of DataEntity objects to be added as input to the task
method: method name
properties_dict: keys -> property names of the specified method
values -> values to be added as parameters to the method
Returns:
Task: object of the created task
"""
kg_schema_to_use = self.bottom_level_schemata[kg_schema_short]
relation_iri = (
self.top_level_schema.namespace.hasNextTask
if self.last_created_task.type != "Pipeline"
else self.top_level_schema.namespace.hasStartTask
) # use relation depending on the previous task
# instantiate task and link it with the previous one
parent_task = Task(kg_schema_to_use.namespace + task, self.atomic_task)
added_entity = add_instance_from_parent_with_relation(
kg_schema_to_use.namespace,
self.output_kg,
parent_task,
relation_iri,
self.last_created_task,
name_instance(self.task_type_dict, self.method_type_dict, parent_task),
)
next_task = Task.from_entity(added_entity) # create Task object from Entity object
# instantiate and add given input data entities to the task
self._add_inputs_to_task(kg_schema_to_use.namespace, next_task, input_data_entity_dict)
# instantiate and add output data entities to the task, as specified in the KG schema
self._add_outputs_to_task(next_task)
method_parent = Entity(kg_schema_to_use.namespace + method, self.atomic_method)
# fetch compatible methods and their properties from KG schema
results = list(
get_method_properties_and_methods(
self.input_kg,
self.top_level_schema.namespace_prefix,
next_task.parent_entity.iri,
)
)
chosen_property_method = next(
filter(lambda pair: pair[1].split("#")[1] == method, results), None
) # match given method_type with query result
if chosen_property_method is None:
print(f"Property connecting task of type {task} with method of type {method} not found")
exit(1)
# instantiate method and link it with the task using the appropriate chosen_property_method[0] relation
method_entity = add_instance_from_parent_with_relation(
kg_schema_to_use.namespace,
self.output_kg,
method_parent,
chosen_property_method[0],
next_task,
name_instance(self.task_type_dict, self.method_type_dict, method_parent),
)
# fetch compatible data properties from KG schema
property_list = get_data_properties_plus_inherited_by_class_iri(self.input_kg, method_parent.iri)
# add data properties to the task with given values
for pair in property_list:
property_iri = pair[0]
property_name = property_iri.split("#")[1]
range_iri = pair[1]
input_property = Literal(
lexical_or_value=properties_dict[property_name],
datatype=range_iri,
)
add_literal(self.output_kg, method_entity, property_iri, input_property)
self.last_created_task = next_task # store created task
return next_task
def _add_inputs_to_task(
self,
namespace: Namespace,
task_entity: Task,
input_data_entity_dict: Dict[str, List[DataEntity]] = None,
) -> None:
"""
Instantiates and adds given input data entities to the given task of self.output_kg
if input_data_entity_dict is None, user is asked to specify input data entities
Args:
task_entity: the task to add the input to
input_data_entity_dict: keys -> input entity names corresponding to the given task as defined in the chosen bottom-level KG schema
values -> list of corresponding data entities to be added as input to the task
"""
use_cli = input_data_entity_dict is None
# fetch compatible inputs from KG schema
results = list(
get_input_properties_and_inputs(
self.input_kg,
self.top_level_schema.namespace_prefix,
task_entity.parent_entity.iri,
)
)
# task_type_index was incremented when creating the task entity
# reset the index to match the currently created task's index
task_type_index = self.task_type_dict[task_entity.type] - 1
for _, input_entity_iri, data_structure_iri in results:
input_entity_name = input_entity_iri.split("#")[1]
if not use_cli:
input_data_entity_list = input_data_entity_dict[input_entity_name]
else:
# use CLI
print(f"Specify input corresponding to {input_entity_name}")
input_data_entity_list = get_input_for_existing_data_entities(self.existing_data_entity_list)
input_data_entity_list += get_input_for_new_data_entities(
self.data_semantics_list,
self.data_structure_list,
namespace,
self.data_entity,
)
same_input_index = 1
for input_data_entity in input_data_entity_list:
# instantiate data entity corresponding to the found input_entity_name
data_entity_iri = input_entity_iri + str(task_type_index) + "_" + str(same_input_index)
# instantiate given data entity
add_data_entity_instance(
self.output_kg,
self.data,
self.top_level_schema.kg,
self.top_level_schema.namespace,
input_data_entity,
)
# instantiate and attach data entity with reference to the given data entity
data_entity = DataEntity(
data_entity_iri,
DataEntity(input_entity_iri, self.data_entity),
has_reference=input_data_entity.iri,
has_data_structure_iri=data_structure_iri,
)
add_and_attach_data_entity(
self.output_kg,
self.data,
self.top_level_schema.kg,
self.top_level_schema.namespace,
data_entity,
self.top_level_schema.namespace.hasInput,
task_entity,
)
task_entity.input_dict[input_entity_name] = data_entity
same_input_index += 1
if use_cli:
check_kg_executability(self.output_kg)
def _add_outputs_to_task(self, task_entity: Task) -> None:
"""
Instantiates and adds output data entities to the given task of self.output_kg, based on the task's definition in the KG schema
Args:
task_entity: the task to add the output to
"""
# fetch compatible outputs from KG schema
results = list(
get_output_properties_and_outputs(
self.input_kg,
self.top_level_schema.namespace_prefix,
task_entity.parent_entity.iri,
)
)
# task_type_index was incremented when creating the task entity
# reset the index to match the currently created task's index
task_type_index = self.task_type_dict[task_entity.type] - 1
for output_property, output_parent_entity_iri, data_structure_iri in results:
# instantiate and add data entity
output_data_entity_iri = output_parent_entity_iri + str(task_type_index)
output_data_entity = DataEntity(
output_data_entity_iri,
DataEntity(output_parent_entity_iri, self.data_entity),
has_data_structure_iri=data_structure_iri,
)
add_and_attach_data_entity(
self.output_kg,
self.data,
self.top_level_schema.kg,
self.top_level_schema.namespace,
output_data_entity,
self.top_level_schema.namespace.hasOutput,
task_entity,
)
task_entity.output_dict[output_parent_entity_iri.split("#")[1]] = output_data_entity
self.existing_data_entity_list.append(output_data_entity)
def _create_next_task_cli(self) -> Union[None, Task]:
"""
Instantiates and adds task (without method) based on user input to self.output_kg
Adds task's output data entities to self.existing_data_entity_list
Returns:
None: in case user wants to end the pipeline creation
Task: object of the created task
"""
print("Please choose the next task")
for i, t in enumerate(self.atomic_task_list):
if not self.canvas_task_created and t.name == "PlotTask":
continue
if self.canvas_task_created and t.name == "CanvasTask":
continue
print(f"\t{str(i)}. {t.name}")
print(f"\t{str(-1)}. End pipeline")
next_task_id = int(input())
if next_task_id == -1:
return None
next_task_parent = self.atomic_task_list[next_task_id]
relation_iri = (
self.top_level_schema.namespace.hasNextTask
if self.last_created_task.type != "Pipeline"
else self.top_level_schema.namespace.hasStartTask
) # use relation depending on the previous task
# instantiate task and link it with the previous one
task_entity = add_instance_from_parent_with_relation(
next_task_parent.namespace,
self.output_kg,
next_task_parent,
relation_iri,
self.last_created_task,
name_instance(self.task_type_dict, self.method_type_dict, next_task_parent),
)
task_entity = Task(task_entity.iri, task_entity.parent_entity) # create Task object from Entity object's info
# instantiate and add input data entities to the task based on user input
self._add_inputs_to_task(next_task_parent.namespace, task_entity)
# instantiate and add output data entities to the task, as specified in the KG schema
self._add_outputs_to_task(task_entity)
self.last_created_task = task_entity
if task_entity.type == "CanvasTask":
self.canvas_task_created = True
return task_entity
def _create_method(self, task_to_attach_to: Entity) -> None:
"""
Instantiate and attach method to task of self.output_kg
Args:
task_to_attach_to: the task to attach the created method to
"""
print(f"Please choose a method for {task_to_attach_to.type}:")
# fetch compatible methods and their properties from KG schema
results = list(
get_method_properties_and_methods(
self.input_kg,
self.top_level_schema.namespace_prefix,
task_to_attach_to.parent_entity.iri,
)
)
for i, pair in enumerate(results):
tmp_method = pair[1].split("#")[1]
print(f"\t{str(i)}. {tmp_method}")
method_id = int(input())
selected_property_and_method = results[method_id]
method_parent = next(
filter(
lambda m: m.iri == selected_property_and_method[1],
self.atomic_method_list,
),
None,
)
# instantiate method and link it with the task using the appropriate selected_property_and_method[0] relation
add_instance_from_parent_with_relation(
task_to_attach_to.namespace,
self.output_kg,
method_parent,
selected_property_and_method[0],
task_to_attach_to,
name_instance(self.task_type_dict, self.method_type_dict, method_parent),
)
# fetch compatible data properties from KG schema
property_list = get_data_properties_plus_inherited_by_class_iri(self.input_kg, method_parent.iri)
if property_list:
print(f"Please enter requested properties for {method_parent.name}:")
# add data properties to the task with given values
for pair in property_list:
property_instance = URIRef(pair[0])
range = pair[1].split("#")[1]
range_iri = pair[1]
input_property = Literal(
lexical_or_value=input("\t{} in range({}): ".format(pair[0].split("#")[1], range)),
datatype=range_iri,
)
add_literal(self.output_kg, task_to_attach_to, property_instance, input_property)
check_kg_executability(self.output_kg)
def start_pipeline_creation(self, pipeline_name: str, input_data_path: str) -> None:
"""
Handles the pipeline creation through CLI
Args:
pipeline_name: name for the pipeline
input_data_path: path for the input data to be used by the pipeline's tasks
"""
pipeline = create_pipeline_task(
self.top_level_schema.namespace,
self.pipeline,
self.output_kg,
pipeline_name,
input_data_path,
)
self.last_created_task = pipeline
while True:
next_task = self._create_next_task_cli()
if next_task is None:
break
self._create_method(next_task)
def save_created_kg(self, file_path: str) -> None:
"""
Saves self.output_kg to a file
Args:
file_path: path of the output file
"""
check_kg_executability(self.output_kg)
dir_path = os.path.dirname(file_path)
os.makedirs(dir_path, exist_ok=True)
self.output_kg.serialize(destination=file_path)
print(f"Executable KG saved in {file_path}")
def _property_value_to_field_value(self, property_value: str) -> Union[str, DataEntity]:
"""
Converts property value to Python class field value
If property_value is not a data entity's IRI, it is returned as is
Else, its property values are converted recursively and stored in a DataEntity object
Args:
property_value: value of the property as found in KG
Returns:
str: property_value parameter as is
DataEntity: object containing parsed data entity properties
"""
if "#" in property_value:
data_entity = self._parse_data_entity_by_iri(property_value)
if data_entity is None:
return property_value
return data_entity
return property_value
def _parse_data_entity_by_iri(self, in_out_data_entity_iri: str) -> Optional[DataEntity]:
"""
Parses an input or output data entity of self.input_kg and stores the parsed info in a Python object
Args:
in_out_data_entity_iri: IRI of the KG entity to parse
Returns:
None: if given IRI does not belong to an instance of a sub-class of self.top_level_schema.namespace.DataEntity
DataEntity: object with data entity's parsed properties
"""
# fetch type of entity with given IRI
query_result = get_first_query_result_if_exists(
query_entity_parent_iri,
self.input_kg,
in_out_data_entity_iri,
self.top_level_schema.namespace.DataEntity,
)
if query_result is None:
return None
data_entity_parent_iri = str(query_result[0])
# fetch IRI of data entity that is referenced by the given entity
query_result = get_first_query_result_if_exists(
query_data_entity_reference_iri,
self.input_kg,
self.top_level_schema.namespace_prefix,
in_out_data_entity_iri,
)
if query_result is None: # no referenced data entity found
data_entity_ref_iri = in_out_data_entity_iri
else:
data_entity_ref_iri = str(query_result[0])
# create DataEntity object to store all the parsed properties
data_entity = DataEntity(in_out_data_entity_iri, Entity(data_entity_parent_iri))
data_entity.has_reference = data_entity_ref_iri.split("#")[1]
for s, p, o in self.input_kg.triples((URIRef(data_entity_ref_iri), None, None)):
# parse property name and value
field_name = property_name_to_field_name(str(p))
if not hasattr(data_entity, field_name) or field_name == "type":
continue
field_value = self._property_value_to_field_value(str(o))
setattr(data_entity, field_name, field_value) # set field value dynamically
return data_entity
def _parse_task_by_iri(self, task_iri: str, canvas_method: visual_tasks.CanvasTaskCanvasMethod = None) -> Task:
"""
Parses a task of self.input_kg and stores the info in an object of a sub-class of Task
The sub-class name and the object's fields are mapped dynamically based on the found KG components
Args:
task_iri: IRI of the task to be parsed
canvas_method: optional object to pass as argument for task object initialization
Returns:
Task: object of a sub-class of Task, containing all the parsed info
"""
# fetch type of entity with given IRI
query_result = get_first_query_result_if_exists(
query_entity_parent_iri,
self.input_kg,
task_iri,
self.top_level_schema.namespace.AtomicTask,
)
if (
query_result is None
): # given IRI does not belong to an instance of a sub-class of self.top_level_schema.namespace.AtomicTask
print(f"Cannot retrieve parent of task with iri {task_iri}. Exiting...")
exit(1)
task_parent_iri = str(query_result[0])
task = Task(task_iri, Task(task_parent_iri))
method = get_method_by_task_iri(
self.input_kg,
self.top_level_schema.namespace_prefix,
self.top_level_schema.namespace,
task_iri,
)
        if method is None:
            print(f"Cannot retrieve method for task with iri: {task_iri}")
            exit(1)

# perform automatic mapping of KG task class to Python sub-class
class_name = task.type + method.type
Class = getattr(visual_tasks, class_name, None)
if Class is None:
Class = getattr(statistic_tasks, class_name, None)
if Class is None:
Class = getattr(ml_tasks, class_name, None)
# create Task sub-class object
if canvas_method:
task = Class(task_iri, Task(task_parent_iri), canvas_method)
else:
task = Class(task_iri, Task(task_parent_iri))
task_related_triples = self.input_kg.triples((URIRef(task_iri), None, None))
method_related_triples = self.input_kg.triples((URIRef(method.iri), None, None))
for s, p, o in itertools.chain(task_related_triples, method_related_triples):
# parse property name and value
field_name = property_name_to_field_name(str(p))
if not hasattr(task, field_name) or field_name == "type":
continue
field_value = self._property_value_to_field_value(str(o))
# set field value dynamically
if field_name == "has_input" or field_name == "has_output":
getattr(task, field_name).append(field_value)
else:
setattr(task, field_name, field_value)
return task
def execute_pipeline(self):
"""
Retrieves and executes pipeline by parsing self.input_kg
"""
pipeline_iri, input_data_path, next_task_iri = get_pipeline_and_first_task_iri(
self.input_kg, self.top_level_schema.namespace_prefix
)
input_data = pd.read_csv(input_data_path, delimiter=",", encoding="ISO-8859-1")
canvas_method = None # stores Task object that corresponds to a task of type CanvasTask
task_output_dict = {} # gradually filled with outputs of executed tasks
while next_task_iri is not None:
next_task = self._parse_task_by_iri(next_task_iri, canvas_method)
output = next_task.run_method(task_output_dict, input_data)
if output:
task_output_dict.update(output)
if next_task.type == "CanvasTask":
canvas_method = next_task
next_task_iri = next_task.has_next_task
|
(input_exe_kg_path: str = None)
|
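A hedged sketch of the two constructor modes, based only on the class above. Paths are placeholders, and the file format follows the n3/Turtle parsing used in __init__:
from exe_kg_lib.classes.exe_kg import ExeKG

# KG execution mode: parse an existing executable KG and run its pipeline.
runner = ExeKG(input_exe_kg_path='pipelines/demo_pipeline.ttl')   # hypothetical path
runner.execute_pipeline()

# KG construction mode: build a pipeline interactively via the CLI and save it.
builder = ExeKG()
builder.start_pipeline_creation('DemoPipeline', 'data/input.csv')
builder.save_created_kg('pipelines/demo_pipeline.ttl')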
68,485 |
exe_kg_lib.classes.exe_kg
|
__init__
|
Args:
input_exe_kg_path: path of KG to be executed
acts as switch for KG execution mode (if filled, mode is on)
|
def __init__(self, input_exe_kg_path: str = None):
"""
Args:
input_exe_kg_path: path of KG to be executed
acts as switch for KG execution mode (if filled, mode is on)
"""
self.top_level_schema = KGSchema.from_schema_info(KG_SCHEMAS["Data Science"]) # top-level KG schema
self.bottom_level_schemata = {}
# top-level KG schema entities
self.atomic_task = Entity(self.top_level_schema.namespace.AtomicTask)
self.atomic_method = Entity(self.top_level_schema.namespace.AtomicMethod)
self.data_entity = Entity(self.top_level_schema.namespace.DataEntity)
self.pipeline = Entity(self.top_level_schema.namespace.Pipeline)
self.data = Entity(self.top_level_schema.namespace.Data)
self.data_semantics = Entity(self.top_level_schema.namespace.DataSemantics)
self.data_structure = Entity(self.top_level_schema.namespace.DataStructure)
# self.input_kg: KG eventually filled with 3 KG schemas and the input executable KG in case of KG execution
self.input_kg = Graph(bind_namespaces="rdflib")
if input_exe_kg_path: # KG execution mode
self.input_kg.parse(input_exe_kg_path, format="n3") # parse input executable KG
check_kg_executability(self.input_kg)
all_ns = [n for n in self.input_kg.namespace_manager.namespaces()]
bottom_level_schema_info_set = False # flag indicating that a bottom-level schema was found
for schema_name, schema_info in KG_SCHEMAS.items(): # search for used bottom-level schema
if (
schema_name == "Data Science" # or schema_name == "Visualization"
): # skip top-level KG schema and Visualization schema that is always used
continue
if (schema_info["namespace_prefix"], URIRef(schema_info["namespace"])) in all_ns:
# bottom-level schema found
self.bottom_level_schemata[schema_info["namespace_prefix"]] = KGSchema.from_schema_info(schema_info)
bottom_level_schema_info_set = True
if not bottom_level_schema_info_set: # no bottom-level schema found, input executable KG is invalid
print("Input executable KG did not have any bottom level KG schemas")
exit(1)
else: # KG construction mode
for schema_name, schema_info in KG_SCHEMAS.items(): # search for used bottom-level schema
if (
schema_name == "Data Science" # or schema_name == "Visualization"
): # skip top-level KG schema and Visualization schema that is always used
continue
self.bottom_level_schemata[schema_info["namespace_prefix"]] = KGSchema.from_schema_info(schema_info)
bottom_level_schemata_kgs = [kg_schema.kg for kg_schema in self.bottom_level_schemata.values()]
self.input_kg += self.top_level_schema.kg # + self.visu_schema.kg # combine all KG schemas in input KG
for bottom_level_schema_kg in bottom_level_schemata_kgs:
self.input_kg += bottom_level_schema_kg
self.output_kg = Graph(bind_namespaces="rdflib") # KG to be filled while constructing executable KG
self._bind_used_namespaces([self.input_kg, self.output_kg])
# below variables are filled in self._parse_kgs()
self.task_type_dict = {} # dict for uniquely naming each new pipeline task
self.method_type_dict = {} # dict for uniquely naming each new pipeline method
self.atomic_task_list = [] # list for storing the available sub-classes of ds:AtomicTask
self.atomic_method_list = [] # list for storing the available sub-classes of ds:AtomicMethod
self.data_type_list = [] # list for storing the available sub-classes of ds:DataEntity
self.data_semantics_list = [] # list for storing the available sub-classes of ds:DataSemantics
self.data_structure_list = [] # list for storing the available sub-classes of ds:DataStructure
self.existing_data_entity_list = (
[]
) # contains existing data entities that are output entities of previous tasks during KG construction
self.last_created_task = (
None # last created pipeline task, for connecting consecutive pipeline tasks during KG construction
)
self.canvas_task_created = False # indicates if canvas task was created during KG construction, and used for hiding the other Visualization tasks in CLI
self._parse_kgs()
|
(self, input_exe_kg_path: Optional[str] = None)
|
68,486 |
exe_kg_lib.classes.exe_kg
|
_add_inputs_to_task
|
Instantiates and adds given input data entities to the given task of self.output_kg
if input_data_entity_dict is None, user is asked to specify input data entities
Args:
task_entity: the task to add the input to
input_data_entity_dict: keys -> input entity names corresponding to the given task as defined in the chosen bottom-level KG schema
values -> list of corresponding data entities to be added as input to the task
|
def _add_inputs_to_task(
self,
namespace: Namespace,
task_entity: Task,
input_data_entity_dict: Dict[str, List[DataEntity]] = None,
) -> None:
"""
Instantiates and adds given input data entities to the given task of self.output_kg
if input_data_entity_dict is None, user is asked to specify input data entities
Args:
task_entity: the task to add the input to
input_data_entity_dict: keys -> input entity names corresponding to the given task as defined in the chosen bottom-level KG schema
values -> list of corresponding data entities to be added as input to the task
"""
use_cli = input_data_entity_dict is None
# fetch compatible inputs from KG schema
results = list(
get_input_properties_and_inputs(
self.input_kg,
self.top_level_schema.namespace_prefix,
task_entity.parent_entity.iri,
)
)
# task_type_index was incremented when creating the task entity
# reset the index to match the currently created task's index
task_type_index = self.task_type_dict[task_entity.type] - 1
for _, input_entity_iri, data_structure_iri in results:
input_entity_name = input_entity_iri.split("#")[1]
if not use_cli:
input_data_entity_list = input_data_entity_dict[input_entity_name]
else:
# use CLI
print(f"Specify input corresponding to {input_entity_name}")
input_data_entity_list = get_input_for_existing_data_entities(self.existing_data_entity_list)
input_data_entity_list += get_input_for_new_data_entities(
self.data_semantics_list,
self.data_structure_list,
namespace,
self.data_entity,
)
same_input_index = 1
for input_data_entity in input_data_entity_list:
# instantiate data entity corresponding to the found input_entity_name
data_entity_iri = input_entity_iri + str(task_type_index) + "_" + str(same_input_index)
# instantiate given data entity
add_data_entity_instance(
self.output_kg,
self.data,
self.top_level_schema.kg,
self.top_level_schema.namespace,
input_data_entity,
)
# instantiate and attach data entity with reference to the given data entity
data_entity = DataEntity(
data_entity_iri,
DataEntity(input_entity_iri, self.data_entity),
has_reference=input_data_entity.iri,
has_data_structure_iri=data_structure_iri,
)
add_and_attach_data_entity(
self.output_kg,
self.data,
self.top_level_schema.kg,
self.top_level_schema.namespace,
data_entity,
self.top_level_schema.namespace.hasInput,
task_entity,
)
task_entity.input_dict[input_entity_name] = data_entity
same_input_index += 1
if use_cli:
check_kg_executability(self.output_kg)
|
(self, namespace: rdflib.namespace.Namespace, task_entity: exe_kg_lib.classes.task.Task, input_data_entity_dict: Optional[Dict[str, List[exe_kg_lib.classes.data_entity.DataEntity]]] = None) -> NoneType
|
68,487 |
exe_kg_lib.classes.exe_kg
|
_add_outputs_to_task
|
Instantiates and adds output data entities to the given task of self.output_kg, based on the task's definition in the KG schema
Args:
task_entity: the task to add the output to
|
def _add_outputs_to_task(self, task_entity: Task) -> None:
"""
Instantiates and adds output data entities to the given task of self.output_kg, based on the task's definition in the KG schema
Args:
task_entity: the task to add the output to
"""
# fetch compatible outputs from KG schema
results = list(
get_output_properties_and_outputs(
self.input_kg,
self.top_level_schema.namespace_prefix,
task_entity.parent_entity.iri,
)
)
# task_type_index was incremented when creating the task entity
# reset the index to match the currently created task's index
task_type_index = self.task_type_dict[task_entity.type] - 1
for output_property, output_parent_entity_iri, data_structure_iri in results:
# instantiate and add data entity
output_data_entity_iri = output_parent_entity_iri + str(task_type_index)
output_data_entity = DataEntity(
output_data_entity_iri,
DataEntity(output_parent_entity_iri, self.data_entity),
has_data_structure_iri=data_structure_iri,
)
add_and_attach_data_entity(
self.output_kg,
self.data,
self.top_level_schema.kg,
self.top_level_schema.namespace,
output_data_entity,
self.top_level_schema.namespace.hasOutput,
task_entity,
)
task_entity.output_dict[output_parent_entity_iri.split("#")[1]] = output_data_entity
self.existing_data_entity_list.append(output_data_entity)
|
(self, task_entity: exe_kg_lib.classes.task.Task) -> NoneType
|
68,488 |
exe_kg_lib.classes.exe_kg
|
_bind_used_namespaces
|
Binds top-level, bottom-level and Visualization KG schemas' namespaces with their prefixes
Adds these bindings to the Graphs of kgs list
Args:
kgs: list of Graph objects to which the namespace bindings are added
|
def _bind_used_namespaces(self, kgs: List[Graph]):
"""
Binds top-level, bottom-level and Visualization KG schemas' namespaces with their prefixes
Adds these bindings to the Graphs of kgs list
Args:
kgs: list of Graph objects to which the namespace bindings are added
"""
for kg in kgs:
kg.bind(self.top_level_schema.namespace_prefix, self.top_level_schema.namespace)
for bottom_level_kg_schema in self.bottom_level_schemata.values():
kg.bind(
bottom_level_kg_schema.namespace_prefix,
bottom_level_kg_schema.namespace,
)
|
(self, kgs: List[rdflib.graph.Graph])
|
68,489 |
exe_kg_lib.classes.exe_kg
|
_create_method
|
Instantiate and attach method to task of self.output_kg
Args:
task_to_attach_to: the task to attach the created method to
|
def _create_method(self, task_to_attach_to: Entity) -> None:
"""
Instantiate and attach method to task of self.output_kg
Args:
task_to_attach_to: the task to attach the created method to
"""
print(f"Please choose a method for {task_to_attach_to.type}:")
# fetch compatible methods and their properties from KG schema
results = list(
get_method_properties_and_methods(
self.input_kg,
self.top_level_schema.namespace_prefix,
task_to_attach_to.parent_entity.iri,
)
)
for i, pair in enumerate(results):
tmp_method = pair[1].split("#")[1]
print(f"\t{str(i)}. {tmp_method}")
method_id = int(input())
selected_property_and_method = results[method_id]
method_parent = next(
filter(
lambda m: m.iri == selected_property_and_method[1],
self.atomic_method_list,
),
None,
)
# instantiate method and link it with the task using the appropriate selected_property_and_method[0] relation
add_instance_from_parent_with_relation(
task_to_attach_to.namespace,
self.output_kg,
method_parent,
selected_property_and_method[0],
task_to_attach_to,
name_instance(self.task_type_dict, self.method_type_dict, method_parent),
)
# fetch compatible data properties from KG schema
property_list = get_data_properties_plus_inherited_by_class_iri(self.input_kg, method_parent.iri)
if property_list:
print(f"Please enter requested properties for {method_parent.name}:")
# add data properties to the task with given values
for pair in property_list:
property_instance = URIRef(pair[0])
range = pair[1].split("#")[1]
range_iri = pair[1]
input_property = Literal(
lexical_or_value=input("\t{} in range({}): ".format(pair[0].split("#")[1], range)),
datatype=range_iri,
)
add_literal(self.output_kg, task_to_attach_to, property_instance, input_property)
check_kg_executability(self.output_kg)
|
(self, task_to_attach_to: exe_kg_lib.classes.entity.Entity) -> NoneType
|
68,490 |
exe_kg_lib.classes.exe_kg
|
_create_next_task_cli
|
Instantiates and adds task (without method) based on user input to self.output_kg
Adds task's output data entities to self.existing_data_entity_list
Returns:
None: in case user wants to end the pipeline creation
Task: object of the created task
|
def _create_next_task_cli(self) -> Union[None, Task]:
"""
Instantiates and adds task (without method) based on user input to self.output_kg
Adds task's output data entities to self.existing_data_entity_list
Returns:
None: in case user wants to end the pipeline creation
Task: object of the created task
"""
print("Please choose the next task")
for i, t in enumerate(self.atomic_task_list):
if not self.canvas_task_created and t.name == "PlotTask":
continue
if self.canvas_task_created and t.name == "CanvasTask":
continue
print(f"\t{str(i)}. {t.name}")
print(f"\t{str(-1)}. End pipeline")
next_task_id = int(input())
if next_task_id == -1:
return None
next_task_parent = self.atomic_task_list[next_task_id]
relation_iri = (
self.top_level_schema.namespace.hasNextTask
if self.last_created_task.type != "Pipeline"
else self.top_level_schema.namespace.hasStartTask
) # use relation depending on the previous task
# instantiate task and link it with the previous one
task_entity = add_instance_from_parent_with_relation(
next_task_parent.namespace,
self.output_kg,
next_task_parent,
relation_iri,
self.last_created_task,
name_instance(self.task_type_dict, self.method_type_dict, next_task_parent),
)
task_entity = Task(task_entity.iri, task_entity.parent_entity) # create Task object from Entity object's info
# instantiate and add input data entities to the task based on user input
self._add_inputs_to_task(next_task_parent.namespace, task_entity)
# instantiate and add output data entities to the task, as specified in the KG schema
self._add_outputs_to_task(task_entity)
self.last_created_task = task_entity
if task_entity.type == "CanvasTask":
self.canvas_task_created = True
return task_entity
|
(self) -> Optional[exe_kg_lib.classes.task.Task]
|
68,491 |
exe_kg_lib.classes.exe_kg
|
_parse_data_entity_by_iri
|
Parses an input or output data entity of self.input_kg and stores the parsed info in a Python object
Args:
in_out_data_entity_iri: IRI of the KG entity to parse
Returns:
None: if given IRI does not belong to an instance of a sub-class of self.top_level_schema.namespace.DataEntity
DataEntity: object with data entity's parsed properties
|
def _parse_data_entity_by_iri(self, in_out_data_entity_iri: str) -> Optional[DataEntity]:
"""
Parses an input or output data entity of self.input_kg and stores the parsed info in a Python object
Args:
in_out_data_entity_iri: IRI of the KG entity to parse
Returns:
None: if given IRI does not belong to an instance of a sub-class of self.top_level_schema.namespace.DataEntity
DataEntity: object with data entity's parsed properties
"""
# fetch type of entity with given IRI
query_result = get_first_query_result_if_exists(
query_entity_parent_iri,
self.input_kg,
in_out_data_entity_iri,
self.top_level_schema.namespace.DataEntity,
)
if query_result is None:
return None
data_entity_parent_iri = str(query_result[0])
# fetch IRI of data entity that is referenced by the given entity
query_result = get_first_query_result_if_exists(
query_data_entity_reference_iri,
self.input_kg,
self.top_level_schema.namespace_prefix,
in_out_data_entity_iri,
)
if query_result is None: # no referenced data entity found
data_entity_ref_iri = in_out_data_entity_iri
else:
data_entity_ref_iri = str(query_result[0])
# create DataEntity object to store all the parsed properties
data_entity = DataEntity(in_out_data_entity_iri, Entity(data_entity_parent_iri))
data_entity.has_reference = data_entity_ref_iri.split("#")[1]
for s, p, o in self.input_kg.triples((URIRef(data_entity_ref_iri), None, None)):
# parse property name and value
field_name = property_name_to_field_name(str(p))
if not hasattr(data_entity, field_name) or field_name == "type":
continue
field_value = self._property_value_to_field_value(str(o))
setattr(data_entity, field_name, field_value) # set field value dynamically
return data_entity
|
(self, in_out_data_entity_iri: str) -> Optional[exe_kg_lib.classes.data_entity.DataEntity]
|
68,492 |
exe_kg_lib.classes.exe_kg
|
_parse_kgs
|
Fills lists with subclasses of top-level KG schema classes and initializes dicts used for unique naming
|
def _parse_kgs(self) -> None:
"""
Fills lists with subclasses of top-level KG schema classes and initializes dicts used for unique naming
"""
atomic_task_subclasses = get_subclasses_of(self.atomic_task.iri, self.input_kg)
for t in list(atomic_task_subclasses):
task = Entity(t[0], self.atomic_task)
self.atomic_task_list.append(task)
self.task_type_dict[task.name] = 1
atomic_method_subclasses = get_subclasses_of(self.atomic_method.iri, self.input_kg)
for m in list(atomic_method_subclasses):
method = Entity(m[0], self.atomic_method)
self.atomic_method_list.append(method)
self.method_type_dict[method.name] = 1
data_type_subclasses = get_subclasses_of(self.data_entity.iri, self.input_kg)
for d in list(data_type_subclasses):
data_type = Entity(d[0], self.data_entity)
self.data_type_list.append(data_type)
data_semantics_subclasses = get_subclasses_of(self.data_semantics.iri, self.top_level_schema.kg)
for d in list(data_semantics_subclasses):
if d[0] == self.data_entity.iri:
continue
data_semantics = Entity(d[0], self.data_semantics)
self.data_semantics_list.append(data_semantics)
data_structure_subclasses = get_subclasses_of(self.data_structure.iri, self.top_level_schema.kg)
for d in list(data_structure_subclasses):
if d[0] == self.data_entity.iri:
continue
data_structure = Entity(d[0], self.data_structure)
self.data_structure_list.append(data_structure)
|
(self) -> NoneType
|
68,493 |
exe_kg_lib.classes.exe_kg
|
_parse_task_by_iri
|
Parses a task of self.input_kg and stores the info in an object of a sub-class of Task
The sub-class name and the object's fields are mapped dynamically based on the found KG components
Args:
task_iri: IRI of the task to be parsed
canvas_method: optional object to pass as argument for task object initialization
Returns:
Task: object of a sub-class of Task, containing all the parsed info
|
def _parse_task_by_iri(self, task_iri: str, canvas_method: visual_tasks.CanvasTaskCanvasMethod = None) -> Task:
"""
Parses a task of self.input_kg and stores the info in an object of a sub-class of Task
The sub-class name and the object's fields are mapped dynamically based on the found KG components
Args:
task_iri: IRI of the task to be parsed
canvas_method: optional object to pass as argument for task object initialization
Returns:
Task: object of a sub-class of Task, containing all the parsed info
"""
# fetch type of entity with given IRI
query_result = get_first_query_result_if_exists(
query_entity_parent_iri,
self.input_kg,
task_iri,
self.top_level_schema.namespace.AtomicTask,
)
if (
query_result is None
): # given IRI does not belong to an instance of a sub-class of self.top_level_schema.namespace.AtomicTask
print(f"Cannot retrieve parent of task with iri {task_iri}. Exiting...")
exit(1)
task_parent_iri = str(query_result[0])
task = Task(task_iri, Task(task_parent_iri))
method = get_method_by_task_iri(
self.input_kg,
self.top_level_schema.namespace_prefix,
self.top_level_schema.namespace,
task_iri,
)
    if method is None:
        print(f"Cannot retrieve method for task with iri: {task_iri}")
        exit(1)
# perform automatic mapping of KG task class to Python sub-class
class_name = task.type + method.type
Class = getattr(visual_tasks, class_name, None)
if Class is None:
Class = getattr(statistic_tasks, class_name, None)
if Class is None:
Class = getattr(ml_tasks, class_name, None)
# create Task sub-class object
if canvas_method:
task = Class(task_iri, Task(task_parent_iri), canvas_method)
else:
task = Class(task_iri, Task(task_parent_iri))
task_related_triples = self.input_kg.triples((URIRef(task_iri), None, None))
method_related_triples = self.input_kg.triples((URIRef(method.iri), None, None))
for s, p, o in itertools.chain(task_related_triples, method_related_triples):
# parse property name and value
field_name = property_name_to_field_name(str(p))
if not hasattr(task, field_name) or field_name == "type":
continue
field_value = self._property_value_to_field_value(str(o))
# set field value dynamically
if field_name == "has_input" or field_name == "has_output":
getattr(task, field_name).append(field_value)
else:
setattr(task, field_name, field_value)
return task
|
(self, task_iri: str, canvas_method: Optional[exe_kg_lib.classes.tasks.visual_tasks.CanvasTaskCanvasMethod] = None) -> exe_kg_lib.classes.task.Task
|
68,494 |
exe_kg_lib.classes.exe_kg
|
_property_value_to_field_value
|
Converts property value to Python class field value
If property_value is not a data entity's IRI, it is returned as is
Else, its property values are converted recursively and stored in a DataEntity object
Args:
property_value: value of the property as found in KG
Returns:
str: property_value parameter as is
DataEntity: object containing parsed data entity properties
|
def _property_value_to_field_value(self, property_value: str) -> Union[str, DataEntity]:
"""
Converts property value to Python class field value
If property_value is not a data entity's IRI, it is returned as is
Else, its property values are converted recursively and stored in a DataEntity object
Args:
property_value: value of the property as found in KG
Returns:
str: property_value parameter as is
DataEntity: object containing parsed data entity properties
"""
if "#" in property_value:
data_entity = self._parse_data_entity_by_iri(property_value)
if data_entity is None:
return property_value
return data_entity
return property_value
|
(self, property_value: str) -> Union[str, exe_kg_lib.classes.data_entity.DataEntity]
|
68,495 |
exe_kg_lib.classes.exe_kg
|
add_task
|
Instantiates and adds a new task entity to self.output_kg
Components attached to the task during creation: input and output data entities, and a method with properties
Args:
kg_schema_short: abbreviated name of the KG schema in which the task and method belong
task: task name
input_data_entity_dict: keys -> input names of the specified task
values -> lists of DataEntity objects to be added as input to the task
method: method name
properties_dict: keys -> property names of the specified method
values -> values to be added as parameters to the method
Returns:
Task: object of the created task
|
def add_task(
self,
kg_schema_short: str,
task: str,
input_data_entity_dict: Dict[str, List[DataEntity]],
method: str,
properties_dict: Dict[str, Union[str, int, float]],
) -> Task:
"""
Instantiates and adds a new task entity to self.output_kg
Components attached to the task during creation: input and output data entities, and a method with properties
Args:
kg_schema_short: abbreviated name of the KG schema in which the task and method belong
task: task name
input_data_entity_dict: keys -> input names of the specified task
values -> lists of DataEntity objects to be added as input to the task
method: method name
properties_dict: keys -> property names of the specified method
values -> values to be added as parameters to the method
Returns:
Task: object of the created task
"""
kg_schema_to_use = self.bottom_level_schemata[kg_schema_short]
relation_iri = (
self.top_level_schema.namespace.hasNextTask
if self.last_created_task.type != "Pipeline"
else self.top_level_schema.namespace.hasStartTask
) # use relation depending on the previous task
# instantiate task and link it with the previous one
parent_task = Task(kg_schema_to_use.namespace + task, self.atomic_task)
added_entity = add_instance_from_parent_with_relation(
kg_schema_to_use.namespace,
self.output_kg,
parent_task,
relation_iri,
self.last_created_task,
name_instance(self.task_type_dict, self.method_type_dict, parent_task),
)
next_task = Task.from_entity(added_entity) # create Task object from Entity object
# instantiate and add given input data entities to the task
self._add_inputs_to_task(kg_schema_to_use.namespace, next_task, input_data_entity_dict)
# instantiate and add output data entities to the task, as specified in the KG schema
self._add_outputs_to_task(next_task)
method_parent = Entity(kg_schema_to_use.namespace + method, self.atomic_method)
# fetch compatible methods and their properties from KG schema
results = list(
get_method_properties_and_methods(
self.input_kg,
self.top_level_schema.namespace_prefix,
next_task.parent_entity.iri,
)
)
chosen_property_method = next(
filter(lambda pair: pair[1].split("#")[1] == method, results), None
) # match given method_type with query result
if chosen_property_method is None:
print(f"Property connecting task of type {task} with method of type {method} not found")
exit(1)
# instantiate method and link it with the task using the appropriate chosen_property_method[0] relation
method_entity = add_instance_from_parent_with_relation(
kg_schema_to_use.namespace,
self.output_kg,
method_parent,
chosen_property_method[0],
next_task,
name_instance(self.task_type_dict, self.method_type_dict, method_parent),
)
# fetch compatible data properties from KG schema
property_list = get_data_properties_plus_inherited_by_class_iri(self.input_kg, method_parent.iri)
# add data properties to the task with given values
for pair in property_list:
property_iri = pair[0]
property_name = property_iri.split("#")[1]
range_iri = pair[1]
input_property = Literal(
lexical_or_value=properties_dict[property_name],
datatype=range_iri,
)
add_literal(self.output_kg, method_entity, property_iri, input_property)
self.last_created_task = next_task # store created task
return next_task
|
(self, kg_schema_short: str, task: str, input_data_entity_dict: Dict[str, List[exe_kg_lib.classes.data_entity.DataEntity]], method: str, properties_dict: Dict[str, Union[str, int, float]]) -> exe_kg_lib.classes.task.Task
|
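A hedged usage sketch for add_task, assuming the class defined in exe_kg_lib.classes.exe_kg is exposed as ExeKG and constructed as shown; the schema key, task, method, input, and property names are placeholders and are not verified against the shipped KG schemata.
from exe_kg_lib import ExeKG  # assumed public entry point

exe_kg = ExeKG(kg_schema_name="ML")  # assumed constructor argument
pipeline = exe_kg.create_pipeline_task("MyPipeline", "data/input.csv")
feature = exe_kg.create_data_entity(
    name="feature_1",
    source_value="feature_1",          # column name in the input CSV
    data_semantics_name="TimeSeries",  # placeholder semantics entity
    data_structure_name="Vector",      # placeholder structure entity
)
train_task = exe_kg.add_task(
    kg_schema_short="ml",                                # placeholder schema key
    task="Train",                                        # placeholder task name
    input_data_entity_dict={"DataInTrainX": [feature]},  # placeholder input name
    method="KNNTrain",                                   # placeholder method name
    properties_dict={"hasParamK": 3},                    # placeholder property name
)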
68,496 |
exe_kg_lib.classes.exe_kg
|
create_data_entity
|
Creates a DataEntity object
Args:
name: name of the data entity
source_value: name of the data source corresponding to a column of the data
data_semantics_name: name of the data semantics entity
data_structure_name: name of the data structure entity
Returns:
DataEntity: object initialized with the given parameter values
|
def create_data_entity(
self,
name: str,
source_value: str,
data_semantics_name: str,
data_structure_name: str,
) -> DataEntity:
"""
Creates a DataEntity object
Args:
name: name of the data entity
source_value: name of the data source corresponding to a column of the data
data_semantics_name: name of the data semantics entity
data_structure_name: name of the data structure entity
Returns:
DataEntity: object initialized with the given parameter values
"""
return DataEntity(
self.top_level_schema.namespace + name,
self.data_entity,
source_value,
self.top_level_schema.namespace + data_semantics_name,
self.top_level_schema.namespace + data_structure_name,
)
|
(self, name: str, source_value: str, data_semantics_name: str, data_structure_name: str) -> exe_kg_lib.classes.data_entity.DataEntity
|
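A short hedged sketch for create_data_entity, continuing the ExeKG instance assumed above; the semantics and structure entity names are placeholders, not names taken from an actual schema.
speed = exe_kg.create_data_entity(
    name="vehicle_speed",
    source_value="vehicle_speed",      # CSV column backing this data entity
    data_semantics_name="TimeSeries",  # placeholder
    data_structure_name="Vector",      # placeholder
)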
68,497 |
exe_kg_lib.classes.exe_kg
|
create_pipeline_task
|
Instantiates and adds a new pipeline task entity to self.output_kg
Args:
pipeline_name: name for the pipeline
input_data_path: path for the input data to be used by the pipeline's tasks
Returns:
Task: created pipeline
|
def create_pipeline_task(self, pipeline_name: str, input_data_path: str) -> Task:
"""
Instantiates and adds a new pipeline task entity to self.output_kg
Args:
pipeline_name: name for the pipeline
input_data_path: path for the input data to be used by the pipeline's tasks
Returns:
Task: created pipeline
"""
pipeline = create_pipeline_task(
self.top_level_schema.namespace,
self.pipeline,
self.output_kg,
pipeline_name,
input_data_path,
)
self.last_created_task = pipeline
return pipeline
|
(self, pipeline_name: str, input_data_path: str) -> exe_kg_lib.classes.task.Task
|
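Hedged sketch: the pipeline task is created first so that subsequent add_task calls can be chained onto it via hasStartTask/hasNextTask; the path is a placeholder.
pipeline = exe_kg.create_pipeline_task(
    pipeline_name="MyPipeline",
    input_data_path="data/input.csv",
)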
68,498 |
exe_kg_lib.classes.exe_kg
|
execute_pipeline
|
Retrieves and executes pipeline by parsing self.input_kg
|
def execute_pipeline(self):
"""
Retrieves and executes pipeline by parsing self.input_kg
"""
pipeline_iri, input_data_path, next_task_iri = get_pipeline_and_first_task_iri(
self.input_kg, self.top_level_schema.namespace_prefix
)
input_data = pd.read_csv(input_data_path, delimiter=",", encoding="ISO-8859-1")
canvas_method = None # stores Task object that corresponds to a task of type CanvasTask
task_output_dict = {} # gradually filled with outputs of executed tasks
while next_task_iri is not None:
next_task = self._parse_task_by_iri(next_task_iri, canvas_method)
output = next_task.run_method(task_output_dict, input_data)
if output:
task_output_dict.update(output)
if next_task.type == "CanvasTask":
canvas_method = next_task
next_task_iri = next_task.has_next_task
|
(self)
|
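Hedged sketch: executing a previously constructed executable KG. The constructor keyword below is an assumption; the library may take the input KG path differently.
exe_kg = ExeKG(input_exe_kg_path="kg/MyPipeline.ttl")  # assumed argument name
exe_kg.execute_pipeline()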
68,499 |
exe_kg_lib.classes.exe_kg
|
save_created_kg
|
Saves self.output_kg to a file
Args:
file_path: path of the output file
|
def save_created_kg(self, file_path: str) -> None:
"""
Saves self.output_kg to a file
Args:
file_path: path of the output file
"""
check_kg_executability(self.output_kg)
dir_path = os.path.dirname(file_path)
os.makedirs(dir_path, exist_ok=True)
self.output_kg.serialize(destination=file_path)
print(f"Executable KG saved in {file_path}")
|
(self, file_path: str) -> NoneType
|
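Hedged sketch: persisting the constructed KG; save_created_kg checks executability and creates the parent directory before serializing. The path is a placeholder.
exe_kg.save_created_kg("kg/MyPipeline.ttl")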
68,500 |
exe_kg_lib.classes.exe_kg
|
start_pipeline_creation
|
Handles the pipeline creation through CLI
Args:
pipeline_name: name for the pipeline
input_data_path: path for the input data to be used by the pipeline's tasks
|
def start_pipeline_creation(self, pipeline_name: str, input_data_path: str) -> None:
"""
Handles the pipeline creation through CLI
Args:
pipeline_name: name for the pipeline
input_data_path: path for the input data to be used by the pipeline's tasks
"""
pipeline = create_pipeline_task(
self.top_level_schema.namespace,
self.pipeline,
self.output_kg,
pipeline_name,
input_data_path,
)
self.last_created_task = pipeline
while True:
next_task = self._create_next_task_cli()
if next_task is None:
break
self._create_method(next_task)
|
(self, pipeline_name: str, input_data_path: str) -> NoneType
|
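Hedged sketch of the CLI-driven flow, with the same assumed constructor argument as above; prompts are answered interactively until no further task is requested.
exe_kg = ExeKG(kg_schema_name="ML")  # assumed constructor argument
exe_kg.start_pipeline_creation("CLIPipeline", "data/input.csv")
exe_kg.save_created_kg("kg/CLIPipeline.ttl")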
68,503 |
mltable.mltable
|
DataType
|
Helper class for handling the proper manipulation of supported column types (int, bool, string, etc.).
Currently used with `MLTable.convert_column_types(...)` & `from_delimited_files(...)` for specifying which types
to convert columns to. Different types are selected with `DataType.from_*(...)` methods.
|
class DataType:
"""
Helper class for handling the proper manipulation of supported column types (int, bool, string, etc.).
Currently used with `MLTable.convert_column_types(...)` & `from_delimited_files(...)` for specifying which types
to convert columns to. Different types are selected with `DataType.from_*(...)` methods.
"""
_MISMATCH_AS_TYPES = ('error', 'true', 'false')
@staticmethod
def _from_raw(value):
if isinstance(value, DataType):
return value
if value == 'string':
return DataType.to_string()
if value == 'int':
return DataType.to_int()
if value == 'float':
return DataType.to_float()
if value == 'boolean':
return DataType.to_bool()
if value == 'stream_info':
return DataType.to_stream()
raise UserErrorException(f"'{value}' is not a supported string conversion for `mltable.DataType`, "
"supported types are 'string', 'int', 'float', 'boolean', & 'stream_info'")
@staticmethod
def _create(data_type):
dt = DataType()
dt._data_type = data_type
dt._arguments = _SIMPLE_TYPES.get(data_type)
return dt
@staticmethod
def _format_str_list(values, var_name):
if values is None:
return None
if isinstance(values, str):
return [values]
if isinstance(values, (list, tuple, set)) and any(not isinstance(x, str) for x in values):
raise UserErrorException(f'`{var_name}` must only consist of strings')
if len(values) == 0:
return None
return list(values)
@staticmethod
def to_string():
"""Configure conversion to string."""
return DataType._create(FieldType.STRING)
@staticmethod
def to_int():
"""Configure conversion to 64-bit integer."""
return DataType._create(FieldType.INTEGER)
@staticmethod
def to_float():
"""Configure conversion to 64-bit float."""
return DataType._create(FieldType.DECIMAL)
@staticmethod
def to_bool(true_values: Optional[List[str]] = None,
false_values: Optional[List[str]] = None,
mismatch_as: Optional[str] = None):
"""
Configure conversion to bool. `true_values` & `false_values` must both be None or non-empty lists of
strings, else an error will be thrown.
:param true_values: List of values in the dataset to designate as True.
For example, ['1', 'yes'] will be replaced as [True, True].
The true_values need to be present in the dataset, otherwise None will be returned for values not present.
:type true_values: builtin.list[str]
:param false_values: List of values in the dataset to designate as False.
For example, ['0', 'no'] will be replaced as [False, False].
The false_values need to be present in the dataset, otherwise None will be returned for values not present.
:type false_values: builtin.list[str]
:param mismatch_as: How to cast strings that are in neither `true_values` nor `false_values`; 'true' casts all as
True, 'false' casts all as False, and 'error' raises an error instead of casting. Defaults to None, which is equivalent to 'error'.
:type mismatch_as: Optional[str]
"""
dt = DataType._create(FieldType.BOOLEAN)
if mismatch_as is not None and mismatch_as not in DataType._MISMATCH_AS_TYPES:
raise UserErrorException(f"`mismatch_as` can only be {DataType._MISMATCH_AS_TYPES}")
true_values = DataType._format_str_list(true_values, 'true_values')
false_values = DataType._format_str_list(false_values, 'false_values')
if (true_values is None) != (false_values is None):
raise UserErrorException('`true_values` and `false_values` must both be None or non-empty list of strings')
if true_values is not None and false_values is not None \
and (len(set(true_values).intersection(false_values)) > 0):
raise UserErrorException('`true_values` and `false_values` can not have overlapping values')
type_name = dt._arguments
args = {type_name: {}}
if true_values and false_values:
args[type_name]['true_values'] = true_values
args[type_name]['false_values'] = false_values
args[type_name]['mismatch_as'] = 'error'
if mismatch_as:
args[type_name]['mismatch_as'] = mismatch_as
dt._arguments = args if args[type_name] else type_name
return dt
@staticmethod
def to_stream():
"""Configure conversion to stream."""
return DataType._create(FieldType.STREAM)
@staticmethod
def to_datetime(formats: Union[str, List[str]], date_constant: Optional[str] = None):
"""
Configure conversion to datetime.
:param formats: Formats to try for datetime conversion. For example `%d-%m-%Y` for data in "day-month-year",
and `%Y-%m-%dT%H:%M:%S.%f` for "combined date and time representation" according to ISO 8601.
* %Y: Year with 4 digits
* %y: Year with 2 digits
* %m: Month in digits
* %b: Month represented by its abbreviated name in 3 letters, like Aug
* %B: Month represented by its full name, like August
* %d: Day in digits
* %H: Hour as represented in 24-hour clock time
* %I: Hour as represented in 12-hour clock time
* %M: Minute in 2 digits
* %S: Second in 2 digits
* %f: Microsecond
* %p: AM/PM designator
* %z: Timezone, for example: -0700
:type formats: str or builtin.list[str]
:param date_constant: If the column contains only time values, a date to apply to the resulting DateTime.
:type date_constant: Optional[str]
"""
dt = DataType._create(FieldType.DATE)
type_name = _SIMPLE_TYPES.get(FieldType.DATE)
if isinstance(formats, str):
formats = [formats]
elif not (isinstance(formats, (list, tuple)) and all(isinstance(x, str) for x in formats)):
raise UserErrorException(
'Expect `formats` to be a single string, a list of strings, or a tuple of strings')
dt._arguments = {type_name: {'formats': formats}}
if date_constant is not None:
dt._arguments[type_name]['date_constant'] = date_constant
return dt
|
()
|
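A hedged usage sketch for DataType with the MLTable conversion APIs named in the docstring; the file path and column names are placeholders.
import mltable
from mltable import DataType

tbl = mltable.from_delimited_files(paths=[{"file": "data/sample.csv"}])
tbl = tbl.convert_column_types({
    "age": DataType.to_int(),
    "score": DataType.to_float(),
    "name": DataType.to_string(),
    "when": DataType.to_datetime(formats=["%d-%m-%Y"]),
})
df = tbl.to_pandas_dataframe()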
68,504 |
mltable.mltable
|
_create
| null |
@staticmethod
def _create(data_type):
dt = DataType()
dt._data_type = data_type
dt._arguments = _SIMPLE_TYPES.get(data_type)
return dt
|
(data_type)
|
68,505 |
mltable.mltable
|
_format_str_list
| null |
@staticmethod
def _format_str_list(values, var_name):
if values is None:
return None
if isinstance(values, str):
return [values]
if isinstance(values, (list, tuple, set)) and any(not isinstance(x, str) for x in values):
raise UserErrorException(f'`{var_name}` must only consist of strings')
if len(values) == 0:
return None
return list(values)
|
(values, var_name)
|
68,506 |
mltable.mltable
|
_from_raw
| null |
@staticmethod
def _from_raw(value):
if isinstance(value, DataType):
return value
if value == 'string':
return DataType.to_string()
if value == 'int':
return DataType.to_int()
if value == 'float':
return DataType.to_float()
if value == 'boolean':
return DataType.to_bool()
if value == 'stream_info':
return DataType.to_stream()
raise UserErrorException(f"'{value}' is not a supported string conversion for `mltable.DataType`, "
"supported types are 'string', 'int', 'float', 'boolean', & 'stream_info'")
|
(value)
|
68,507 |
mltable.mltable
|
to_bool
|
Configure conversion to bool. `true_values` & `false_values` must both be None or non-empty lists of
strings, else an error will be thrown.
:param true_values: List of values in the dataset to designate as True.
For example, ['1', 'yes'] will be replaced as [True, True].
The true_values need to be present in the dataset, otherwise None will be returned for values not present.
:type true_values: builtin.list[str]
:param false_values: List of values in the dataset to designate as False.
For example, ['0', 'no'] will be replaced as [False, False].
The false_values need to be present in the dataset, otherwise None will be returned for values not present.
:type false_values: builtin.list[str]
:param mismatch_as: How to cast strings that are in neither `true_values` nor `false_values`; 'true' casts all as
True, 'false' casts all as False, and 'error' raises an error instead of casting. Defaults to None, which is equivalent to 'error'.
:type mismatch_as: Optional[str]
|
@staticmethod
def to_bool(true_values: Optional[List[str]] = None,
false_values: Optional[List[str]] = None,
mismatch_as: Optional[str] = None):
"""
Configure conversion to bool. `true_values` & `false_values` must both be None or non-empty lists of
strings, else an error will be thrown.
:param true_values: List of values in the dataset to designate as True.
For example, ['1', 'yes'] will be replaced as [True, True].
The true_values need to be present in the dataset, otherwise None will be returned for values not present.
:type true_values: builtin.list[str]
:param false_values: List of values in the dataset to designate as False.
For example, ['0', 'no'] will be replaced as [False, False].
The false_values need to be present in the dataset, otherwise None will be returned for values not present.
:type false_values: builtin.list[str]
:param mismatch_as: How to cast strings that are in neither `true_values` nor `false_values`; 'true' casts all as
True, 'false' casts all as False, and 'error' raises an error instead of casting. Defaults to None, which is equivalent to 'error'.
:type mismatch_as: Optional[str]
"""
dt = DataType._create(FieldType.BOOLEAN)
if mismatch_as is not None and mismatch_as not in DataType._MISMATCH_AS_TYPES:
raise UserErrorException(f"`mismatch_as` can only be {DataType._MISMATCH_AS_TYPES}")
true_values = DataType._format_str_list(true_values, 'true_values')
false_values = DataType._format_str_list(false_values, 'false_values')
if (true_values is None) != (false_values is None):
raise UserErrorException('`true_values` and `false_values` must both be None or non-empty list of strings')
if true_values is not None and false_values is not None \
and (len(set(true_values).intersection(false_values)) > 0):
raise UserErrorException('`true_values` and `false_values` can not have overlapping values')
type_name = dt._arguments
args = {type_name: {}}
if true_values and false_values:
args[type_name]['true_values'] = true_values
args[type_name]['false_values'] = false_values
args[type_name]['mismatch_as'] = 'error'
if mismatch_as:
args[type_name]['mismatch_as'] = mismatch_as
dt._arguments = args if args[type_name] else type_name
return dt
|
(true_values: Optional[List[str]] = None, false_values: Optional[List[str]] = None, mismatch_as: Optional[str] = None)
|
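Hedged sketch for DataType.to_bool: map the listed textual flags to booleans and cast anything outside the lists to False instead of raising; the column name is a placeholder.
bool_type = DataType.to_bool(
    true_values=["1", "yes"],
    false_values=["0", "no"],
    mismatch_as="false",
)
# tbl = tbl.convert_column_types({"is_active": bool_type})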