index | package | name | docstring | code | signature
---|---|---|---|---|---|
39,373 | pyspark.sql.dataframe | drop | Returns a new :class:`DataFrame` without specified columns.
This is a no-op if the schema doesn't contain the given column name(s).
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols: str or :class:`Column`
a name of the column, or the :class:`Column` to drop
Returns
-------
:class:`DataFrame`
DataFrame without given columns.
Notes
-----
When an input is a column name, it is treated literally without further interpretation.
Otherwise, it will try to match the equivalent expression.
As a result, dropping a column by its name, `drop(colName)`, has different semantics than
directly dropping the column with `drop(col(colName))`.
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.sql.functions import col, lit
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df2 = spark.createDataFrame([Row(height=80, name="Tom"), Row(height=85, name="Bob")])
>>> df.drop('age').show()
+-----+
| name|
+-----+
| Tom|
|Alice|
| Bob|
+-----+
>>> df.drop(df.age).show()
+-----+
| name|
+-----+
| Tom|
|Alice|
| Bob|
+-----+
Drop the column that joined both DataFrames on.
>>> df.join(df2, df.name == df2.name, 'inner').drop('name').sort('age').show()
+---+------+
|age|height|
+---+------+
| 14| 80|
| 16| 85|
+---+------+
>>> df3 = df.join(df2)
>>> df3.show()
+---+-----+------+----+
|age| name|height|name|
+---+-----+------+----+
| 14| Tom| 80| Tom|
| 14| Tom| 85| Bob|
| 23|Alice| 80| Tom|
| 23|Alice| 85| Bob|
| 16| Bob| 80| Tom|
| 16| Bob| 85| Bob|
+---+-----+------+----+
Drop two columns that share the same name.
>>> df3.drop("name").show()
+---+------+
|age|height|
+---+------+
| 14| 80|
| 14| 85|
| 23| 80|
| 23| 85|
| 16| 80|
| 16| 85|
+---+------+
Cannot drop col('name') because the reference is ambiguous.
>>> df3.drop(col("name")).show()
Traceback (most recent call last):
...
pyspark.errors.exceptions.captured.AnalysisException: [AMBIGUOUS_REFERENCE] Reference...
>>> df4 = df.withColumn("a.b.c", lit(1))
>>> df4.show()
+---+-----+-----+
|age| name|a.b.c|
+---+-----+-----+
| 14| Tom| 1|
| 23|Alice| 1|
| 16| Bob| 1|
+---+-----+-----+
>>> df4.drop("a.b.c").show()
+---+-----+
|age| name|
+---+-----+
| 14| Tom|
| 23|Alice|
| 16| Bob|
+---+-----+
Cannot find a column matching the expression "a.b.c"; the drop is a no-op.
>>> df4.drop(col("a.b.c")).show()
+---+-----+-----+
|age| name|a.b.c|
+---+-----+-----+
| 14| Tom| 1|
| 23|Alice| 1|
| 16| Bob| 1|
+---+-----+-----+
| def drop(self, *cols: "ColumnOrName") -> "DataFrame": # type: ignore[misc]
"""Returns a new :class:`DataFrame` without specified columns.
This is a no-op if the schema doesn't contain the given column name(s).
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols: str or :class:`Column`
a name of the column, or the :class:`Column` to drop
Returns
-------
:class:`DataFrame`
DataFrame without given columns.
Notes
-----
When an input is a column name, it is treated literally without further interpretation.
Otherwise, it will try to match the equivalent expression.
As a result, dropping a column by its name, `drop(colName)`, has different semantics than
directly dropping the column with `drop(col(colName))`.
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.sql.functions import col, lit
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df2 = spark.createDataFrame([Row(height=80, name="Tom"), Row(height=85, name="Bob")])
>>> df.drop('age').show()
+-----+
| name|
+-----+
| Tom|
|Alice|
| Bob|
+-----+
>>> df.drop(df.age).show()
+-----+
| name|
+-----+
| Tom|
|Alice|
| Bob|
+-----+
Drop the column that joined both DataFrames on.
>>> df.join(df2, df.name == df2.name, 'inner').drop('name').sort('age').show()
+---+------+
|age|height|
+---+------+
| 14| 80|
| 16| 85|
+---+------+
>>> df3 = df.join(df2)
>>> df3.show()
+---+-----+------+----+
|age| name|height|name|
+---+-----+------+----+
| 14| Tom| 80| Tom|
| 14| Tom| 85| Bob|
| 23|Alice| 80| Tom|
| 23|Alice| 85| Bob|
| 16| Bob| 80| Tom|
| 16| Bob| 85| Bob|
+---+-----+------+----+
Drop two columns that share the same name.
>>> df3.drop("name").show()
+---+------+
|age|height|
+---+------+
| 14| 80|
| 14| 85|
| 23| 80|
| 23| 85|
| 16| 80|
| 16| 85|
+---+------+
Cannot drop col('name') because the reference is ambiguous.
>>> df3.drop(col("name")).show()
Traceback (most recent call last):
...
pyspark.errors.exceptions.captured.AnalysisException: [AMBIGUOUS_REFERENCE] Reference...
>>> df4 = df.withColumn("a.b.c", lit(1))
>>> df4.show()
+---+-----+-----+
|age| name|a.b.c|
+---+-----+-----+
| 14| Tom| 1|
| 23|Alice| 1|
| 16| Bob| 1|
+---+-----+-----+
>>> df4.drop("a.b.c").show()
+---+-----+
|age| name|
+---+-----+
| 14| Tom|
| 23|Alice|
| 16| Bob|
+---+-----+
Cannot find a column matching the expression "a.b.c"; the drop is a no-op.
>>> df4.drop(col("a.b.c")).show()
+---+-----+-----+
|age| name|a.b.c|
+---+-----+-----+
| 14| Tom| 1|
| 23|Alice| 1|
| 16| Bob| 1|
+---+-----+-----+
"""
column_names: List[str] = []
java_columns: List[JavaObject] = []
for c in cols:
if isinstance(c, str):
column_names.append(c)
elif isinstance(c, Column):
java_columns.append(c._jc)
else:
raise PySparkTypeError(
error_class="NOT_COLUMN_OR_STR",
message_parameters={"arg_name": "col", "arg_type": type(c).__name__},
)
jdf = self._jdf
if len(java_columns) > 0:
first_column, *remaining_columns = java_columns
jdf = jdf.drop(first_column, self._jseq(remaining_columns))
if len(column_names) > 0:
jdf = jdf.drop(self._jseq(column_names))
return DataFrame(jdf, self.sparkSession)
| (self, *cols: 'ColumnOrName') -> 'DataFrame' |
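The notes above distinguish name-based drops from expression-based drops. A related pattern worth a minimal, hedged sketch (assuming an active SparkSession named ``spark``): after a join, passing a :class:`Column` bound to one input (here ``df2.name``) drops only that side's column, whereas the bare name would hit both.

```python
from pyspark.sql import Row

df = spark.createDataFrame([(14, "Tom"), (16, "Bob")], ["age", "name"])
df2 = spark.createDataFrame([Row(height=80, name="Tom"), Row(height=85, name="Bob")])

joined = df.join(df2, df.name == df2.name, "inner")
# Drop only the right-hand "name" column; df.name is kept.
joined.drop(df2.name).show()
```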
39,374 | pyspark.sql.dataframe | dropDuplicates | Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
be and the system will accordingly limit the state. In addition, data older than
watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
subset : List of column names, optional
List of columns to use for duplicate comparison (default All columns).
Returns
-------
:class:`DataFrame`
DataFrame without duplicates.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([
... Row(name='Alice', age=5, height=80),
... Row(name='Alice', age=5, height=80),
... Row(name='Alice', age=10, height=80)
... ])
Deduplicate the same rows.
>>> df.dropDuplicates().show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
|Alice| 10| 80|
+-----+---+------+
Deduplicate values on 'name' and 'height' columns.
>>> df.dropDuplicates(['name', 'height']).show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
+-----+---+------+
| def dropDuplicates(self, subset: Optional[List[str]] = None) -> "DataFrame":
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
be and the system will accordingly limit the state. In addition, data older than
watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
subset : List of column names, optional
List of columns to use for duplicate comparison (default All columns).
Returns
-------
:class:`DataFrame`
DataFrame without duplicates.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([
... Row(name='Alice', age=5, height=80),
... Row(name='Alice', age=5, height=80),
... Row(name='Alice', age=10, height=80)
... ])
Deduplicate the same rows.
>>> df.dropDuplicates().show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
|Alice| 10| 80|
+-----+---+------+
Deduplicate values on 'name' and 'height' columns.
>>> df.dropDuplicates(['name', 'height']).show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
+-----+---+------+
"""
if subset is not None and (not isinstance(subset, Iterable) or isinstance(subset, str)):
raise PySparkTypeError(
error_class="NOT_LIST_OR_TUPLE",
message_parameters={"arg_name": "subset", "arg_type": type(subset).__name__},
)
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sparkSession)
| (self, subset: Optional[List[str]] = None) -> pyspark.sql.dataframe.DataFrame |
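The streaming behaviour mentioned above (deduplication state bounded by a watermark) is easiest to see in a small pipeline. A hedged sketch using the built-in ``rate`` source; the column choices are illustrative and ``spark`` is assumed to be an active SparkSession.

```python
stream = (
    spark.readStream.format("rate").load()        # built-in test source
    .selectExpr("value % 5 AS value", "timestamp")
)

deduped = (
    stream
    .withWatermark("timestamp", "10 minutes")     # bounds the deduplication state
    .dropDuplicates(["value", "timestamp"])       # including the event-time column
                                                  # lets the watermark evict old state
)
# deduped.writeStream.format("console").start()   # start the query when needed
```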
39,375 | pyspark.sql.dataframe | dropDuplicatesWithinWatermark | Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns, within watermark.
This only works with streaming :class:`DataFrame`, and watermark for the input
:class:`DataFrame` must be set via :func:`withWatermark`.
For a streaming :class:`DataFrame`, this will keep all data across triggers as intermediate
state to drop duplicated rows. The state is kept to guarantee the semantic: events are
deduplicated as long as the time distance between the earliest and latest events is smaller
than the delay threshold of the watermark. Users are encouraged to set the delay threshold
of the watermark longer than the maximum timestamp difference among duplicated events.
Note: data that is too late, i.e. older than the watermark, will be dropped.
.. versionadded:: 3.5.0
Parameters
----------
subset : List of column names, optional
List of columns to use for duplicate comparison (default All columns).
Returns
-------
:class:`DataFrame`
DataFrame without duplicates.
Notes
-----
Supports Spark Connect.
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.sql.functions import timestamp_seconds
>>> df = spark.readStream.format("rate").load().selectExpr(
... "value % 5 AS value", "timestamp")
>>> df.select("value", df.timestamp.alias("time")).withWatermark("time", '10 minutes')
DataFrame[value: bigint, time: timestamp]
Deduplicate the same rows.
>>> df.dropDuplicatesWithinWatermark() # doctest: +SKIP
Deduplicate values on 'value' columns.
>>> df.dropDuplicatesWithinWatermark(['value']) # doctest: +SKIP
| def dropDuplicatesWithinWatermark(self, subset: Optional[List[str]] = None) -> "DataFrame":
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns, within watermark.
This only works with streaming :class:`DataFrame`, and watermark for the input
:class:`DataFrame` must be set via :func:`withWatermark`.
For a streaming :class:`DataFrame`, this will keep all data across triggers as intermediate
state to drop duplicated rows. The state is kept to guarantee the semantic: events are
deduplicated as long as the time distance between the earliest and latest events is smaller
than the delay threshold of the watermark. Users are encouraged to set the delay threshold
of the watermark longer than the maximum timestamp difference among duplicated events.
Note: data that is too late, i.e. older than the watermark, will be dropped.
.. versionadded:: 3.5.0
Parameters
----------
subset : List of column names, optional
List of columns to use for duplicate comparison (default All columns).
Returns
-------
:class:`DataFrame`
DataFrame without duplicates.
Notes
-----
Supports Spark Connect.
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.sql.functions import timestamp_seconds
>>> df = spark.readStream.format("rate").load().selectExpr(
... "value % 5 AS value", "timestamp")
>>> df.select("value", df.timestamp.alias("time")).withWatermark("time", '10 minutes')
DataFrame[value: bigint, time: timestamp]
Deduplicate the same rows.
>>> df.dropDuplicatesWithinWatermark() # doctest: +SKIP
Deduplicate values on 'value' columns.
>>> df.dropDuplicatesWithinWatermark(['value']) # doctest: +SKIP
"""
if subset is not None and (not isinstance(subset, Iterable) or isinstance(subset, str)):
raise PySparkTypeError(
error_class="NOT_LIST_OR_TUPLE",
message_parameters={"arg_name": "subset", "arg_type": type(subset).__name__},
)
if subset is None:
jdf = self._jdf.dropDuplicatesWithinWatermark()
else:
jdf = self._jdf.dropDuplicatesWithinWatermark(self._jseq(subset))
return DataFrame(jdf, self.sparkSession)
| (self, subset: Optional[List[str]] = None) -> pyspark.sql.dataframe.DataFrame |
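For contrast with plain :func:`dropDuplicates`, a minimal sketch of the same kind of pipeline using :func:`dropDuplicatesWithinWatermark`; here the event-time column does not need to be part of the subset, and the delay threshold should exceed the largest time gap between duplicates. ``spark`` is assumed to be an active SparkSession.

```python
events = (
    spark.readStream.format("rate").load()
    .selectExpr("value % 5 AS id", "timestamp AS event_time")
)

deduped = (
    events
    .withWatermark("event_time", "10 minutes")    # delay threshold for deduplication
    .dropDuplicatesWithinWatermark(["id"])        # dedup on "id" only
)
# deduped.writeStream.format("console").start()   # start the query when needed
```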
39,376 | pyspark.sql.dataframe | dropDuplicates | :func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
.. versionadded:: 1.4 | def dropDuplicates(self, subset: Optional[List[str]] = None) -> "DataFrame":
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
be and the system will accordingly limit the state. In addition, data older than
watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
subset : List of column names, optional
List of columns to use for duplicate comparison (default All columns).
Returns
-------
:class:`DataFrame`
DataFrame without duplicates.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([
... Row(name='Alice', age=5, height=80),
... Row(name='Alice', age=5, height=80),
... Row(name='Alice', age=10, height=80)
... ])
Deduplicate the same rows.
>>> df.dropDuplicates().show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
|Alice| 10| 80|
+-----+---+------+
Deduplicate values on 'name' and 'height' columns.
>>> df.dropDuplicates(['name', 'height']).show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
+-----+---+------+
"""
if subset is not None and (not isinstance(subset, Iterable) or isinstance(subset, str)):
raise PySparkTypeError(
error_class="NOT_LIST_OR_TUPLE",
message_parameters={"arg_name": "subset", "arg_type": type(subset).__name__},
)
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sparkSession)
| (self, subset=None) |
39,377 | pyspark.sql.dataframe | dropna | Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
.. versionadded:: 1.3.1
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
how : str, optional
'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
thresh: int, optional
default None
If specified, drop rows that have fewer than `thresh` non-null values.
This overrides the `how` parameter.
subset : str, tuple or list, optional
optional list of column names to consider.
Returns
-------
:class:`DataFrame`
DataFrame with rows containing null values removed, according to ``how``, ``thresh`` and ``subset``.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([
... Row(age=10, height=80, name="Alice"),
... Row(age=5, height=None, name="Bob"),
... Row(age=None, height=None, name="Tom"),
... Row(age=None, height=None, name=None),
... ])
>>> df.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
| def dropna(
self,
how: str = "any",
thresh: Optional[int] = None,
subset: Optional[Union[str, Tuple[str, ...], List[str]]] = None,
) -> "DataFrame":
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
.. versionadded:: 1.3.1
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
how : str, optional
'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
thresh: int, optional
default None
If specified, drop rows that have fewer than `thresh` non-null values.
This overrides the `how` parameter.
subset : str, tuple or list, optional
optional list of column names to consider.
Returns
-------
:class:`DataFrame`
DataFrame with rows containing null values removed, according to ``how``, ``thresh`` and ``subset``.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([
... Row(age=10, height=80, name="Alice"),
... Row(age=5, height=None, name="Bob"),
... Row(age=None, height=None, name="Tom"),
... Row(age=None, height=None, name=None),
... ])
>>> df.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ["any", "all"]:
raise PySparkValueError(
error_class="VALUE_NOT_ANY_OR_ALL",
message_parameters={"arg_name": "how", "arg_type": how},
)
if subset is None:
subset = self.columns
elif isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise PySparkTypeError(
error_class="NOT_LIST_OR_STR_OR_TUPLE",
message_parameters={"arg_name": "subset", "arg_type": type(subset).__name__},
)
if thresh is None:
thresh = len(subset) if how == "any" else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sparkSession)
| (self, how: str = 'any', thresh: Optional[int] = None, subset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> pyspark.sql.dataframe.DataFrame |
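The ``thresh`` parameter is not exercised by the doctest above; a short sketch of how it overrides ``how`` (assumes an active SparkSession named ``spark``).

```python
df = spark.createDataFrame(
    [(10, 80, "Alice"), (5, None, "Bob"), (None, None, "Tom")],
    schema=["age", "height", "name"],
)
# Keep rows with at least 2 non-null values: "Tom" has only 1 and is dropped,
# "Bob" has exactly 2 and is kept.
df.dropna(thresh=2).show()
```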
39,378 | pyspark.sql.dataframe | exceptAll | Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
As standard in SQL, this function resolves columns by position (not by name).
.. versionadded:: 2.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
The other :class:`DataFrame` to compare to.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
| def exceptAll(self, other: "DataFrame") -> "DataFrame":
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
As standard in SQL, this function resolves columns by position (not by name).
.. versionadded:: 2.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
The other :class:`DataFrame` to compare to.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sparkSession)
| (self, other: pyspark.sql.dataframe.DataFrame) -> pyspark.sql.dataframe.DataFrame |
39,379 | pyspark.sql.dataframe | explain | Prints the (logical and physical) plans to the console for debugging purposes.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
extended : bool, optional
default ``False``. If ``False``, prints only the physical plan.
If this is a string and ``mode`` is not specified, the string is treated as the mode.
mode : str, optional
specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline and node details.
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
Print out the physical plan only (default).
>>> df.explain() # doctest: +SKIP
== Physical Plan ==
*(1) Scan ExistingRDD[age...,name...]
Print out all of the parsed, analyzed, optimized and physical plans.
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
Print out the plans with two sections: a physical plan outline and node details
>>> df.explain(mode="formatted") # doctest: +SKIP
== Physical Plan ==
* Scan ExistingRDD (...)
(1) Scan ExistingRDD [codegen id : ...]
Output [2]: [age..., name...]
...
Print a logical plan and statistics if they are available.
>>> df.explain("cost")
== Optimized Logical Plan ==
...Statistics...
...
| def explain(
self, extended: Optional[Union[bool, str]] = None, mode: Optional[str] = None
) -> None:
"""Prints the (logical and physical) plans to the console for debugging purposes.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
extended : bool, optional
default ``False``. If ``False``, prints only the physical plan.
If this is a string and ``mode`` is not specified, the string is treated as the mode.
mode : str, optional
specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
Print out the physical plan only (default).
>>> df.explain() # doctest: +SKIP
== Physical Plan ==
*(1) Scan ExistingRDD[age...,name...]
Print out all of the parsed, analyzed, optimized and physical plans.
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
Print out the plans with two sections: a physical plan outline and node details
>>> df.explain(mode="formatted") # doctest: +SKIP
== Physical Plan ==
* Scan ExistingRDD (...)
(1) Scan ExistingRDD [codegen id : ...]
Output [2]: [age..., name...]
...
Print a logical plan and statistics if they are available.
>>> df.explain("cost")
== Optimized Logical Plan ==
...Statistics...
...
"""
if extended is not None and mode is not None:
raise PySparkValueError(
error_class="CANNOT_SET_TOGETHER",
message_parameters={"arg_list": "extended and mode"},
)
# For the no argument case: df.explain()
is_no_argument = extended is None and mode is None
# For the cases below:
# explain(True)
# explain(extended=False)
is_extended_case = isinstance(extended, bool) and mode is None
# For the case when extended is mode:
# df.explain("formatted")
is_extended_as_mode = isinstance(extended, str) and mode is None
# For the mode specified:
# df.explain(mode="formatted")
is_mode_case = extended is None and isinstance(mode, str)
if not (is_no_argument or is_extended_case or is_extended_as_mode or is_mode_case):
if (extended is not None) and (not isinstance(extended, (bool, str))):
raise PySparkTypeError(
error_class="NOT_BOOL_OR_STR",
message_parameters={
"arg_name": "extended",
"arg_type": type(extended).__name__,
},
)
if (mode is not None) and (not isinstance(mode, str)):
raise PySparkTypeError(
error_class="NOT_STR",
message_parameters={"arg_name": "mode", "arg_type": type(mode).__name__},
)
# Sets an explain mode depending on a given argument
if is_no_argument:
explain_mode = "simple"
elif is_extended_case:
explain_mode = "extended" if extended else "simple"
elif is_mode_case:
explain_mode = cast(str, mode)
elif is_extended_as_mode:
explain_mode = cast(str, extended)
assert self._sc._jvm is not None
print(self._sc._jvm.PythonSQLUtils.explainString(self._jdf.queryExecution(), explain_mode))
| (self, extended: Union[bool, str, NoneType] = None, mode: Optional[str] = None) -> NoneType |
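A compact sketch exercising the four argument shapes handled by the code above (no argument, boolean, string-as-mode, and explicit ``mode``); assumes an active SparkSession named ``spark``.

```python
df = spark.range(10).filter("id % 2 = 0")

df.explain()                   # no argument -> "simple": physical plan only
df.explain(True)               # boolean -> "extended": all four plans
df.explain("formatted")        # string without mode -> treated as the mode
df.explain(mode="cost")        # explicit mode -> optimized logical plan with statistics
```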
39,380 | pyspark.sql.dataframe | fillna | Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
.. versionadded:: 1.3.1
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
value : int, float, string, bool or dict
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, float, boolean, or string.
subset : str, tuple or list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data types are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Returns
-------
:class:`DataFrame`
DataFrame with replaced null values.
Examples
--------
>>> df = spark.createDataFrame([
... (10, 80.5, "Alice", None),
... (5, None, "Bob", None),
... (None, None, "Tom", None),
... (None, None, None, True)],
... schema=["age", "height", "name", "bool"])
Fill all null values with 50 for numeric columns.
>>> df.na.fill(50).show()
+---+------+-----+----+
|age|height| name|bool|
+---+------+-----+----+
| 10| 80.5|Alice|NULL|
| 5| 50.0| Bob|NULL|
| 50| 50.0| Tom|NULL|
| 50| 50.0| NULL|true|
+---+------+-----+----+
Fill all null values with ``False`` for boolean columns.
>>> df.na.fill(False).show()
+----+------+-----+-----+
| age|height| name| bool|
+----+------+-----+-----+
| 10| 80.5|Alice|false|
| 5| NULL| Bob|false|
|NULL| NULL| Tom|false|
|NULL| NULL| NULL| true|
+----+------+-----+-----+
Fill all null values with 50 for the 'age' column and "unknown" for the 'name' column, respectively.
>>> df.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+----+
|age|height| name|bool|
+---+------+-------+----+
| 10| 80.5| Alice|NULL|
| 5| NULL| Bob|NULL|
| 50| NULL| Tom|NULL|
| 50| NULL|unknown|true|
+---+------+-------+----+
| def fillna(
self,
value: Union["LiteralType", Dict[str, "LiteralType"]],
subset: Optional[Union[str, Tuple[str, ...], List[str]]] = None,
) -> "DataFrame":
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
.. versionadded:: 1.3.1
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
value : int, float, string, bool or dict
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, float, boolean, or string.
subset : str, tuple or list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data types are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Returns
-------
:class:`DataFrame`
DataFrame with replaced null values.
Examples
--------
>>> df = spark.createDataFrame([
... (10, 80.5, "Alice", None),
... (5, None, "Bob", None),
... (None, None, "Tom", None),
... (None, None, None, True)],
... schema=["age", "height", "name", "bool"])
Fill all null values with 50 for numeric columns.
>>> df.na.fill(50).show()
+---+------+-----+----+
|age|height| name|bool|
+---+------+-----+----+
| 10| 80.5|Alice|NULL|
| 5| 50.0| Bob|NULL|
| 50| 50.0| Tom|NULL|
| 50| 50.0| NULL|true|
+---+------+-----+----+
Fill all null values with ``False`` for boolean columns.
>>> df.na.fill(False).show()
+----+------+-----+-----+
| age|height| name| bool|
+----+------+-----+-----+
| 10| 80.5|Alice|false|
| 5| NULL| Bob|false|
|NULL| NULL| Tom|false|
|NULL| NULL| NULL| true|
+----+------+-----+-----+
Fill all null values with 50 for the 'age' column and "unknown" for the 'name' column, respectively.
>>> df.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+----+
|age|height| name|bool|
+---+------+-------+----+
| 10| 80.5| Alice|NULL|
| 5| NULL| Bob|NULL|
| 50| NULL| Tom|NULL|
| 50| NULL|unknown|true|
+---+------+-------+----+
"""
if not isinstance(value, (float, int, str, bool, dict)):
raise PySparkTypeError(
error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_STR",
message_parameters={"arg_name": "value", "arg_type": type(value).__name__},
)
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, int):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sparkSession)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sparkSession)
else:
if isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise PySparkTypeError(
error_class="NOT_LIST_OR_TUPLE",
message_parameters={"arg_name": "subset", "arg_type": type(subset).__name__},
)
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sparkSession)
| (self, value: Union[ForwardRef('LiteralType'), Dict[str, ForwardRef('LiteralType')]], subset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> 'DataFrame' |
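The ``subset`` parameter and the type-matching rule described above can be seen in a small sketch (assumes an active SparkSession named ``spark``).

```python
df = spark.createDataFrame(
    [(10, None, "Alice"), (None, 80.5, None)],
    schema=["age", "height", "name"],
)
# Fill nulls with 0 only within the subset: 'age' is filled, 'height' keeps its
# null because it is not listed, and 'name' is ignored because a string column
# does not match a numeric fill value.
df.fillna(0, subset=["age", "name"]).show()
```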
39,381 | pyspark.sql.dataframe | filter | Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
condition : :class:`Column` or str
a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expressions.
Returns
-------
:class:`DataFrame`
Filtered DataFrame.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
Filter by :class:`Column` instances.
>>> df.filter(df.age > 3).show()
+---+----+
|age|name|
+---+----+
| 5| Bob|
+---+----+
>>> df.where(df.age == 2).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
+---+-----+
Filter by SQL expression in a string.
>>> df.filter("age > 3").show()
+---+----+
|age|name|
+---+----+
| 5| Bob|
+---+----+
>>> df.where("age = 2").show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
+---+-----+
| def filter(self, condition: "ColumnOrName") -> "DataFrame":
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
condition : :class:`Column` or str
a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expressions.
Returns
-------
:class:`DataFrame`
Filtered DataFrame.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
Filter by :class:`Column` instances.
>>> df.filter(df.age > 3).show()
+---+----+
|age|name|
+---+----+
| 5| Bob|
+---+----+
>>> df.where(df.age == 2).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
+---+-----+
Filter by SQL expression in a string.
>>> df.filter("age > 3").show()
+---+----+
|age|name|
+---+----+
| 5| Bob|
+---+----+
>>> df.where("age = 2").show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
+---+-----+
"""
if isinstance(condition, str):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise PySparkTypeError(
error_class="NOT_COLUMN_OR_STR",
message_parameters={"arg_name": "condition", "arg_type": type(condition).__name__},
)
return DataFrame(jdf, self.sparkSession)
| (self, condition: 'ColumnOrName') -> 'DataFrame' |
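Column predicates can also be combined; a brief sketch, noting that the parentheses are required because ``&`` and ``|`` bind more tightly than comparisons in Python (assumes an active SparkSession named ``spark``).

```python
from pyspark.sql.functions import col

df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])

df.filter((col("age") > 3) & (col("name") != "Alice")).show()      # AND of two predicates
df.filter((col("age") == 2) | col("name").startswith("B")).show()  # OR of two predicates
```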
39,382 | pyspark.sql.dataframe | first | Returns the first row as a :class:`Row`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Returns
-------
:class:`Row`
First row if :class:`DataFrame` is not empty, otherwise ``None``.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.first()
Row(age=2, name='Alice')
| def first(self) -> Optional[Row]:
"""Returns the first row as a :class:`Row`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Returns
-------
:class:`Row`
First row if :class:`DataFrame` is not empty, otherwise ``None``.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.first()
Row(age=2, name='Alice')
"""
return self.head()
| (self) -> Optional[pyspark.sql.types.Row] |
39,383 | pyspark.sql.dataframe | foreach | Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
.. versionadded:: 1.3.0
Parameters
----------
f : function
A function that accepts one parameter which will
receive each row to process.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> def func(person):
... print(person.name)
...
>>> df.foreach(func)
| def foreach(self, f: Callable[[Row], None]) -> None:
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
.. versionadded:: 1.3.0
Parameters
----------
f : function
A function that accepts one parameter which will
receive each row to process.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> def func(person):
... print(person.name)
...
>>> df.foreach(func)
"""
self.rdd.foreach(f)
| (self, f: Callable[[pyspark.sql.types.Row], NoneType]) -> NoneType |
39,384 | pyspark.sql.dataframe | foreachPartition | Applies the ``f`` function to each partition of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreachPartition()``.
.. versionadded:: 1.3.0
Parameters
----------
f : function
A function that accepts one parameter which will receive
each partition to process.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> def func(itr):
... for person in itr:
... print(person.name)
...
>>> df.foreachPartition(func)
| def foreachPartition(self, f: Callable[[Iterator[Row]], None]) -> None:
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreachPartition()``.
.. versionadded:: 1.3.0
Parameters
----------
f : function
A function that accepts one parameter which will receive
each partition to process.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> def func(itr):
... for person in itr:
... print(person.name)
...
>>> df.foreachPartition(func)
"""
self.rdd.foreachPartition(f) # type: ignore[arg-type]
| (self, f: Callable[[Iterator[pyspark.sql.types.Row]], NoneType]) -> NoneType |
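A common reason to prefer :func:`foreachPartition` over :func:`foreach` is to pay a per-partition rather than per-row cost (for example, one connection or one batched write per partition). A hedged sketch; ``send_batch`` is a hypothetical stand-in for any external sink, and ``spark`` is assumed to be an active SparkSession.

```python
def send_batch(rows):
    # Hypothetical sink; in practice this might be a database or HTTP client
    # created once per partition.
    print(f"sending {len(rows)} rows")

def process_partition(rows_iter):
    batch = [row.asDict() for row in rows_iter]   # materialize this partition only
    if batch:
        send_batch(batch)

df = spark.createDataFrame([(14, "Tom"), (23, "Alice")], ["age", "name"])
df.foreachPartition(process_partition)            # runs on the executors
```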
39,385 | pyspark.sql.dataframe | freqItems |
Finds frequent items for columns, possibly with false positives, using the
frequent element count algorithm described in
https://doi.org/10.1145/762471.762473 (Karp, Schenker, and Papadimitriou).
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : list or tuple
Names of the columns to calculate frequent items for as a list or tuple of
strings.
support : float, optional
The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
Returns
-------
:class:`DataFrame`
DataFrame with frequent items.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Examples
--------
>>> df = spark.createDataFrame([(1, 11), (1, 11), (3, 10), (4, 8), (4, 8)], ["c1", "c2"])
>>> df.freqItems(["c1", "c2"]).show() # doctest: +SKIP
+------------+------------+
|c1_freqItems|c2_freqItems|
+------------+------------+
| [4, 1, 3]| [8, 11, 10]|
+------------+------------+
| def freqItems(
self, cols: Union[List[str], Tuple[str]], support: Optional[float] = None
) -> "DataFrame":
"""
Finds frequent items for columns, possibly with false positives, using the
frequent element count algorithm described in
https://doi.org/10.1145/762471.762473 (Karp, Schenker, and Papadimitriou).
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : list or tuple
Names of the columns to calculate frequent items for as a list or tuple of
strings.
support : float, optional
The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
Returns
-------
:class:`DataFrame`
DataFrame with frequent items.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Examples
--------
>>> df = spark.createDataFrame([(1, 11), (1, 11), (3, 10), (4, 8), (4, 8)], ["c1", "c2"])
>>> df.freqItems(["c1", "c2"]).show() # doctest: +SKIP
+------------+------------+
|c1_freqItems|c2_freqItems|
+------------+------------+
| [4, 1, 3]| [8, 11, 10]|
+------------+------------+
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise PySparkTypeError(
error_class="NOT_LIST_OR_TUPLE",
message_parameters={"arg_name": "cols", "arg_type": type(cols).__name__},
)
if not support:
support = 0.01
return DataFrame(
self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sparkSession
)
| (self, cols: Union[List[str], Tuple[str]], support: Optional[float] = None) -> pyspark.sql.dataframe.DataFrame |
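A brief sketch of the ``support`` parameter; since the algorithm may return false positives, the result is best treated as a candidate set (assumes an active SparkSession named ``spark``).

```python
df = spark.createDataFrame([(1, 11), (1, 11), (3, 10), (4, 8), (4, 8)], ["c1", "c2"])

# Items occurring in at least ~40% of rows become candidates; with the default
# support of 0.01 almost every distinct value in this tiny DataFrame qualifies.
df.freqItems(["c1"], support=0.4).show()
```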
39,386 | pyspark.sql.dataframe | groupBy | Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : list, str or :class:`Column`
columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`)
or list of them.
Returns
-------
:class:`GroupedData`
Grouped data by given columns.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (2, "Bob"), (2, "Bob"), (5, "Bob")], schema=["age", "name"])
Passing no grouping columns triggers a global aggregation.
>>> df.groupBy().avg().show()
+--------+
|avg(age)|
+--------+
| 2.75|
+--------+
Group-by 'name', and specify a dictionary to calculate the summation of 'age'.
>>> df.groupBy("name").agg({"age": "sum"}).sort("name").show()
+-----+--------+
| name|sum(age)|
+-----+--------+
|Alice| 2|
| Bob| 9|
+-----+--------+
Group-by 'name', and calculate maximum values.
>>> df.groupBy(df.name).max().sort("name").show()
+-----+--------+
| name|max(age)|
+-----+--------+
|Alice| 2|
| Bob| 5|
+-----+--------+
Group-by 'name' and 'age', and calculate the number of rows in each group.
>>> df.groupBy(["name", df.age]).count().sort("name", "age").show()
+-----+---+-----+
| name|age|count|
+-----+---+-----+
|Alice| 2| 1|
| Bob| 2| 2|
| Bob| 5| 1|
+-----+---+-----+
| def groupBy(self, *cols: "ColumnOrName") -> "GroupedData": # type: ignore[misc]
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : list, str or :class:`Column`
columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`)
or list of them.
Returns
-------
:class:`GroupedData`
Grouped data by given columns.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (2, "Bob"), (2, "Bob"), (5, "Bob")], schema=["age", "name"])
Passing no grouping columns triggers a global aggregation.
>>> df.groupBy().avg().show()
+--------+
|avg(age)|
+--------+
| 2.75|
+--------+
Group-by 'name', and specify a dictionary to calculate the summation of 'age'.
>>> df.groupBy("name").agg({"age": "sum"}).sort("name").show()
+-----+--------+
| name|sum(age)|
+-----+--------+
|Alice| 2|
| Bob| 9|
+-----+--------+
Group-by 'name', and calculate maximum values.
>>> df.groupBy(df.name).max().sort("name").show()
+-----+--------+
| name|max(age)|
+-----+--------+
|Alice| 2|
| Bob| 5|
+-----+--------+
Group-by 'name' and 'age', and calculate the number of rows in each group.
>>> df.groupBy(["name", df.age]).count().sort("name", "age").show()
+-----+---+-----+
| name|age|count|
+-----+---+-----+
|Alice| 2| 1|
| Bob| 2| 2|
| Bob| 5| 1|
+-----+---+-----+
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
| (self, *cols: 'ColumnOrName') -> 'GroupedData' |
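Beyond the dictionary form shown above, :func:`GroupedData.agg` accepts expressions from :mod:`pyspark.sql.functions`, which allows aliases and multiple aggregates per group. A short sketch (assumes an active SparkSession named ``spark``).

```python
from pyspark.sql import functions as F

df = spark.createDataFrame(
    [(2, "Alice"), (2, "Bob"), (5, "Bob")], schema=["age", "name"])

df.groupBy("name").agg(
    F.count("*").alias("n"),        # rows per group
    F.max("age").alias("max_age"),  # maximum age per group
).sort("name").show()
```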
39,387 | pyspark.sql.dataframe | groupBy | :func:`groupby` is an alias for :func:`groupBy`.
.. versionadded:: 1.4 | def groupBy(self, *cols: "ColumnOrName") -> "GroupedData": # type: ignore[misc]
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : list, str or :class:`Column`
columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`)
or list of them.
Returns
-------
:class:`GroupedData`
Grouped data by given columns.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (2, "Bob"), (2, "Bob"), (5, "Bob")], schema=["age", "name"])
Passing no grouping columns triggers a global aggregation.
>>> df.groupBy().avg().show()
+--------+
|avg(age)|
+--------+
| 2.75|
+--------+
Group-by 'name', and specify a dictionary to calculate the summation of 'age'.
>>> df.groupBy("name").agg({"age": "sum"}).sort("name").show()
+-----+--------+
| name|sum(age)|
+-----+--------+
|Alice| 2|
| Bob| 9|
+-----+--------+
Group-by 'name', and calculate maximum values.
>>> df.groupBy(df.name).max().sort("name").show()
+-----+--------+
| name|max(age)|
+-----+--------+
|Alice| 2|
| Bob| 5|
+-----+--------+
Group-by 'name' and 'age', and calculate the number of rows in each group.
>>> df.groupBy(["name", df.age]).count().sort("name", "age").show()
+-----+---+-----+
| name|age|count|
+-----+---+-----+
|Alice| 2| 1|
| Bob| 2| 2|
| Bob| 5| 1|
+-----+---+-----+
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
| (self, *cols) |
39,388 | pyspark.sql.dataframe | head | Returns the first ``n`` rows.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
n : int, optional
default 1. Number of rows to return.
Returns
-------
If ``n`` is None, return a single :class:`Row` (or ``None`` if the DataFrame is empty).
Otherwise, return a list of at most ``n`` :class:`Row`.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.head()
Row(age=2, name='Alice')
>>> df.head(1)
[Row(age=2, name='Alice')]
| def head(self, n: Optional[int] = None) -> Union[Optional[Row], List[Row]]:
"""Returns the first ``n`` rows.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
n : int, optional
default 1. Number of rows to return.
Returns
-------
If ``n`` is None, return a single :class:`Row` (or ``None`` if the DataFrame is empty).
Otherwise, return a list of at most ``n`` :class:`Row`.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.head()
Row(age=2, name='Alice')
>>> df.head(1)
[Row(age=2, name='Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
| (self, n: Optional[int] = None) -> Union[pyspark.sql.types.Row, NoneType, List[pyspark.sql.types.Row]] |
39,389 | pyspark.sql.dataframe | hint | Specifies some hint on the current :class:`DataFrame`.
.. versionadded:: 2.2.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
name : str
A name of the hint.
parameters : str, list, float or int
Optional parameters.
Returns
-------
:class:`DataFrame`
Hinted DataFrame
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df2 = spark.createDataFrame([Row(height=80, name="Tom"), Row(height=85, name="Bob")])
>>> df.join(df2, "name").explain() # doctest: +SKIP
== Physical Plan ==
...
... +- SortMergeJoin ...
...
Explicitly trigger the broadcast hashjoin by providing the hint in ``df2``.
>>> df.join(df2.hint("broadcast"), "name").explain()
== Physical Plan ==
...
... +- BroadcastHashJoin ...
...
| def hint(
self, name: str, *parameters: Union["PrimitiveType", List["PrimitiveType"]]
) -> "DataFrame":
"""Specifies some hint on the current :class:`DataFrame`.
.. versionadded:: 2.2.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
name : str
A name of the hint.
parameters : str, list, float or int
Optional parameters.
Returns
-------
:class:`DataFrame`
Hinted DataFrame
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df2 = spark.createDataFrame([Row(height=80, name="Tom"), Row(height=85, name="Bob")])
>>> df.join(df2, "name").explain() # doctest: +SKIP
== Physical Plan ==
...
... +- SortMergeJoin ...
...
Explicitly trigger the broadcast hashjoin by providing the hint in ``df2``.
>>> df.join(df2.hint("broadcast"), "name").explain()
== Physical Plan ==
...
... +- BroadcastHashJoin ...
...
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0] # type: ignore[assignment]
if not isinstance(name, str):
raise PySparkTypeError(
error_class="NOT_STR",
message_parameters={"arg_name": "name", "arg_type": type(name).__name__},
)
allowed_types = (str, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise PySparkTypeError(
error_class="DISALLOWED_TYPE_FOR_CONTAINER",
message_parameters={
"arg_name": "parameters",
"arg_type": type(parameters).__name__,
"allowed_types": ", ".join(map(lambda x: x.__name__, allowed_types)),
"return_type": type(p).__name__,
},
)
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sparkSession)
| (self, name: str, *parameters: Union[ForwardRef('PrimitiveType'), List[ForwardRef('PrimitiveType')]]) -> 'DataFrame' |
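The ``broadcast`` hint shown above has the same effect as wrapping the right-hand side with :func:`pyspark.sql.functions.broadcast`; a small sketch comparing the two forms (assumes an active SparkSession named ``spark``).

```python
from pyspark.sql.functions import broadcast

df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"])
df2 = spark.createDataFrame([(80, "Tom"), (85, "Bob")], ["height", "name"])

df.join(df2.hint("broadcast"), "name").explain()   # hint form
df.join(broadcast(df2), "name").explain()          # equivalent function form
```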
39,390 | pyspark.sql.dataframe | inputFiles |
Returns a best-effort snapshot of the files that compose this :class:`DataFrame`.
This method simply asks each constituent BaseRelation for its respective files and
takes the union of all results. Depending on the source relations, this may not find
all input files. Duplicates are removed.
.. versionadded:: 3.1.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Returns
-------
list
List of file paths.
Examples
--------
>>> import tempfile
>>> with tempfile.TemporaryDirectory() as d:
... # Write a single-row DataFrame into a JSON file
... spark.createDataFrame(
... [{"age": 100, "name": "Hyukjin Kwon"}]
... ).repartition(1).write.json(d, mode="overwrite")
...
... # Read the JSON file as a DataFrame.
... df = spark.read.format("json").load(d)
...
... # Returns the number of input files.
... len(df.inputFiles())
1
| def inputFiles(self) -> List[str]:
"""
Returns a best-effort snapshot of the files that compose this :class:`DataFrame`.
This method simply asks each constituent BaseRelation for its respective files and
takes the union of all results. Depending on the source relations, this may not find
all input files. Duplicates are removed.
.. versionadded:: 3.1.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Returns
-------
list
List of file paths.
Examples
--------
>>> import tempfile
>>> with tempfile.TemporaryDirectory() as d:
... # Write a single-row DataFrame into a JSON file
... spark.createDataFrame(
... [{"age": 100, "name": "Hyukjin Kwon"}]
... ).repartition(1).write.json(d, mode="overwrite")
...
... # Read the JSON file as a DataFrame.
... df = spark.read.format("json").load(d)
...
... # Returns the number of input files.
... len(df.inputFiles())
1
"""
return list(self._jdf.inputFiles())
| (self) -> List[str] |
39,391 | pyspark.sql.dataframe | intersect | Return a new :class:`DataFrame` containing rows only in
both this :class:`DataFrame` and another :class:`DataFrame`.
Note that any duplicates are removed. To preserve duplicates
use :func:`intersectAll`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be combined.
Returns
-------
:class:`DataFrame`
Combined DataFrame.
Notes
-----
This is equivalent to `INTERSECT` in SQL.
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersect(df2).sort(df1.C1.desc()).show()
+---+---+
| C1| C2|
+---+---+
| b| 3|
| a| 1|
+---+---+
| def intersect(self, other: "DataFrame") -> "DataFrame":
"""Return a new :class:`DataFrame` containing rows only in
both this :class:`DataFrame` and another :class:`DataFrame`.
Note that any duplicates are removed. To preserve duplicates
use :func:`intersectAll`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be combined.
Returns
-------
:class:`DataFrame`
Combined DataFrame.
Notes
-----
This is equivalent to `INTERSECT` in SQL.
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersect(df2).sort(df1.C1.desc()).show()
+---+---+
| C1| C2|
+---+---+
| b| 3|
| a| 1|
+---+---+
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sparkSession)
| (self, other: pyspark.sql.dataframe.DataFrame) -> pyspark.sql.dataframe.DataFrame |
39,392 | pyspark.sql.dataframe | intersectAll | Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
and another :class:`DataFrame` while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL. As standard in SQL, this function
resolves columns by position (not by name).
.. versionadded:: 2.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be combined.
Returns
-------
:class:`DataFrame`
Combined DataFrame.
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
| def intersectAll(self, other: "DataFrame") -> "DataFrame":
"""Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
and another :class:`DataFrame` while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL. As standard in SQL, this function
resolves columns by position (not by name).
.. versionadded:: 2.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be combined.
Returns
-------
:class:`DataFrame`
Combined DataFrame.
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sparkSession)
| (self, other: pyspark.sql.dataframe.DataFrame) -> pyspark.sql.dataframe.DataFrame |
39,393 | pyspark.sql.dataframe | isEmpty |
Checks if the :class:`DataFrame` is empty and returns a boolean value.
.. versionadded:: 3.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Returns
-------
bool
Returns ``True`` if the DataFrame is empty, ``False`` otherwise.
See Also
--------
DataFrame.count : Counts the number of rows in DataFrame.
Notes
-----
- Unlike `count()`, this method does not trigger any computation.
- An empty DataFrame has no rows. It may have columns, but no data.
Examples
--------
Example 1: Checking if an empty DataFrame is empty
>>> df_empty = spark.createDataFrame([], 'a STRING')
>>> df_empty.isEmpty()
True
Example 2: Checking if a non-empty DataFrame is empty
>>> df_non_empty = spark.createDataFrame(["a"], 'STRING')
>>> df_non_empty.isEmpty()
False
Example 3: Checking if a DataFrame with null values is empty
>>> df_nulls = spark.createDataFrame([(None, None)], 'a STRING, b INT')
>>> df_nulls.isEmpty()
False
Example 4: Checking if a DataFrame with no rows but with columns is empty
>>> df_no_rows = spark.createDataFrame([], 'id INT, value STRING')
>>> df_no_rows.isEmpty()
True
| def isEmpty(self) -> bool:
"""
Checks if the :class:`DataFrame` is empty and returns a boolean value.
.. versionadded:: 3.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Returns
-------
bool
Returns ``True`` if the DataFrame is empty, ``False`` otherwise.
See Also
--------
DataFrame.count : Counts the number of rows in DataFrame.
Notes
-----
- Unlike `count()`, this method does not trigger any computation.
- An empty DataFrame has no rows. It may have columns, but no data.
Examples
--------
Example 1: Checking if an empty DataFrame is empty
>>> df_empty = spark.createDataFrame([], 'a STRING')
>>> df_empty.isEmpty()
True
Example 2: Checking if a non-empty DataFrame is empty
>>> df_non_empty = spark.createDataFrame(["a"], 'STRING')
>>> df_non_empty.isEmpty()
False
Example 3: Checking if a DataFrame with null values is empty
>>> df_nulls = spark.createDataFrame([(None, None)], 'a STRING, b INT')
>>> df_nulls.isEmpty()
False
Example 4: Checking if a DataFrame with no rows but with columns is empty
>>> df_no_rows = spark.createDataFrame([], 'id INT, value STRING')
>>> df_no_rows.isEmpty()
True
"""
return self._jdf.isEmpty()
| (self) -> bool |
39,394 | pyspark.sql.dataframe | isLocal | Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Returns
-------
bool
Examples
--------
>>> df = spark.sql("SHOW TABLES")
>>> df.isLocal()
True
| def isLocal(self) -> bool:
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Returns
-------
bool
Examples
--------
>>> df = spark.sql("SHOW TABLES")
>>> df.isLocal()
True
"""
return self._jdf.isLocal()
| (self) -> bool |
39,395 | pyspark.sql.dataframe | join | Joins with another :class:`DataFrame`, using the given join expression.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Right side of the join
on : str, list or :class:`Column`, optional
a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
how : str, optional
default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
Returns
-------
:class:`DataFrame`
Joined DataFrame.
Examples
--------
The following performs a full outer join between ``df1`` and ``df2``.
>>> from pyspark.sql import Row
>>> from pyspark.sql.functions import desc
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")]).toDF("age", "name")
>>> df2 = spark.createDataFrame([Row(height=80, name="Tom"), Row(height=85, name="Bob")])
>>> df3 = spark.createDataFrame([Row(age=2, name="Alice"), Row(age=5, name="Bob")])
>>> df4 = spark.createDataFrame([
... Row(age=10, height=80, name="Alice"),
... Row(age=5, height=None, name="Bob"),
... Row(age=None, height=None, name="Tom"),
... Row(age=None, height=None, name=None),
... ])
Inner join on columns (default)
>>> df.join(df2, 'name').select(df.name, df2.height).show()
+----+------+
|name|height|
+----+------+
| Bob| 85|
+----+------+
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).show()
+----+---+
|name|age|
+----+---+
| Bob| 5|
+----+---+
Outer join for both DataFrames on the 'name' column.
>>> df.join(df2, df.name == df2.name, 'outer').select(
... df.name, df2.height).sort(desc("name")).show()
+-----+------+
| name|height|
+-----+------+
| Bob| 85|
|Alice| NULL|
| NULL| 80|
+-----+------+
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).show()
+-----+------+
| name|height|
+-----+------+
| Tom| 80|
| Bob| 85|
|Alice| NULL|
+-----+------+
Outer join for both DataFrames with multiple columns.
>>> df.join(
... df3,
... [df.name == df3.name, df.age == df3.age],
... 'outer'
... ).select(df.name, df3.age).show()
+-----+---+
| name|age|
+-----+---+
|Alice| 2|
| Bob| 5|
+-----+---+
| def join(
self,
other: "DataFrame",
on: Optional[Union[str, List[str], Column, List[Column]]] = None,
how: Optional[str] = None,
) -> "DataFrame":
"""Joins with another :class:`DataFrame`, using the given join expression.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Right side of the join
on : str, list or :class:`Column`, optional
a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
how : str, optional
default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
Returns
-------
:class:`DataFrame`
Joined DataFrame.
Examples
--------
The following performs a full outer join between ``df1`` and ``df2``.
>>> from pyspark.sql import Row
>>> from pyspark.sql.functions import desc
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")]).toDF("age", "name")
>>> df2 = spark.createDataFrame([Row(height=80, name="Tom"), Row(height=85, name="Bob")])
>>> df3 = spark.createDataFrame([Row(age=2, name="Alice"), Row(age=5, name="Bob")])
>>> df4 = spark.createDataFrame([
... Row(age=10, height=80, name="Alice"),
... Row(age=5, height=None, name="Bob"),
... Row(age=None, height=None, name="Tom"),
... Row(age=None, height=None, name=None),
... ])
Inner join on columns (default)
>>> df.join(df2, 'name').select(df.name, df2.height).show()
+----+------+
|name|height|
+----+------+
| Bob| 85|
+----+------+
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).show()
+----+---+
|name|age|
+----+---+
| Bob| 5|
+----+---+
Outer join for both DataFrames on the 'name' column.
>>> df.join(df2, df.name == df2.name, 'outer').select(
... df.name, df2.height).sort(desc("name")).show()
+-----+------+
| name|height|
+-----+------+
| Bob| 85|
|Alice| NULL|
| NULL| 80|
+-----+------+
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).show()
+-----+------+
| name|height|
+-----+------+
| Tom| 80|
| Bob| 85|
|Alice| NULL|
+-----+------+
Outer join for both DataFrames with multiple columns.
>>> df.join(
... df3,
... [df.name == df3.name, df.age == df3.age],
... 'outer'
... ).select(df.name, df3.age).show()
+-----+---+
| name|age|
+-----+---+
|Alice| 2|
| Bob| 5|
+-----+---+
"""
if on is not None and not isinstance(on, list):
on = [on] # type: ignore[assignment]
if on is not None:
if isinstance(on[0], str):
on = self._jseq(cast(List[str], on))
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), cast(List[Column], on))
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, str), "how should be a string"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sparkSession)
| (self, other: pyspark.sql.dataframe.DataFrame, on: Union[str, List[str], pyspark.sql.column.Column, List[pyspark.sql.column.Column], NoneType] = None, how: Optional[str] = None) -> pyspark.sql.dataframe.DataFrame |
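A short sketch contrasting the two forms of `on`, assuming an active SparkSession bound to `spark`: a column name performs an equi-join and keeps a single join column, while a join expression keeps both sides' columns and requires disambiguation.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
left = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"])
right = spark.createDataFrame([(80, "Tom"), (85, "Bob")], ["height", "name"])

# Equi-join by column name: only one `name` column survives in the output.
left.join(right, on="name", how="inner").show()

# Join expression: both `name` columns are kept and must be selected explicitly.
left.join(right, on=left.name == right.name, how="left").select(
    left.name, right.height
).show()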
39,396 | pyspark.sql.dataframe | limit | Limits the result count to the number specified.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
num : int
Number of records to return. Will return this number of records
or all records if the DataFrame contains fewer than this number of records.
Returns
-------
:class:`DataFrame`
Subset of the records
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.limit(1).show()
+---+----+
|age|name|
+---+----+
| 14| Tom|
+---+----+
>>> df.limit(0).show()
+---+----+
|age|name|
+---+----+
+---+----+
| def limit(self, num: int) -> "DataFrame":
"""Limits the result count to the number specified.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
num : int
Number of records to return. Will return this number of records
or all records if the DataFrame contains fewer than this number of records.
Returns
-------
:class:`DataFrame`
Subset of the records
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.limit(1).show()
+---+----+
|age|name|
+---+----+
| 14| Tom|
+---+----+
>>> df.limit(0).show()
+---+----+
|age|name|
+---+----+
+---+----+
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sparkSession)
| (self, num: int) -> pyspark.sql.dataframe.DataFrame |
39,397 | pyspark.sql.dataframe | localCheckpoint | Returns a locally checkpointed version of this :class:`DataFrame`. Checkpointing can be
used to truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
.. versionadded:: 2.3.0
Parameters
----------
eager : bool, optional, default True
Whether to checkpoint this :class:`DataFrame` immediately.
Returns
-------
:class:`DataFrame`
Checkpointed DataFrame.
Notes
-----
This API is experimental.
Examples
--------
>>> df = spark.createDataFrame([
... (14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.localCheckpoint(False)
DataFrame[age: bigint, name: string]
| def localCheckpoint(self, eager: bool = True) -> "DataFrame":
"""Returns a locally checkpointed version of this :class:`DataFrame`. Checkpointing can be
used to truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
.. versionadded:: 2.3.0
Parameters
----------
eager : bool, optional, default True
Whether to checkpoint this :class:`DataFrame` immediately.
Returns
-------
:class:`DataFrame`
Checkpointed DataFrame.
Notes
-----
This API is experimental.
Examples
--------
>>> df = spark.createDataFrame([
... (14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.localCheckpoint(False)
DataFrame[age: bigint, name: string]
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sparkSession)
| (self, eager: bool = True) -> pyspark.sql.dataframe.DataFrame |
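A sketch of the iterative-algorithm case the docstring mentions, assuming an active SparkSession bound to `spark`: the lineage is periodically truncated so the logical plan does not keep growing.
from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.getOrCreate()
df = spark.range(1000).withColumn("v", F.rand(seed=42))
for i in range(10):
    df = df.withColumn("v", F.col("v") * 1.01)
    if i % 5 == 4:
        # eager=True materializes the data now and cuts the logical plan;
        # the checkpoint lives in executor storage, so it is not fault tolerant.
        df = df.localCheckpoint(eager=True)
print(df.count())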
39,398 | pyspark.sql.pandas.map_ops | mapInArrow |
Maps an iterator of batches in the current :class:`DataFrame` using a Python native
function that takes and outputs a PyArrow's `RecordBatch`, and returns the result as a
:class:`DataFrame`.
The function should take an iterator of `pyarrow.RecordBatch`\s and return
another iterator of `pyarrow.RecordBatch`\s. All columns are passed
together as an iterator of `pyarrow.RecordBatch`\s to the function and the
returned iterator of `pyarrow.RecordBatch`\s are combined as a :class:`DataFrame`.
Each `pyarrow.RecordBatch` size can be controlled by
`spark.sql.execution.arrow.maxRecordsPerBatch`. The size of the function's input and
output can be different.
.. versionadded:: 3.3.0
Parameters
----------
func : function
a Python native function that takes an iterator of `pyarrow.RecordBatch`\s, and
outputs an iterator of `pyarrow.RecordBatch`\s.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
barrier : bool, optional, default False
Use barrier mode execution.
.. versionadded:: 3.5.0
Examples
--------
>>> import pyarrow # doctest: +SKIP
>>> df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
>>> def filter_func(iterator):
... for batch in iterator:
... pdf = batch.to_pandas()
... yield pyarrow.RecordBatch.from_pandas(pdf[pdf.id == 1])
>>> df.mapInArrow(filter_func, df.schema).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
Set ``barrier`` to ``True`` to force the ``mapInArrow`` stage to run in
barrier mode; this ensures that all Python workers in the stage are
launched concurrently.
>>> df.mapInArrow(filter_func, df.schema, barrier=True).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
Notes
-----
This API is unstable, and for developers.
See Also
--------
pyspark.sql.functions.pandas_udf
pyspark.sql.DataFrame.mapInPandas
| def mapInArrow(
self, func: "ArrowMapIterFunction", schema: Union[StructType, str], barrier: bool = False
) -> "DataFrame":
"""
Maps an iterator of batches in the current :class:`DataFrame` using a Python native
function that takes and outputs a PyArrow's `RecordBatch`, and returns the result as a
:class:`DataFrame`.
The function should take an iterator of `pyarrow.RecordBatch`\\s and return
another iterator of `pyarrow.RecordBatch`\\s. All columns are passed
together as an iterator of `pyarrow.RecordBatch`\\s to the function and the
returned iterator of `pyarrow.RecordBatch`\\s are combined as a :class:`DataFrame`.
Each `pyarrow.RecordBatch` size can be controlled by
`spark.sql.execution.arrow.maxRecordsPerBatch`. The size of the function's input and
output can be different.
.. versionadded:: 3.3.0
Parameters
----------
func : function
a Python native function that takes an iterator of `pyarrow.RecordBatch`\\s, and
outputs an iterator of `pyarrow.RecordBatch`\\s.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
barrier : bool, optional, default False
Use barrier mode execution.
.. versionadded:: 3.5.0
Examples
--------
>>> import pyarrow # doctest: +SKIP
>>> df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
>>> def filter_func(iterator):
... for batch in iterator:
... pdf = batch.to_pandas()
... yield pyarrow.RecordBatch.from_pandas(pdf[pdf.id == 1])
>>> df.mapInArrow(filter_func, df.schema).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
Set ``barrier`` to ``True`` to force the ``mapInArrow`` stage to run in
barrier mode; this ensures that all Python workers in the stage are
launched concurrently.
>>> df.mapInArrow(filter_func, df.schema, barrier=True).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
Notes
-----
This API is unstable, and for developers.
See Also
--------
pyspark.sql.functions.pandas_udf
pyspark.sql.DataFrame.mapInPandas
"""
from pyspark.sql import DataFrame
from pyspark.sql.pandas.functions import pandas_udf
assert isinstance(self, DataFrame)
# The usage of the pandas_udf is internal so type checking is disabled.
udf = pandas_udf(
func, returnType=schema, functionType=PythonEvalType.SQL_MAP_ARROW_ITER_UDF
) # type: ignore[call-overload]
udf_column = udf(*[self[col] for col in self.columns])
jdf = self._jdf.pythonMapInArrow(udf_column._jc.expr(), barrier)
return DataFrame(jdf, self.sparkSession)
| (self, func: 'ArrowMapIterFunction', schema: Union[pyspark.sql.types.StructType, str], barrier: bool = False) -> 'DataFrame' |
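A sketch of a purely Arrow-side transformation (no pandas round trip), assuming pyarrow is installed and a SparkSession is bound to `spark`; the column names and schema string are illustrative.
import pyarrow as pa
import pyarrow.compute as pc
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))

def add_ten(batches):
    for batch in batches:
        ids = batch.column(0)                       # "id"
        ages = pc.add(batch.column(1), 10)          # "age", vectorized in Arrow
        yield pa.RecordBatch.from_arrays([ids, ages], names=["id", "age"])

df.mapInArrow(add_ten, "id long, age long").show()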
39,399 | pyspark.sql.pandas.map_ops | mapInPandas |
Maps an iterator of batches in the current :class:`DataFrame` using a Python native
function that takes and outputs a pandas DataFrame, and returns the result as a
:class:`DataFrame`.
The function should take an iterator of `pandas.DataFrame`\s and return
another iterator of `pandas.DataFrame`\s. All columns are passed
together as an iterator of `pandas.DataFrame`\s to the function and the
returned iterator of `pandas.DataFrame`\s are combined as a :class:`DataFrame`.
Each `pandas.DataFrame` size can be controlled by
`spark.sql.execution.arrow.maxRecordsPerBatch`. The size of the function's input and
output can be different.
.. versionadded:: 3.0.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
func : function
a Python native function that takes an iterator of `pandas.DataFrame`\s, and
outputs an iterator of `pandas.DataFrame`\s.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
barrier : bool, optional, default False
Use barrier mode execution.
.. versionadded:: 3.5.0
Examples
--------
>>> from pyspark.sql.functions import pandas_udf
>>> df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
>>> def filter_func(iterator):
... for pdf in iterator:
... yield pdf[pdf.id == 1]
...
>>> df.mapInPandas(filter_func, df.schema).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
Set ``barrier`` to ``True`` to force the ``mapInPandas`` stage to run in
barrier mode; this ensures that all Python workers in the stage are
launched concurrently.
>>> df.mapInPandas(filter_func, df.schema, barrier=True).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
Notes
-----
This API is experimental
See Also
--------
pyspark.sql.functions.pandas_udf
| def mapInPandas(
self, func: "PandasMapIterFunction", schema: Union[StructType, str], barrier: bool = False
) -> "DataFrame":
"""
Maps an iterator of batches in the current :class:`DataFrame` using a Python native
function that takes and outputs a pandas DataFrame, and returns the result as a
:class:`DataFrame`.
The function should take an iterator of `pandas.DataFrame`\\s and return
another iterator of `pandas.DataFrame`\\s. All columns are passed
together as an iterator of `pandas.DataFrame`\\s to the function and the
returned iterator of `pandas.DataFrame`\\s are combined as a :class:`DataFrame`.
Each `pandas.DataFrame` size can be controlled by
`spark.sql.execution.arrow.maxRecordsPerBatch`. The size of the function's input and
output can be different.
.. versionadded:: 3.0.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
func : function
a Python native function that takes an iterator of `pandas.DataFrame`\\s, and
outputs an iterator of `pandas.DataFrame`\\s.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
barrier : bool, optional, default False
Use barrier mode execution.
.. versionadded:: 3.5.0
Examples
--------
>>> from pyspark.sql.functions import pandas_udf
>>> df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
>>> def filter_func(iterator):
... for pdf in iterator:
... yield pdf[pdf.id == 1]
...
>>> df.mapInPandas(filter_func, df.schema).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
Set ``barrier`` to ``True`` to force the ``mapInPandas`` stage to run in
barrier mode; this ensures that all Python workers in the stage are
launched concurrently.
>>> df.mapInPandas(filter_func, df.schema, barrier=True).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
Notes
-----
This API is experimental
See Also
--------
pyspark.sql.functions.pandas_udf
"""
from pyspark.sql import DataFrame
from pyspark.sql.pandas.functions import pandas_udf
assert isinstance(self, DataFrame)
# The usage of the pandas_udf is internal so type checking is disabled.
udf = pandas_udf(
func, returnType=schema, functionType=PythonEvalType.SQL_MAP_PANDAS_ITER_UDF
) # type: ignore[call-overload]
udf_column = udf(*[self[col] for col in self.columns])
jdf = self._jdf.mapInPandas(udf_column._jc.expr(), barrier)
return DataFrame(jdf, self.sparkSession)
| (self, func: 'PandasMapIterFunction', schema: Union[pyspark.sql.types.StructType, str], barrier: bool = False) -> 'DataFrame' |
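A sketch showing that the output schema may differ from the input, assuming pandas is installed and a SparkSession is bound to `spark`.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))

def with_decade(batches):
    for pdf in batches:                  # each element is a pandas.DataFrame
        pdf = pdf.copy()
        pdf["decade"] = (pdf["age"] // 10) * 10
        yield pdf

df.mapInPandas(with_decade, "id long, age long, decade long").show()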
39,400 | pyspark.sql.dataframe | melt |
Unpivot a DataFrame from wide format to long format, optionally leaving
identifier columns set. This is the reverse of `groupBy(...).pivot(...).agg(...)`,
except for the aggregation, which cannot be reversed.
:func:`melt` is an alias for :func:`unpivot`.
.. versionadded:: 3.4.0
Parameters
----------
ids : str, Column, tuple, list, optional
Column(s) to use as identifiers. Can be a single column or column name,
or a list or tuple for multiple columns.
values : str, Column, tuple, list, optional
Column(s) to unpivot. Can be a single column or column name, or a list or tuple
for multiple columns. If not specified or empty, use all columns that
are not set as `ids`.
variableColumnName : str
Name of the variable column.
valueColumnName : str
Name of the value column.
Returns
-------
:class:`DataFrame`
Unpivoted DataFrame.
See Also
--------
DataFrame.unpivot
Notes
-----
Supports Spark Connect.
| def melt(
self,
ids: Union["ColumnOrName", List["ColumnOrName"], Tuple["ColumnOrName", ...]],
values: Optional[Union["ColumnOrName", List["ColumnOrName"], Tuple["ColumnOrName", ...]]],
variableColumnName: str,
valueColumnName: str,
) -> "DataFrame":
"""
Unpivot a DataFrame from wide format to long format, optionally leaving
identifier columns set. This is the reverse of `groupBy(...).pivot(...).agg(...)`,
except for the aggregation, which cannot be reversed.
:func:`melt` is an alias for :func:`unpivot`.
.. versionadded:: 3.4.0
Parameters
----------
ids : str, Column, tuple, list, optional
Column(s) to use as identifiers. Can be a single column or column name,
or a list or tuple for multiple columns.
values : str, Column, tuple, list, optional
Column(s) to unpivot. Can be a single column or column name, or a list or tuple
for multiple columns. If not specified or empty, use all columns that
are not set as `ids`.
variableColumnName : str
Name of the variable column.
valueColumnName : str
Name of the value column.
Returns
-------
:class:`DataFrame`
Unpivoted DataFrame.
See Also
--------
DataFrame.unpivot
Notes
-----
Supports Spark Connect.
"""
return self.unpivot(ids, values, variableColumnName, valueColumnName)
| (self, ids: Union[ForwardRef('ColumnOrName'), List[ForwardRef('ColumnOrName')], Tuple[ForwardRef('ColumnOrName'), ...]], values: Union[ForwardRef('ColumnOrName'), List[ForwardRef('ColumnOrName')], Tuple[ForwardRef('ColumnOrName'), ...], NoneType], variableColumnName: str, valueColumnName: str) -> 'DataFrame' |
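A wide-to-long reshape sketch, assuming an active SparkSession bound to `spark`; the column names are illustrative, and the value columns share a common type (double).
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
wide = spark.createDataFrame(
    [(1, 11.0, 1.1), (2, 12.0, 1.2)], ["id", "col_a", "col_b"])

long_df = wide.melt(
    ids="id",
    values=["col_a", "col_b"],
    variableColumnName="var",   # holds "col_a" / "col_b"
    valueColumnName="val",      # holds the corresponding value
)
long_df.show()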
39,401 | pyspark.sql.dataframe | observe | Define (named) metrics to observe on the DataFrame. This method returns an 'observed'
DataFrame that returns the same result as the input, with the following guarantees:
* It will compute the defined aggregates (metrics) on all the data that is flowing through
the Dataset at that point.
* It will report the value of the defined aggregate columns as soon as we reach a completion
point. A completion point is either the end of a query (batch mode) or the end of a
streaming epoch. The value of the aggregates only reflects the data processed since
the previous completion point.
The metric columns must contain either a literal (e.g. lit(42)) or one or
more aggregate functions (e.g. sum(a) or sum(a + b) + avg(c) - lit(1)). Expressions that
contain references to the input Dataset's columns must always be wrapped in an aggregate
function.
A user can observe these metrics by adding
Python's :class:`~pyspark.sql.streaming.StreamingQueryListener`,
Scala/Java's ``org.apache.spark.sql.streaming.StreamingQueryListener`` or Scala/Java's
``org.apache.spark.sql.util.QueryExecutionListener`` to the spark session.
.. versionadded:: 3.3.0
.. versionchanged:: 3.5.0
Supports Spark Connect.
Parameters
----------
observation : :class:`Observation` or str
`str` to specify the name, or an :class:`Observation` instance to obtain the metric.
.. versionchanged:: 3.4.0
Added support for `str` in this parameter.
exprs : :class:`Column`
column expressions (:class:`Column`).
Returns
-------
:class:`DataFrame`
the observed :class:`DataFrame`.
Notes
-----
When ``observation`` is :class:`Observation`, this method only supports batch queries.
When ``observation`` is a string, this method works for both batch and streaming queries.
Continuous execution is not yet supported.
Examples
--------
When ``observation`` is :class:`Observation`, only batch queries work as below.
>>> from pyspark.sql.functions import col, count, lit, max
>>> from pyspark.sql import Observation
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> observation = Observation("my metrics")
>>> observed_df = df.observe(observation, count(lit(1)).alias("count"), max(col("age")))
>>> observed_df.count()
2
>>> observation.get
{'count': 2, 'max(age)': 5}
When ``observation`` is a string, streaming queries also work as below.
>>> from pyspark.sql.streaming import StreamingQueryListener
>>> class MyErrorListener(StreamingQueryListener):
... def onQueryStarted(self, event):
... pass
...
... def onQueryProgress(self, event):
... row = event.progress.observedMetrics.get("my_event")
... # Trigger if the number of errors exceeds 5 percent
... num_rows = row.rc
... num_error_rows = row.erc
... ratio = num_error_rows / num_rows
... if ratio > 0.05:
... # Trigger alert
... pass
...
... def onQueryIdle(self, event):
... pass
...
... def onQueryTerminated(self, event):
... pass
...
>>> spark.streams.addListener(MyErrorListener())
>>> # Observe row count (rc) and error row count (erc) in the streaming Dataset
... observed_ds = df.observe(
... "my_event",
... count(lit(1)).alias("rc"),
... count(col("error")).alias("erc")) # doctest: +SKIP
>>> observed_ds.writeStream.format("console").start() # doctest: +SKIP
| def observe(
self,
observation: Union["Observation", str],
*exprs: Column,
) -> "DataFrame":
"""Define (named) metrics to observe on the DataFrame. This method returns an 'observed'
DataFrame that returns the same result as the input, with the following guarantees:
* It will compute the defined aggregates (metrics) on all the data that is flowing through
the Dataset at that point.
* It will report the value of the defined aggregate columns as soon as we reach a completion
point. A completion point is either the end of a query (batch mode) or the end of a
streaming epoch. The value of the aggregates only reflects the data processed since
the previous completion point.
The metric columns must contain either a literal (e.g. lit(42)) or one or
more aggregate functions (e.g. sum(a) or sum(a + b) + avg(c) - lit(1)). Expressions that
contain references to the input Dataset's columns must always be wrapped in an aggregate
function.
A user can observe these metrics by adding
Python's :class:`~pyspark.sql.streaming.StreamingQueryListener`,
Scala/Java's ``org.apache.spark.sql.streaming.StreamingQueryListener`` or Scala/Java's
``org.apache.spark.sql.util.QueryExecutionListener`` to the spark session.
.. versionadded:: 3.3.0
.. versionchanged:: 3.5.0
Supports Spark Connect.
Parameters
----------
observation : :class:`Observation` or str
`str` to specify the name, or an :class:`Observation` instance to obtain the metric.
.. versionchanged:: 3.4.0
Added support for `str` in this parameter.
exprs : :class:`Column`
column expressions (:class:`Column`).
Returns
-------
:class:`DataFrame`
the observed :class:`DataFrame`.
Notes
-----
When ``observation`` is :class:`Observation`, this method only supports batch queries.
When ``observation`` is a string, this method works for both batch and streaming queries.
Continuous execution is not yet supported.
Examples
--------
When ``observation`` is :class:`Observation`, only batch queries work as below.
>>> from pyspark.sql.functions import col, count, lit, max
>>> from pyspark.sql import Observation
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> observation = Observation("my metrics")
>>> observed_df = df.observe(observation, count(lit(1)).alias("count"), max(col("age")))
>>> observed_df.count()
2
>>> observation.get
{'count': 2, 'max(age)': 5}
When ``observation`` is a string, streaming queries also work as below.
>>> from pyspark.sql.streaming import StreamingQueryListener
>>> class MyErrorListener(StreamingQueryListener):
... def onQueryStarted(self, event):
... pass
...
... def onQueryProgress(self, event):
... row = event.progress.observedMetrics.get("my_event")
... # Trigger if the number of errors exceeds 5 percent
... num_rows = row.rc
... num_error_rows = row.erc
... ratio = num_error_rows / num_rows
... if ratio > 0.05:
... # Trigger alert
... pass
...
... def onQueryIdle(self, event):
... pass
...
... def onQueryTerminated(self, event):
... pass
...
>>> spark.streams.addListener(MyErrorListener())
>>> # Observe row count (rc) and error row count (erc) in the streaming Dataset
... observed_ds = df.observe(
... "my_event",
... count(lit(1)).alias("rc"),
... count(col("error")).alias("erc")) # doctest: +SKIP
>>> observed_ds.writeStream.format("console").start() # doctest: +SKIP
"""
from pyspark.sql import Observation
if len(exprs) == 0:
raise PySparkValueError(
error_class="CANNOT_BE_EMPTY",
message_parameters={"item": "exprs"},
)
if not all(isinstance(c, Column) for c in exprs):
raise PySparkTypeError(
error_class="NOT_LIST_OF_COLUMN",
message_parameters={"arg_name": "exprs"},
)
if isinstance(observation, Observation):
return observation._on(self, *exprs)
elif isinstance(observation, str):
return DataFrame(
self._jdf.observe(
observation, exprs[0]._jc, _to_seq(self._sc, [c._jc for c in exprs[1:]])
),
self.sparkSession,
)
else:
raise PySparkTypeError(
error_class="NOT_LIST_OF_COLUMN",
message_parameters={
"arg_name": "observation",
"arg_type": type(observation).__name__,
},
)
| (self, observation: Union[ForwardRef('Observation'), str], *exprs: pyspark.sql.column.Column) -> 'DataFrame' |
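A batch-mode sketch: metrics are collected through an `Observation` without a second pass over the data. It assumes an active SparkSession bound to `spark`; the "noop" sink is used only to trigger an action.
from pyspark.sql import SparkSession, Observation
from pyspark.sql import functions as F

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"])

obs = Observation("stats")
observed = df.observe(
    obs, F.count(F.lit(1)).alias("rows"), F.sum("age").alias("age_sum"))

observed.write.format("noop").mode("overwrite").save()  # any action completes the query
print(obs.get)                                          # e.g. {'rows': 2, 'age_sum': 7}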
39,402 | pyspark.sql.dataframe | offset | Returns a new :class:`DataFrame` by skipping the first `num` rows.
.. versionadded:: 3.4.0
.. versionchanged:: 3.5.0
Supports vanilla PySpark.
Parameters
----------
num : int
Number of records to skip.
Returns
-------
:class:`DataFrame`
Subset of the records
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.offset(1).show()
+---+-----+
|age| name|
+---+-----+
| 23|Alice|
| 16| Bob|
+---+-----+
>>> df.offset(10).show()
+---+----+
|age|name|
+---+----+
+---+----+
| def offset(self, num: int) -> "DataFrame":
"""Returns a new :class:`DataFrame` by skipping the first `num` rows.
.. versionadded:: 3.4.0
.. versionchanged:: 3.5.0
Supports vanilla PySpark.
Parameters
----------
num : int
Number of records to skip.
Returns
-------
:class:`DataFrame`
Subset of the records
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.offset(1).show()
+---+-----+
|age| name|
+---+-----+
| 23|Alice|
| 16| Bob|
+---+-----+
>>> df.offset(10).show()
+---+----+
|age|name|
+---+----+
+---+----+
"""
jdf = self._jdf.offset(num)
return DataFrame(jdf, self.sparkSession)
| (self, num: int) -> pyspark.sql.dataframe.DataFrame |
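A pagination sketch combining `orderBy`, `offset`, and `limit`, assuming an active SparkSession bound to `spark`; a deterministic sort keeps the pages stable.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])

page_size = 2
page = 1  # zero-based page index
(df.orderBy("age")
   .offset(page * page_size)
   .limit(page_size)
   .show())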
39,403 | pyspark.sql.dataframe | sort | Returns a new :class:`DataFrame` sorted by the specified column(s).
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : str, list, or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional, default True
boolean or list of boolean.
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, the length of the list must equal the length of the `cols`.
Returns
-------
:class:`DataFrame`
Sorted DataFrame.
Examples
--------
>>> from pyspark.sql.functions import desc, asc
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
Sort the DataFrame in ascending order.
>>> df.sort(asc("age")).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
Sort the DataFrame in descending order.
>>> df.sort(df.age.desc()).show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 2|Alice|
+---+-----+
>>> df.orderBy(df.age.desc()).show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 2|Alice|
+---+-----+
>>> df.sort("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 2|Alice|
+---+-----+
Specify multiple columns
>>> df = spark.createDataFrame([
... (2, "Alice"), (2, "Bob"), (5, "Bob")], schema=["age", "name"])
>>> df.orderBy(desc("age"), "name").show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 2|Alice|
| 2| Bob|
+---+-----+
Specify multiple columns for sorting order at `ascending`.
>>> df.orderBy(["age", "name"], ascending=[False, False]).show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 2| Bob|
| 2|Alice|
+---+-----+
| def sort(
self, *cols: Union[str, Column, List[Union[str, Column]]], **kwargs: Any
) -> "DataFrame":
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : str, list, or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional, default True
boolean or list of boolean.
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, the length of the list must equal the length of the `cols`.
Returns
-------
:class:`DataFrame`
Sorted DataFrame.
Examples
--------
>>> from pyspark.sql.functions import desc, asc
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
Sort the DataFrame in ascending order.
>>> df.sort(asc("age")).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
Sort the DataFrame in descending order.
>>> df.sort(df.age.desc()).show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 2|Alice|
+---+-----+
>>> df.orderBy(df.age.desc()).show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 2|Alice|
+---+-----+
>>> df.sort("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 2|Alice|
+---+-----+
Specify multiple columns
>>> df = spark.createDataFrame([
... (2, "Alice"), (2, "Bob"), (5, "Bob")], schema=["age", "name"])
>>> df.orderBy(desc("age"), "name").show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 2|Alice|
| 2| Bob|
+---+-----+
Specify multiple columns for sorting order at `ascending`.
>>> df.orderBy(["age", "name"], ascending=[False, False]).show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 2| Bob|
| 2|Alice|
+---+-----+
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sparkSession)
| (self, *cols: Union[str, pyspark.sql.column.Column, List[Union[str, pyspark.sql.column.Column]]], **kwargs: Any) -> pyspark.sql.dataframe.DataFrame |
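A sketch of sort expressions with explicit null placement, assuming an active SparkSession bound to `spark`.
from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [(2, "Alice"), (None, "Bob"), (5, None)], ["age", "name"])

# Descending age with nulls last, then ascending name with nulls first.
df.sort(F.col("age").desc_nulls_last(), F.asc_nulls_first("name")).show()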
39,404 | pyspark.sql.dataframe | pandas_api |
Converts the existing DataFrame into a pandas-on-Spark DataFrame.
.. versionadded:: 3.2.0
.. versionchanged:: 3.5.0
Supports Spark Connect.
If a pandas-on-Spark DataFrame is converted to a Spark DataFrame and then back
to pandas-on-Spark, it will lose the index information and the original index
will be turned into a normal column.
This is only available if Pandas is installed and available.
Parameters
----------
index_col: str or list of str, optional, default: None
Index column of table in Spark.
Returns
-------
:class:`PandasOnSparkDataFrame`
See Also
--------
pyspark.pandas.frame.DataFrame.to_spark
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.pandas_api() # doctest: +SKIP
age name
0 14 Tom
1 23 Alice
2 16 Bob
We can specify the index columns.
>>> df.pandas_api(index_col="age") # doctest: +SKIP
name
age
14 Tom
23 Alice
16 Bob
| def pandas_api(
self, index_col: Optional[Union[str, List[str]]] = None
) -> "PandasOnSparkDataFrame":
"""
Converts the existing DataFrame into a pandas-on-Spark DataFrame.
.. versionadded:: 3.2.0
.. versionchanged:: 3.5.0
Supports Spark Connect.
If a pandas-on-Spark DataFrame is converted to a Spark DataFrame and then back
to pandas-on-Spark, it will lose the index information and the original index
will be turned into a normal column.
This is only available if Pandas is installed and available.
Parameters
----------
index_col: str or list of str, optional, default: None
Index column of table in Spark.
Returns
-------
:class:`PandasOnSparkDataFrame`
See Also
--------
pyspark.pandas.frame.DataFrame.to_spark
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.pandas_api() # doctest: +SKIP
age name
0 14 Tom
1 23 Alice
2 16 Bob
We can specify the index columns.
>>> df.pandas_api(index_col="age") # doctest: +SKIP
name
age
14 Tom
23 Alice
16 Bob
"""
from pyspark.pandas.namespace import _get_index_map
from pyspark.pandas.frame import DataFrame as PandasOnSparkDataFrame
from pyspark.pandas.internal import InternalFrame
index_spark_columns, index_names = _get_index_map(self, index_col)
internal = InternalFrame(
spark_frame=self,
index_spark_columns=index_spark_columns,
index_names=index_names, # type: ignore[arg-type]
)
return PandasOnSparkDataFrame(internal)
| (self, index_col: Union[str, List[str], NoneType] = None) -> 'PandasOnSparkDataFrame' |
39,405 | pyspark.sql.dataframe | persist | Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified, it defaults to `MEMORY_AND_DISK_DESER`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK_DESER` to match Scala in 3.0.
Parameters
----------
storageLevel : :class:`StorageLevel`
Storage level to set for persistence. Default is MEMORY_AND_DISK_DESER.
Returns
-------
:class:`DataFrame`
Persisted DataFrame.
Examples
--------
>>> df = spark.range(1)
>>> df.persist()
DataFrame[id: bigint]
>>> df.explain()
== Physical Plan ==
AdaptiveSparkPlan isFinalPlan=false
+- InMemoryTableScan ...
Persists the data on disk by specifying the storage level.
>>> from pyspark.storagelevel import StorageLevel
>>> df.persist(StorageLevel.DISK_ONLY)
DataFrame[id: bigint]
| def persist(
self,
storageLevel: StorageLevel = (StorageLevel.MEMORY_AND_DISK_DESER),
) -> "DataFrame":
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified, it defaults to `MEMORY_AND_DISK_DESER`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK_DESER` to match Scala in 3.0.
Parameters
----------
storageLevel : :class:`StorageLevel`
Storage level to set for persistence. Default is MEMORY_AND_DISK_DESER.
Returns
-------
:class:`DataFrame`
Persisted DataFrame.
Examples
--------
>>> df = spark.range(1)
>>> df.persist()
DataFrame[id: bigint]
>>> df.explain()
== Physical Plan ==
AdaptiveSparkPlan isFinalPlan=false
+- InMemoryTableScan ...
Persists the data on disk by specifying the storage level.
>>> from pyspark.storagelevel import StorageLevel
>>> df.persist(StorageLevel.DISK_ONLY)
DataFrame[id: bigint]
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
| (self, storageLevel: pyspark.storagelevel.StorageLevel = StorageLevel(True, True, False, True, 1)) -> pyspark.sql.dataframe.DataFrame |
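A sketch of persisting with an explicit storage level, reusing the DataFrame across two actions, then releasing the storage; it assumes an active SparkSession bound to `spark`.
from pyspark.sql import SparkSession
from pyspark.storagelevel import StorageLevel

spark = SparkSession.builder.getOrCreate()
df = spark.range(1_000_000).persist(StorageLevel.MEMORY_AND_DISK)

print(df.count())                          # first action materializes the cache
print(df.filter("id % 2 = 0").count())     # served from the persisted data
df.unpersist()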
39,406 | pyspark.sql.dataframe | printSchema | Prints out the schema in the tree format.
Optionally allows specifying how many levels to print if the schema is nested.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
level : int, optional, default None
How many levels to print for nested schemas.
.. versionchanged:: 3.5.0
Added the ``level`` parameter.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.printSchema()
root
|-- age: long (nullable = true)
|-- name: string (nullable = true)
>>> df = spark.createDataFrame([(1, (2,2))], ["a", "b"])
>>> df.printSchema(1)
root
|-- a: long (nullable = true)
|-- b: struct (nullable = true)
>>> df.printSchema(2)
root
|-- a: long (nullable = true)
|-- b: struct (nullable = true)
| |-- _1: long (nullable = true)
| |-- _2: long (nullable = true)
| def printSchema(self, level: Optional[int] = None) -> None:
"""Prints out the schema in the tree format.
Optionally allows specifying how many levels to print if the schema is nested.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
level : int, optional, default None
How many levels to print for nested schemas.
.. versionchanged:: 3.5.0
Added the ``level`` parameter.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.printSchema()
root
|-- age: long (nullable = true)
|-- name: string (nullable = true)
>>> df = spark.createDataFrame([(1, (2,2))], ["a", "b"])
>>> df.printSchema(1)
root
|-- a: long (nullable = true)
|-- b: struct (nullable = true)
>>> df.printSchema(2)
root
|-- a: long (nullable = true)
|-- b: struct (nullable = true)
| |-- _1: long (nullable = true)
| |-- _2: long (nullable = true)
"""
if level:
print(self._jdf.schema().treeString(level))
else:
print(self._jdf.schema().treeString())
| (self, level: Optional[int] = None) -> NoneType |
39,407 | pyspark.sql.dataframe | randomSplit | Randomly splits this :class:`DataFrame` with the provided weights.
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
weights : list
list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
seed : int, optional
The seed for sampling.
Returns
-------
list
List of DataFrames.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([
... Row(age=10, height=80, name="Alice"),
... Row(age=5, height=None, name="Bob"),
... Row(age=None, height=None, name="Tom"),
... Row(age=None, height=None, name=None),
... ])
>>> splits = df.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
| def randomSplit(self, weights: List[float], seed: Optional[int] = None) -> List["DataFrame"]:
"""Randomly splits this :class:`DataFrame` with the provided weights.
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
weights : list
list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
seed : int, optional
The seed for sampling.
Returns
-------
list
List of DataFrames.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([
... Row(age=10, height=80, name="Alice"),
... Row(age=5, height=None, name="Bob"),
... Row(age=None, height=None, name="Tom"),
... Row(age=None, height=None, name=None),
... ])
>>> splits = df.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
"""
for w in weights:
if w < 0.0:
raise PySparkValueError(
error_class="VALUE_NOT_POSITIVE",
message_parameters={"arg_name": "weights", "arg_value": str(w)},
)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
df_array = self._jdf.randomSplit(
_to_list(self.sparkSession._sc, cast(List["ColumnOrName"], weights)), int(seed)
)
return [DataFrame(df, self.sparkSession) for df in df_array]
| (self, weights: List[float], seed: Optional[int] = None) -> List[pyspark.sql.dataframe.DataFrame] |
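A reproducible train/test split sketch, assuming an active SparkSession bound to `spark`; weights are normalized, so [0.8, 0.2] and [4.0, 1.0] behave the same.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.range(100)
train, test = df.randomSplit([0.8, 0.2], seed=13)
print(train.count(), test.count())  # roughly 80 / 20; exact counts vary with the seed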
39,408 | pyspark.sql.dataframe | registerTempTable | Registers this :class:`DataFrame` as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
.. deprecated:: 2.0.0
Use :meth:`DataFrame.createOrReplaceTempView` instead.
Parameters
----------
name : str
Name of the temporary table to register.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.registerTempTable("people")
>>> df2 = spark.sql("SELECT * FROM people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
True
| def registerTempTable(self, name: str) -> None:
"""Registers this :class:`DataFrame` as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
.. deprecated:: 2.0.0
Use :meth:`DataFrame.createOrReplaceTempView` instead.
Parameters
----------
name : str
Name of the temporary table to register.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.registerTempTable("people")
>>> df2 = spark.sql("SELECT * FROM people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
True
"""
warnings.warn("Deprecated in 2.0, use createOrReplaceTempView instead.", FutureWarning)
self._jdf.createOrReplaceTempView(name)
| (self, name: str) -> NoneType |
39,409 | pyspark.sql.dataframe | repartition |
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
.. versionchanged:: 1.6.0
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
Returns
-------
:class:`DataFrame`
Repartitioned DataFrame.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
Repartition the data into 10 partitions.
>>> df.repartition(10).rdd.getNumPartitions()
10
Repartition the data into 7 partitions by 'age' column.
>>> df.repartition(7, "age").rdd.getNumPartitions()
7
Repartition the data into 3 partitions by 'name' and 'age' columns.
>>> df.repartition(3, "name", "age").rdd.getNumPartitions()
3
| def repartition( # type: ignore[misc]
self, numPartitions: Union[int, "ColumnOrName"], *cols: "ColumnOrName"
) -> "DataFrame":
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
.. versionchanged:: 1.6.0
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
Returns
-------
:class:`DataFrame`
Repartitioned DataFrame.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
Repartition the data into 10 partitions.
>>> df.repartition(10).rdd.getNumPartitions()
10
Repartition the data into 7 partitions by 'age' column.
>>> df.repartition(7, "age").rdd.getNumPartitions()
7
Repartition the data into 3 partitions by 'name' and 'age' columns.
>>> df.repartition(3, "name", "age").rdd.getNumPartitions()
3
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sparkSession)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)),
self.sparkSession,
)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sparkSession)
else:
raise PySparkTypeError(
error_class="NOT_COLUMN_OR_STR",
message_parameters={
"arg_name": "numPartitions",
"arg_type": type(numPartitions).__name__,
},
)
| (self, numPartitions: Union[int, ForwardRef('ColumnOrName')], *cols: 'ColumnOrName') -> 'DataFrame' |
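A sketch contrasting column-only repartitioning (partition count taken from spark.sql.shuffle.partitions) with an explicit count, assuming an active SparkSession bound to `spark`.
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])

by_name = df.repartition("name")      # hash partition by column, default partition count
fixed = df.repartition(4, "name")     # hash partition by column, exactly 4 partitions
print(by_name.rdd.getNumPartitions(), fixed.rdd.getNumPartitions())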
39,410 | pyspark.sql.dataframe | repartitionByRange |
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
.. versionadded:: 2.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
Returns
-------
:class:`DataFrame`
Repartitioned DataFrame.
Notes
-----
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
Due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
Repartition the data into 2 partitions by range in 'age' column.
For example, the first partition can have ``(14, "Tom")``, and the second
partition would have ``(16, "Bob")`` and ``(23, "Alice")``.
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
| def repartitionByRange( # type: ignore[misc]
self, numPartitions: Union[int, "ColumnOrName"], *cols: "ColumnOrName"
) -> "DataFrame":
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
.. versionadded:: 2.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
Returns
-------
:class:`DataFrame`
Repartitioned DataFrame.
Notes
-----
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
Due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
Repartition the data into 2 partitions by range in 'age' column.
For example, the first partition can have ``(14, "Tom")``, and the second
partition would have ``(16, "Bob")`` and ``(23, "Alice")``.
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
raise PySparkValueError(
error_class="CANNOT_BE_EMPTY",
message_parameters={"item": "partition-by expression"},
)
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)),
self.sparkSession,
)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sparkSession)
else:
raise PySparkTypeError(
error_class="NOT_COLUMN_OR_INT_OR_STR",
message_parameters={
"arg_name": "numPartitions",
"arg_type": type(numPartitions).__name__,
},
)
| (self, numPartitions: Union[int, ForwardRef('ColumnOrName')], *cols: 'ColumnOrName') -> 'DataFrame' |
39,411 | pyspark.sql.dataframe | replace | Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values `to_replace` and `value` must have the same type and can only be numerics, booleans,
or strings. `value` can be None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements, all values to be replaced should have unique
floating point representations. In case of conflicts (for example with `{42: -1, 42.0: 1}`),
an arbitrary replacement will be used.
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
to_replace : bool, int, float, string, list or dict
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
value : bool, int, float, string or None, optional
The replacement value must be a bool, int, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
subset : list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data types are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Returns
-------
:class:`DataFrame`
DataFrame with replaced values.
Examples
--------
>>> df = spark.createDataFrame([
... (10, 80, "Alice"),
... (5, None, "Bob"),
... (None, 10, "Tom"),
... (None, None, None)],
... schema=["age", "height", "name"])
Replace 10 to 20 in all columns.
>>> df.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| NULL| Bob|
|NULL| 20| Tom|
|NULL| NULL| NULL|
+----+------+-----+
Replace 'Alice' to null in all columns.
>>> df.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|NULL|
| 5| NULL| Bob|
|NULL| 10| Tom|
|NULL| NULL|NULL|
+----+------+----+
Replace 'Alice' to 'A', and 'Bob' to 'B' in the 'name' column.
>>> df.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| NULL| B|
|NULL| 10| Tom|
|NULL| NULL|NULL|
+----+------+----+
| def replace( # type: ignore[misc]
self,
to_replace: Union[
"LiteralType", List["LiteralType"], Dict["LiteralType", "OptionalPrimitiveType"]
],
value: Optional[
Union["OptionalPrimitiveType", List["OptionalPrimitiveType"], _NoValueType]
] = _NoValue,
subset: Optional[List[str]] = None,
) -> "DataFrame":
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values `to_replace` and `value` must have the same type and can only be numerics, booleans,
or strings. `value` can be None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements, all values to be replaced should have unique
floating point representations. In case of conflicts (for example with `{42: -1, 42.0: 1}`),
an arbitrary replacement will be used.
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
to_replace : bool, int, float, string, list or dict
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
value : bool, int, float, string or None, optional
The replacement value must be a bool, int, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
subset : list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data types are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Returns
-------
:class:`DataFrame`
DataFrame with replaced values.
Examples
--------
>>> df = spark.createDataFrame([
... (10, 80, "Alice"),
... (5, None, "Bob"),
... (None, 10, "Tom"),
... (None, None, None)],
... schema=["age", "height", "name"])
Replace 10 to 20 in all columns.
>>> df.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| NULL| Bob|
|NULL| 20| Tom|
|NULL| NULL| NULL|
+----+------+-----+
Replace 'Alice' to null in all columns.
>>> df.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|NULL|
| 5| NULL| Bob|
|NULL| 10| Tom|
|NULL| NULL|NULL|
+----+------+----+
Replace 'Alice' to 'A', and 'Bob' to 'B' in the 'name' column.
>>> df.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| NULL| B|
|NULL| 10| Tom|
|NULL| NULL|NULL|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise PySparkTypeError(
error_class="ARGUMENT_REQUIRED",
message_parameters={"arg_name": "value", "condition": "`to_replace` is dict"},
)
# Helper functions
def all_of(types: Union[Type, Tuple[Type, ...]]) -> Callable[[Iterable], bool]:
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(str)(["a", 1])
False
"""
def all_of_(xs: Iterable) -> bool:
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(str)
all_of_numeric = all_of((float, int))
# Validate input types
valid_types = (bool, float, int, str, list, tuple)
if not isinstance(to_replace, valid_types + (dict,)):
raise PySparkTypeError(
error_class="NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_LIST_OR_STR_OR_TUPLE",
message_parameters={
"arg_name": "to_replace",
"arg_type": type(to_replace).__name__,
},
)
if (
not isinstance(value, valid_types)
and value is not None
and not isinstance(to_replace, dict)
):
raise PySparkTypeError(
error_class="NOT_BOOL_OR_FLOAT_OR_INT_OR_LIST_OR_NONE_OR_STR_OR_TUPLE",
message_parameters={
"arg_name": "value",
"arg_type": type(value).__name__,
},
)
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise PySparkValueError(
error_class="LENGTH_SHOULD_BE_THE_SAME",
message_parameters={
"arg1": "to_replace",
"arg2": "value",
"arg1_length": str(len(to_replace)),
"arg2_length": str(len(value)),
},
)
if not (subset is None or isinstance(subset, (list, tuple, str))):
raise PySparkTypeError(
error_class="NOT_LIST_OR_STR_OR_TUPLE",
message_parameters={"arg_name": "subset", "arg_type": type(subset).__name__},
)
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, str)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, str)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, cast("Iterable[Optional[Union[float, str]]]", value)))
if isinstance(subset, str):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(
all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]
):
raise PySparkValueError(
error_class="MIXED_TYPE_REPLACEMENT",
message_parameters={},
)
if subset is None:
return DataFrame(self._jdf.na().replace("*", rep_dict), self.sparkSession)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)),
self.sparkSession,
)
| (self, to_replace: Union[ForwardRef('LiteralType'), List[ForwardRef('LiteralType')], Dict[ForwardRef('LiteralType'), ForwardRef('OptionalPrimitiveType')]], value: Union[ForwardRef('OptionalPrimitiveType'), List[ForwardRef('OptionalPrimitiveType')], pyspark._globals._NoValueType, NoneType] = <no value>, subset: Optional[List[str]] = None) -> 'DataFrame' |
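A minimal sketch of dict-based replacement, reusing the `df` from the example above; when `to_replace` is a dict, the `value` argument is omitted:
# Keys are matched across all columns (or across `subset` if given) and replaced by their mapped values.
df.na.replace({10: 20, 80: 90}).show()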
39,412 | pyspark.sql.dataframe | rollup |
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : list, str or :class:`Column`
Columns to roll-up by.
Each element should be a column name (string) or an expression (:class:`Column`)
or list of them.
Returns
-------
:class:`GroupedData`
Rolled-up data by given columns.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| NULL|NULL| 2|
|Alice|NULL| 1|
|Alice| 2| 1|
| Bob|NULL| 1|
| Bob| 5| 1|
+-----+----+-----+
| def rollup(self, *cols: "ColumnOrName") -> "GroupedData": # type: ignore[misc]
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
.. versionadded:: 1.4.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : list, str or :class:`Column`
Columns to roll-up by.
Each element should be a column name (string) or an expression (:class:`Column`)
or list of them.
Returns
-------
:class:`GroupedData`
Rolled-up data by given columns.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| NULL|NULL| 2|
|Alice|NULL| 1|
|Alice| 2| 1|
| Bob|NULL| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
| (self, *cols: 'ColumnOrName') -> 'GroupedData' |
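A small sketch of running an aggregate other than `count()` on the rolled-up data, reusing the `df` from the example above:
from pyspark.sql import functions as F
# Subtotal per name plus a grand-total row (name is NULL on the grand-total row).
df.rollup("name").agg(F.sum("age").alias("total_age")).orderBy("name").show()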
39,413 | pyspark.sql.dataframe | sameSemantics |
Returns `True` when the logical query plans inside both :class:`DataFrame`\s are equal and
therefore return the same results.
.. versionadded:: 3.1.0
.. versionchanged:: 3.5.0
Supports Spark Connect.
Notes
-----
The equality comparison here is simplified by tolerating the cosmetic differences
such as attribute names.
This API can compare both :class:`DataFrame`\s very quickly but can still return
`False` for :class:`DataFrame`\s that return the same results, for instance, when they are built
from different plans. Such false negatives are acceptable, for example, when this check is used for caching.
This API is a developer API.
Parameters
----------
other : :class:`DataFrame`
The other DataFrame to compare against.
Returns
-------
bool
Whether these two DataFrames are similar.
Examples
--------
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
| def sameSemantics(self, other: "DataFrame") -> bool:
"""
Returns `True` when the logical query plans inside both :class:`DataFrame`\\s are equal and
therefore return the same results.
.. versionadded:: 3.1.0
.. versionchanged:: 3.5.0
Supports Spark Connect.
Notes
-----
The equality comparison here is simplified by tolerating the cosmetic differences
such as attribute names.
This API can compare both :class:`DataFrame`\\s very quickly but can still return
`False` for :class:`DataFrame`\\s that return the same results, for instance, when they are built
from different plans. Such false negatives are acceptable, for example, when this check is used for caching.
This API is a developer API.
Parameters
----------
other : :class:`DataFrame`
The other DataFrame to compare against.
Returns
-------
bool
Whether these two DataFrames are similar.
Examples
--------
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
"""
if not isinstance(other, DataFrame):
raise PySparkTypeError(
error_class="NOT_STR",
message_parameters={"arg_name": "other", "arg_type": type(other).__name__},
)
return self._jdf.sameSemantics(other._jdf)
| (self, other: pyspark.sql.dataframe.DataFrame) -> bool |
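A hedged sketch of the caching use case mentioned in the notes; `cached` and `candidate` are illustrative names, and an active SparkSession named `spark` is assumed:
from pyspark.sql.functions import col
cached = spark.range(100).withColumn("x", col("id") * 2).cache()
candidate = spark.range(100).withColumn("y", col("id") * 2)
if candidate.sameSemantics(cached):
    candidate = cached  # reuse the already cached plan instead of recomputing it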
39,414 | pyspark.sql.dataframe | sample | Returns a sampled subset of this :class:`DataFrame`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
withReplacement : bool, optional
Sample with replacement or not (default ``False``).
fraction : float, optional
Fraction of rows to generate, range [0.0, 1.0].
seed : int, optional
Seed for sampling (default a random seed).
Returns
-------
:class:`DataFrame`
Sampled rows from given DataFrame.
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
`fraction` is required, and `withReplacement` and `seed` are optional.
Examples
--------
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count() # doctest: +SKIP
7
>>> df.sample(fraction=0.5, seed=3).count() # doctest: +SKIP
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count() # doctest: +SKIP
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
| def sample( # type: ignore[misc]
self,
withReplacement: Optional[Union[float, bool]] = None,
fraction: Optional[Union[int, float]] = None,
seed: Optional[int] = None,
) -> "DataFrame":
"""Returns a sampled subset of this :class:`DataFrame`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
withReplacement : bool, optional
Sample with replacement or not (default ``False``).
fraction : float, optional
Fraction of rows to generate, range [0.0, 1.0].
seed : int, optional
Seed for sampling (default a random seed).
Returns
-------
:class:`DataFrame`
Sampled rows from given DataFrame.
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
`fraction` is required, and `withReplacement` and `seed` are optional.
Examples
--------
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count() # doctest: +SKIP
7
>>> df.sample(fraction=0.5, seed=3).count() # doctest: +SKIP
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count() # doctest: +SKIP
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (
is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args
):
argtypes = [type(arg).__name__ for arg in [withReplacement, fraction, seed]]
raise PySparkTypeError(
error_class="NOT_BOOL_OR_FLOAT_OR_INT",
message_parameters={
"arg_name": "withReplacement (optional), "
+ "fraction (required) and seed (optional)",
"arg_type": ", ".join(argtypes),
},
)
if is_withReplacement_omitted_args:
if fraction is not None:
seed = cast(int, fraction)
fraction = withReplacement
withReplacement = None
seed = int(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sparkSession)
| (self, withReplacement: Union[float, bool, NoneType] = None, fraction: Union[int, float, NoneType] = None, seed: Optional[int] = None) -> pyspark.sql.dataframe.DataFrame |
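A short sketch showing that fixing `seed` makes sampling repeatable on the same DataFrame; assumes an active SparkSession named `spark`:
df = spark.range(100)
s1 = df.sample(fraction=0.2, seed=7)
s2 = df.sample(fraction=0.2, seed=7)
# With the same fraction and seed (and unchanged partitioning), s1 and s2 should contain the same rows.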
39,415 | pyspark.sql.dataframe | sampleBy |
Returns a stratified sample without replacement based on the
fraction given on each stratum.
.. versionadded:: 1.5.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
col : :class:`Column` or str
column that defines strata
.. versionchanged:: 3.0.0
Added sampling by a column of :class:`Column`
fractions : dict
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
seed : int, optional
random seed
Returns
-------
a new :class:`DataFrame` that represents the stratified sample
Examples
--------
>>> from pyspark.sql.functions import col
>>> dataset = spark.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
| def sampleBy(
self, col: "ColumnOrName", fractions: Dict[Any, float], seed: Optional[int] = None
) -> "DataFrame":
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
.. versionadded:: 1.5.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
col : :class:`Column` or str
column that defines strata
.. versionchanged:: 3.0.0
Added sampling by a column of :class:`Column`
fractions : dict
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
seed : int, optional
random seed
Returns
-------
a new :class:`DataFrame` that represents the stratified sample
Examples
--------
>>> from pyspark.sql.functions import col
>>> dataset = spark.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
"""
if isinstance(col, str):
col = Column(col)
elif not isinstance(col, Column):
raise PySparkTypeError(
error_class="NOT_COLUMN_OR_STR",
message_parameters={"arg_name": "col", "arg_type": type(col).__name__},
)
if not isinstance(fractions, dict):
raise PySparkTypeError(
error_class="NOT_DICT",
message_parameters={"arg_name": "fractions", "arg_type": type(fractions).__name__},
)
for k, v in fractions.items():
if not isinstance(k, (float, int, str)):
raise PySparkTypeError(
error_class="DISALLOWED_TYPE_FOR_CONTAINER",
message_parameters={
"arg_name": "fractions",
"arg_type": type(fractions).__name__,
"allowed_types": "float, int, str",
"return_type": type(k).__name__,
},
)
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(
self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sparkSession
)
| (self, col: 'ColumnOrName', fractions: Dict[Any, float], seed: Optional[int] = None) -> 'DataFrame' |
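A brief sketch, reusing the `dataset` from the example above, showing that strata missing from `fractions` default to a fraction of zero:
# Rows with key == 2 are dropped because 2 is not listed in `fractions`.
dataset.sampleBy("key", fractions={0: 0.5, 1: 0.5}, seed=42).groupBy("key").count().show()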
39,416 | pyspark.sql.dataframe | select | Projects a set of expressions and returns a new :class:`DataFrame`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : str, :class:`Column`, or list
column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current :class:`DataFrame`.
Returns
-------
:class:`DataFrame`
A DataFrame with subset (or all) of columns.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
Select all columns in the DataFrame.
>>> df.select('*').show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
Select a column with other expressions in the DataFrame.
>>> df.select(df.name, (df.age + 10).alias('age')).show()
+-----+---+
| name|age|
+-----+---+
|Alice| 12|
| Bob| 15|
+-----+---+
| def select(self, *cols: "ColumnOrName") -> "DataFrame": # type: ignore[misc]
"""Projects a set of expressions and returns a new :class:`DataFrame`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : str, :class:`Column`, or list
column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current :class:`DataFrame`.
Returns
-------
:class:`DataFrame`
A DataFrame with subset (or all) of columns.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
Select all columns in the DataFrame.
>>> df.select('*').show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
Select a column with other expressions in the DataFrame.
>>> df.select(df.name, (df.age + 10).alias('age')).show()
+-----+---+
| name|age|
+-----+---+
|Alice| 12|
| Bob| 15|
+-----+---+
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sparkSession)
| (self, *cols: 'ColumnOrName') -> 'DataFrame' |
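A minimal sketch mixing column names with Column expressions, reusing the `df` from the example above:
from pyspark.sql.functions import col, upper
df.select("age", upper(col("name")).alias("name_upper")).show()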
39,417 | pyspark.sql.dataframe | selectExpr | Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Returns
-------
:class:`DataFrame`
A DataFrame with new/old columns transformed by expressions.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.selectExpr("age * 2", "abs(age)").show()
+---------+--------+
|(age * 2)|abs(age)|
+---------+--------+
| 4| 2|
| 10| 5|
+---------+--------+
| def selectExpr(self, *expr: Union[str, List[str]]) -> "DataFrame":
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Returns
-------
:class:`DataFrame`
A DataFrame with new/old columns transformed by expressions.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.selectExpr("age * 2", "abs(age)").show()
+---------+--------+
|(age * 2)|abs(age)|
+---------+--------+
| 4| 2|
| 10| 5|
+---------+--------+
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0] # type: ignore[assignment]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sparkSession)
| (self, *expr: Union[str, List[str]]) -> pyspark.sql.dataframe.DataFrame |
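A minimal sketch, reusing the `df` from the example above, showing that any SQL expression (aliases, CASE, built-in functions) is accepted:
df.selectExpr("name", "age + 1 AS next_age", "CASE WHEN age > 3 THEN 'senior' ELSE 'junior' END AS tier").show()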
39,418 | pyspark.sql.dataframe | semanticHash |
Returns a hash code of the logical query plan against this :class:`DataFrame`.
.. versionadded:: 3.1.0
.. versionchanged:: 3.5.0
Supports Spark Connect.
Notes
-----
Unlike the standard hash code, the hash is calculated against the query plan
simplified by tolerating the cosmetic differences such as attribute names.
This API is a developer API.
Returns
-------
int
Hash value.
Examples
--------
>>> spark.range(10).selectExpr("id as col0").semanticHash() # doctest: +SKIP
1855039936
>>> spark.range(10).selectExpr("id as col1").semanticHash() # doctest: +SKIP
1855039936
| def semanticHash(self) -> int:
"""
Returns a hash code of the logical query plan against this :class:`DataFrame`.
.. versionadded:: 3.1.0
.. versionchanged:: 3.5.0
Supports Spark Connect.
Notes
-----
Unlike the standard hash code, the hash is calculated against the query plan
simplified by tolerating the cosmetic differences such as attribute names.
This API is a developer API.
Returns
-------
int
Hash value.
Examples
--------
>>> spark.range(10).selectExpr("id as col0").semanticHash() # doctest: +SKIP
1855039936
>>> spark.range(10).selectExpr("id as col1").semanticHash() # doctest: +SKIP
1855039936
"""
return self._jdf.semanticHash()
| (self) -> int |
39,419 | pyspark.sql.dataframe | show | Prints the first ``n`` rows to the console.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
n : int, optional
Number of rows to show.
truncate : bool or int, optional
If set to ``True``, truncates strings longer than 20 characters by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and aligns cells to the right.
vertical : bool, optional
If set to ``True``, print output rows vertically (one line
per column value).
Examples
--------
>>> df = spark.createDataFrame([
... (14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
Show only top 2 rows.
>>> df.show(2)
+---+-----+
|age| name|
+---+-----+
| 14| Tom|
| 23|Alice|
+---+-----+
only showing top 2 rows
Show :class:`DataFrame` where the maximum number of characters is 3.
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 14| Tom|
| 23| Ali|
| 16| Bob|
+---+----+
Show :class:`DataFrame` vertically.
>>> df.show(vertical=True)
-RECORD 0-----
age | 14
name | Tom
-RECORD 1-----
age | 23
name | Alice
-RECORD 2-----
age | 16
name | Bob
| def show(self, n: int = 20, truncate: Union[bool, int] = True, vertical: bool = False) -> None:
"""Prints the first ``n`` rows to the console.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
n : int, optional
Number of rows to show.
truncate : bool or int, optional
If set to ``True``, truncates strings longer than 20 characters by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and aligns cells to the right.
vertical : bool, optional
If set to ``True``, print output rows vertically (one line
per column value).
Examples
--------
>>> df = spark.createDataFrame([
... (14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
Show only top 2 rows.
>>> df.show(2)
+---+-----+
|age| name|
+---+-----+
| 14| Tom|
| 23|Alice|
+---+-----+
only showing top 2 rows
Show :class:`DataFrame` where the maximum number of characters is 3.
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 14| Tom|
| 23| Ali|
| 16| Bob|
+---+----+
Show :class:`DataFrame` vertically.
>>> df.show(vertical=True)
-RECORD 0-----
age | 14
name | Tom
-RECORD 1-----
age | 23
name | Alice
-RECORD 2-----
age | 16
name | Bob
"""
print(self._show_string(n, truncate, vertical))
| (self, n: int = 20, truncate: Union[bool, int] = True, vertical: bool = False) -> NoneType |
39,421 | pyspark.sql.dataframe | sortWithinPartitions | Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
.. versionadded:: 1.6.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : str, list or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional, default True
boolean or list of boolean.
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, the length of the list must equal the length of the `cols`.
Returns
-------
:class:`DataFrame`
DataFrame sorted by partitions.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.sortWithinPartitions("age", ascending=False)
DataFrame[age: bigint, name: string]
| def sortWithinPartitions(
self, *cols: Union[str, Column, List[Union[str, Column]]], **kwargs: Any
) -> "DataFrame":
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
.. versionadded:: 1.6.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
cols : str, list or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional, default True
boolean or list of boolean.
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, the length of the list must equal the length of the `cols`.
Returns
-------
:class:`DataFrame`
DataFrame sorted by partitions.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.sortWithinPartitions("age", ascending=False)
DataFrame[age: bigint, name: string]
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sparkSession)
| (self, *cols: Union[str, pyspark.sql.column.Column, List[Union[str, pyspark.sql.column.Column]]], **kwargs: Any) -> pyspark.sql.dataframe.DataFrame |
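A short sketch, reusing the `df` from the example above; unlike `orderBy`, this sorts rows only within each partition rather than globally:
from pyspark.sql.functions import col
df.repartition(4, "name").sortWithinPartitions(col("age").desc()).show()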
39,422 | pyspark.sql.dataframe | subtract | Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
but not in another :class:`DataFrame`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be subtracted.
Returns
-------
:class:`DataFrame`
Subtracted DataFrame.
Notes
-----
This is equivalent to `EXCEPT DISTINCT` in SQL.
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.subtract(df2).show()
+---+---+
| C1| C2|
+---+---+
| c| 4|
+---+---+
| def subtract(self, other: "DataFrame") -> "DataFrame":
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
but not in another :class:`DataFrame`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be subtracted.
Returns
-------
:class:`DataFrame`
Subtracted DataFrame.
Notes
-----
This is equivalent to `EXCEPT DISTINCT` in SQL.
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.subtract(df2).show()
+---+---+
| C1| C2|
+---+---+
| c| 4|
+---+---+
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sparkSession)
| (self, other: pyspark.sql.dataframe.DataFrame) -> pyspark.sql.dataframe.DataFrame |
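A small sketch illustrating the `EXCEPT DISTINCT` semantics (left-side duplicates collapse to one row); assumes an active SparkSession named `spark`:
df1 = spark.createDataFrame([("a", 1), ("a", 1), ("d", 5)], ["C1", "C2"])
df2 = spark.createDataFrame([("x", 9)], ["C1", "C2"])
df1.subtract(df2).show()  # ("a", 1) appears only once even though df2 removes nothing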
39,423 | pyspark.sql.dataframe | summary | Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
- arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. versionadded:: 2.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
statistics : str, optional
Statistics to compute. If none are given, computes count, mean, stddev, min, approximate quartiles, and max for all columns.
Returns
-------
:class:`DataFrame`
A new DataFrame that provides statistics for the given DataFrame.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Examples
--------
>>> df = spark.createDataFrame(
... [("Bob", 13, 40.3, 150.5), ("Alice", 12, 37.8, 142.3), ("Tom", 11, 44.1, 142.2)],
... ["name", "age", "weight", "height"],
... )
>>> df.select("age", "weight", "height").summary().show()
+-------+----+------------------+-----------------+
|summary| age| weight| height|
+-------+----+------------------+-----------------+
| count| 3| 3| 3|
| mean|12.0| 40.73333333333333| 145.0|
| stddev| 1.0|3.1722757341273704|4.763402145525822|
| min| 11| 37.8| 142.2|
| 25%| 11| 37.8| 142.2|
| 50%| 12| 40.3| 142.3|
| 75%| 13| 44.1| 150.5|
| max| 13| 44.1| 150.5|
+-------+----+------------------+-----------------+
>>> df.select("age", "weight", "height").summary("count", "min", "25%", "75%", "max").show()
+-------+---+------+------+
|summary|age|weight|height|
+-------+---+------+------+
| count| 3| 3| 3|
| min| 11| 37.8| 142.2|
| 25%| 11| 37.8| 142.2|
| 75%| 13| 44.1| 150.5|
| max| 13| 44.1| 150.5|
+-------+---+------+------+
See Also
--------
DataFrame.display
| def summary(self, *statistics: str) -> "DataFrame":
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
- arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. versionadded:: 2.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
statistics : str, optional
Statistics to compute. If none are given, computes count, mean, stddev, min, approximate quartiles, and max for all columns.
Returns
-------
:class:`DataFrame`
A new DataFrame that provides statistics for the given DataFrame.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Examples
--------
>>> df = spark.createDataFrame(
... [("Bob", 13, 40.3, 150.5), ("Alice", 12, 37.8, 142.3), ("Tom", 11, 44.1, 142.2)],
... ["name", "age", "weight", "height"],
... )
>>> df.select("age", "weight", "height").summary().show()
+-------+----+------------------+-----------------+
|summary| age| weight| height|
+-------+----+------------------+-----------------+
| count| 3| 3| 3|
| mean|12.0| 40.73333333333333| 145.0|
| stddev| 1.0|3.1722757341273704|4.763402145525822|
| min| 11| 37.8| 142.2|
| 25%| 11| 37.8| 142.2|
| 50%| 12| 40.3| 142.3|
| 75%| 13| 44.1| 150.5|
| max| 13| 44.1| 150.5|
+-------+----+------------------+-----------------+
>>> df.select("age", "weight", "height").summary("count", "min", "25%", "75%", "max").show()
+-------+---+------+------+
|summary|age|weight|height|
+-------+---+------+------+
| count| 3| 3| 3|
| min| 11| 37.8| 142.2|
| 25%| 11| 37.8| 142.2|
| 75%| 13| 44.1| 150.5|
| max| 13| 44.1| 150.5|
+-------+---+------+------+
See Also
--------
DataFrame.display
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sparkSession)
| (self, *statistics: str) -> pyspark.sql.dataframe.DataFrame |
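A minimal sketch, reusing the `df` from the example above, showing that arbitrary approximate percentiles can be requested alongside the named statistics:
df.select("age", "weight").summary("count", "50%", "90%", "max").show()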
39,424 | pyspark.sql.dataframe | tail |
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
.. versionadded:: 3.0.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
num : int
Number of records to return. Will return this number of records
or all records if the DataFrame contains less than this number of records.
Returns
-------
list
List of rows
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.tail(2)
[Row(age=23, name='Alice'), Row(age=16, name='Bob')]
| def tail(self, num: int) -> List[Row]:
"""
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
.. versionadded:: 3.0.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
num : int
Number of records to return. Will return this number of records
or all records if the DataFrame contains less than this number of records.
Returns
-------
list
List of rows
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.tail(2)
[Row(age=23, name='Alice'), Row(age=16, name='Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.tailToPython(num)
return list(_load_from_socket(sock_info, BatchedSerializer(CPickleSerializer())))
| (self, num: int) -> List[pyspark.sql.types.Row] |
39,425 | pyspark.sql.dataframe | take | Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
num : int
Number of records to return. Will return this number of records
or all records if the DataFrame contains less than this number of records.
Returns
-------
list
List of rows
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
Return the first 2 rows of the :class:`DataFrame`.
>>> df.take(2)
[Row(age=14, name='Tom'), Row(age=23, name='Alice')]
| def take(self, num: int) -> List[Row]:
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
num : int
Number of records to return. Will return this number of records
or all records if the DataFrame contains less than this number of records.
Returns
-------
list
List of rows
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
Return the first 2 rows of the :class:`DataFrame`.
>>> df.take(2)
[Row(age=14, name='Tom'), Row(age=23, name='Alice')]
"""
return self.limit(num).collect()
| (self, num: int) -> List[pyspark.sql.types.Row] |
39,426 | pyspark.sql.dataframe | to |
Returns a new :class:`DataFrame` where each row is reconciled to match the specified
schema.
.. versionadded:: 3.4.0
Parameters
----------
schema : :class:`StructType`
Specified schema.
Returns
-------
:class:`DataFrame`
Reconciled DataFrame.
Notes
-----
* Reorder columns and/or inner fields by name to match the specified schema.
* Project away columns and/or inner fields that are not needed by the specified schema.
Missing columns and/or inner fields (present in the specified schema but not in the input
DataFrame) lead to failures.
* Cast the columns and/or inner fields to match the data types in the specified schema,
if the types are compatible, e.g., numeric to numeric (error if overflows), but
not string to int.
* Carry over the metadata from the specified schema, while the columns and/or inner fields
still keep their own metadata if not overwritten by the specified schema.
* Fail if the nullability is not compatible. For example, the column and/or inner field
is nullable but the specified schema requires them to be not nullable.
Supports Spark Connect.
Examples
--------
>>> from pyspark.sql.types import StructField, StringType
>>> df = spark.createDataFrame([("a", 1)], ["i", "j"])
>>> df.schema
StructType([StructField('i', StringType(), True), StructField('j', LongType(), True)])
>>> schema = StructType([StructField("j", StringType()), StructField("i", StringType())])
>>> df2 = df.to(schema)
>>> df2.schema
StructType([StructField('j', StringType(), True), StructField('i', StringType(), True)])
>>> df2.show()
+---+---+
| j| i|
+---+---+
| 1| a|
+---+---+
| def to(self, schema: StructType) -> "DataFrame":
"""
Returns a new :class:`DataFrame` where each row is reconciled to match the specified
schema.
.. versionadded:: 3.4.0
Parameters
----------
schema : :class:`StructType`
Specified schema.
Returns
-------
:class:`DataFrame`
Reconciled DataFrame.
Notes
-----
* Reorder columns and/or inner fields by name to match the specified schema.
* Project away columns and/or inner fields that are not needed by the specified schema.
Missing columns and/or inner fields (present in the specified schema but not in the input
DataFrame) lead to failures.
* Cast the columns and/or inner fields to match the data types in the specified schema,
if the types are compatible, e.g., numeric to numeric (error if overflows), but
not string to int.
* Carry over the metadata from the specified schema, while the columns and/or inner fields
still keep their own metadata if not overwritten by the specified schema.
* Fail if the nullability is not compatible. For example, the column and/or inner field
is nullable but the specified schema requires them to be not nullable.
Supports Spark Connect.
Examples
--------
>>> from pyspark.sql.types import StructField, StringType
>>> df = spark.createDataFrame([("a", 1)], ["i", "j"])
>>> df.schema
StructType([StructField('i', StringType(), True), StructField('j', LongType(), True)])
>>> schema = StructType([StructField("j", StringType()), StructField("i", StringType())])
>>> df2 = df.to(schema)
>>> df2.schema
StructType([StructField('j', StringType(), True), StructField('i', StringType(), True)])
>>> df2.show()
+---+---+
| j| i|
+---+---+
| 1| a|
+---+---+
"""
assert schema is not None
jschema = self._jdf.sparkSession().parseDataType(schema.json())
return DataFrame(self._jdf.to(jschema), self.sparkSession)
| (self, schema: pyspark.sql.types.StructType) -> pyspark.sql.dataframe.DataFrame |
39,427 | pyspark.sql.dataframe | toDF | Returns a new :class:`DataFrame` with the specified column names
.. versionadded:: 1.6.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
*cols : tuple
a tuple of new column names (strings). The length of the
tuple must equal the number of columns in the initial
:class:`DataFrame`
Returns
-------
:class:`DataFrame`
DataFrame with new column names.
Examples
--------
>>> df = spark.createDataFrame([(14, "Tom"), (23, "Alice"),
... (16, "Bob")], ["age", "name"])
>>> df.toDF('f1', 'f2').show()
+---+-----+
| f1| f2|
+---+-----+
| 14| Tom|
| 23|Alice|
| 16| Bob|
+---+-----+
| def toDF(self, *cols: str) -> "DataFrame":
"""Returns a new :class:`DataFrame` that with new specified column names
.. versionadded:: 1.6.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
*cols : tuple
a tuple of new column names (strings). The length of the
tuple must equal the number of columns in the initial
:class:`DataFrame`
Returns
-------
:class:`DataFrame`
DataFrame with new column names.
Examples
--------
>>> df = spark.createDataFrame([(14, "Tom"), (23, "Alice"),
... (16, "Bob")], ["age", "name"])
>>> df.toDF('f1', 'f2').show()
+---+-----+
| f1| f2|
+---+-----+
| 14| Tom|
| 23|Alice|
| 16| Bob|
+---+-----+
"""
for col in cols:
if not isinstance(col, str):
raise PySparkTypeError(
error_class="NOT_LIST_OF_STR",
message_parameters={"arg_name": "cols", "arg_type": type(col).__name__},
)
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sparkSession)
| (self, *cols: str) -> pyspark.sql.dataframe.DataFrame |
39,428 | pyspark.sql.dataframe | toJSON | Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
.. versionadded:: 1.3.0
Parameters
----------
use_unicode : bool, optional, default True
Whether to convert to unicode or not.
Returns
-------
:class:`RDD`
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.toJSON().first()
'{"age":2,"name":"Alice"}'
| def toJSON(self, use_unicode: bool = True) -> RDD[str]:
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
.. versionadded:: 1.3.0
Parameters
----------
use_unicode : bool, optional, default True
Whether to convert to unicode or not.
Returns
-------
:class:`RDD`
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.toJSON().first()
'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
| (self, use_unicode: bool = True) -> pyspark.rdd.RDD[str] |
39,429 | pyspark.sql.dataframe | toLocalIterator |
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
.. versionadded:: 2.0.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition before it is needed.
.. versionchanged:: 3.4.0
This argument does not take effect for Spark Connect.
Returns
-------
Iterator
Iterator of rows.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> list(df.toLocalIterator())
[Row(age=14, name='Tom'), Row(age=23, name='Alice'), Row(age=16, name='Bob')]
| def toLocalIterator(self, prefetchPartitions: bool = False) -> Iterator[Row]:
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
.. versionadded:: 2.0.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition before it is needed.
.. versionchanged:: 3.4.0
This argument does not take effect for Spark Connect.
Returns
-------
Iterator
Iterator of rows.
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> list(df.toLocalIterator())
[Row(age=14, name='Tom'), Row(age=23, name='Alice'), Row(age=16, name='Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.toPythonIterator(prefetchPartitions)
return _local_iterator_from_socket(sock_info, BatchedSerializer(CPickleSerializer()))
| (self, prefetchPartitions: bool = False) -> Iterator[pyspark.sql.types.Row] |
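A hedged sketch of streaming rows to the driver one partition at a time, reusing the `df` from the example above; `handle_row` is a hypothetical per-row callback:
def handle_row(row):
    print(row.name, row.age)

for row in df.toLocalIterator(prefetchPartitions=True):
    handle_row(row)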
39,430 | pyspark.sql.pandas.conversion | toPandas |
Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
This is only available if Pandas is installed and available.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Notes
-----
This method should only be used if the resulting Pandas ``pandas.DataFrame`` is
expected to be small, as all the data is loaded into the driver's memory.
Usage with ``spark.sql.execution.arrow.pyspark.enabled=True`` is experimental.
Examples
--------
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
| def toPandas(self) -> "PandasDataFrameLike":
"""
Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
This is only available if Pandas is installed and available.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Notes
-----
This method should only be used if the resulting Pandas ``pandas.DataFrame`` is
expected to be small, as all the data is loaded into the driver's memory.
Usage with ``spark.sql.execution.arrow.pyspark.enabled=True`` is experimental.
Examples
--------
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
"""
from pyspark.sql.dataframe import DataFrame
assert isinstance(self, DataFrame)
from pyspark.sql.pandas.types import _create_converter_to_pandas
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
jconf = self.sparkSession._jconf
if jconf.arrowPySparkEnabled():
use_arrow = True
try:
from pyspark.sql.pandas.types import to_arrow_schema
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
to_arrow_schema(self.schema)
except Exception as e:
if jconf.arrowPySparkFallbackEnabled():
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to "
"true." % str(e)
)
warn(msg)
use_arrow = False
else:
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
"reached the error below and will not continue because automatic fallback "
"with 'spark.sql.execution.arrow.pyspark.fallback.enabled' has been set to "
"false.\n %s" % str(e)
)
warn(msg)
raise
# Try to use Arrow optimization when the schema is supported and the required version
# of PyArrow is found, if 'spark.sql.execution.arrow.pyspark.enabled' is enabled.
if use_arrow:
try:
import pyarrow
self_destruct = jconf.arrowPySparkSelfDestructEnabled()
batches = self._collect_as_arrow(split_batches=self_destruct)
if len(batches) > 0:
table = pyarrow.Table.from_batches(batches)
# Ensure only the table has a reference to the batches, so that
# self_destruct (if enabled) is effective
del batches
# Pandas DataFrame created from PyArrow uses datetime64[ns] for date type
# values, but we should use datetime.date to match the behavior with when
# Arrow optimization is disabled.
pandas_options = {"date_as_object": True}
if self_destruct:
# Configure PyArrow to use as little memory as possible:
# self_destruct - free columns as they are converted
# split_blocks - create a separate Pandas block for each column
# use_threads - convert one column at a time
pandas_options.update(
{
"self_destruct": True,
"split_blocks": True,
"use_threads": False,
}
)
# Rename columns to avoid duplicated column names.
pdf = table.rename_columns(
[f"col_{i}" for i in range(table.num_columns)]
).to_pandas(**pandas_options)
# Rename back to the original column names.
pdf.columns = self.columns
else:
pdf = pd.DataFrame(columns=self.columns)
if len(pdf.columns) > 0:
timezone = jconf.sessionLocalTimeZone()
struct_in_pandas = jconf.pandasStructHandlingMode()
error_on_duplicated_field_names = False
if struct_in_pandas == "legacy":
error_on_duplicated_field_names = True
struct_in_pandas = "dict"
return pd.concat(
[
_create_converter_to_pandas(
field.dataType,
field.nullable,
timezone=timezone,
struct_in_pandas=struct_in_pandas,
error_on_duplicated_field_names=error_on_duplicated_field_names,
)(pser)
for (_, pser), field in zip(pdf.items(), self.schema.fields)
],
axis="columns",
)
else:
return pdf
except Exception as e:
# We might have to allow fallback here as well but multiple Spark jobs can
# be executed. So, simply fail in this case for now.
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
"reached the error below and can not continue. Note that "
"'spark.sql.execution.arrow.pyspark.fallback.enabled' does not have an "
"effect on failures in the middle of "
"computation.\n %s" % str(e)
)
warn(msg)
raise
# Below is toPandas without Arrow optimization.
rows = self.collect()
if len(rows) > 0:
pdf = pd.DataFrame.from_records(
rows, index=range(len(rows)), columns=self.columns # type: ignore[arg-type]
)
else:
pdf = pd.DataFrame(columns=self.columns)
if len(pdf.columns) > 0:
timezone = jconf.sessionLocalTimeZone()
struct_in_pandas = jconf.pandasStructHandlingMode()
return pd.concat(
[
_create_converter_to_pandas(
field.dataType,
field.nullable,
timezone=timezone,
struct_in_pandas=(
"row" if struct_in_pandas == "legacy" else struct_in_pandas
),
error_on_duplicated_field_names=False,
timestamp_utc_localized=False,
)(pser)
for (_, pser), field in zip(pdf.items(), self.schema.fields)
],
axis="columns",
)
else:
return pdf
| (self) -> 'PandasDataFrameLike' |
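A minimal sketch of enabling the Arrow-based conversion before calling `toPandas()`; the configuration key is the one referenced in the code above, and an active SparkSession named `spark` is assumed:
spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
pdf = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"]).toPandas()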
39,431 | pyspark.sql.dataframe | to_koalas | null | def to_koalas(
self, index_col: Optional[Union[str, List[str]]] = None
) -> "PandasOnSparkDataFrame":
return self.pandas_api(index_col)
| (self, index_col: Union[str, List[str], NoneType] = None) -> 'PandasOnSparkDataFrame' |
39,432 | sparkypandy._dataframe | to_pandas | PEP8-compliant alias to toPandas() | def to_pandas(self) -> pd.DataFrame:
"""PEP8-compliant alias to toPandas()"""
# noinspection PyTypeChecker
return super().toPandas()
| (self) -> pandas.core.frame.DataFrame |
39,433 | pyspark.sql.dataframe | to_pandas_on_spark | null | def to_pandas_on_spark(
self, index_col: Optional[Union[str, List[str]]] = None
) -> "PandasOnSparkDataFrame":
warnings.warn(
"DataFrame.to_pandas_on_spark is deprecated. Use DataFrame.pandas_api instead.",
FutureWarning,
)
return self.pandas_api(index_col)
| (self, index_col: Union[str, List[str], NoneType] = None) -> 'PandasOnSparkDataFrame' |
39,434 | pyspark.sql.dataframe | transform | Returns a new :class:`DataFrame`. Concise syntax for chaining custom transformations.
.. versionadded:: 3.0.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
func : function
a function that takes and returns a :class:`DataFrame`.
*args
Positional arguments to pass to func.
.. versionadded:: 3.3.0
**kwargs
Keyword arguments to pass to func.
.. versionadded:: 3.3.0
Returns
-------
:class:`DataFrame`
Transformed DataFrame.
Examples
--------
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
...
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
...
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
>>> def add_n(input_df, n):
... return input_df.select([(col(col_name) + n).alias(col_name)
... for col_name in input_df.columns])
>>> df.transform(add_n, 1).transform(add_n, n=10).show()
+---+-----+
|int|float|
+---+-----+
| 12| 12.0|
| 13| 13.0|
+---+-----+
| def transform(self, func: Callable[..., "DataFrame"], *args: Any, **kwargs: Any) -> "DataFrame":
"""Returns a new :class:`DataFrame`. Concise syntax for chaining custom transformations.
.. versionadded:: 3.0.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
func : function
a function that takes and returns a :class:`DataFrame`.
*args
Positional arguments to pass to func.
.. versionadded:: 3.3.0
**kwargs
Keyword arguments to pass to func.
.. versionadded:: 3.3.0
Returns
-------
:class:`DataFrame`
Transformed DataFrame.
Examples
--------
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
...
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
...
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
>>> def add_n(input_df, n):
... return input_df.select([(col(col_name) + n).alias(col_name)
... for col_name in input_df.columns])
>>> df.transform(add_n, 1).transform(add_n, n=10).show()
+---+-----+
|int|float|
+---+-----+
| 12| 12.0|
| 13| 13.0|
+---+-----+
"""
result = func(self, *args, **kwargs)
assert isinstance(
result, DataFrame
), "Func returned an instance of type [%s], " "should have been DataFrame." % type(result)
return result
| (self, func: Callable[..., pyspark.sql.dataframe.DataFrame], *args: Any, **kwargs: Any) -> pyspark.sql.dataframe.DataFrame |
39,435 | pyspark.sql.dataframe | union | Return a new :class:`DataFrame` containing the union of rows in this and another
:class:`DataFrame`.
.. versionadded:: 2.0.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be unioned.
Returns
-------
:class:`DataFrame`
A new :class:`DataFrame` containing the combined rows with corresponding columns.
See Also
--------
DataFrame.unionAll
Notes
-----
This method performs a SQL-style set union of the rows from both `DataFrame` objects,
with no automatic deduplication of elements.
Use the `distinct()` method to perform deduplication of rows.
The method resolves columns by position (not by name), following the standard behavior
in SQL.
Examples
--------
Example 1: Combining two DataFrames with the same schema
>>> df1 = spark.createDataFrame([(1, 'A'), (2, 'B')], ['id', 'value'])
>>> df2 = spark.createDataFrame([(3, 'C'), (4, 'D')], ['id', 'value'])
>>> df3 = df1.union(df2)
>>> df3.show()
+---+-----+
| id|value|
+---+-----+
| 1| A|
| 2| B|
| 3| C|
| 4| D|
+---+-----+
Example 2: Combining two DataFrames with different schemas
>>> from pyspark.sql.functions import lit
>>> df1 = spark.createDataFrame([("Alice", 1), ("Bob", 2)], ["name", "id"])
>>> df2 = spark.createDataFrame([(3, "Charlie"), (4, "Dave")], ["id", "name"])
>>> df1 = df1.withColumn("age", lit(30))
>>> df2 = df2.withColumn("age", lit(40))
>>> df3 = df1.union(df2)
>>> df3.show()
+-----+-------+---+
| name| id|age|
+-----+-------+---+
|Alice| 1| 30|
| Bob| 2| 30|
| 3|Charlie| 40|
| 4| Dave| 40|
+-----+-------+---+
Example 3: Combining two DataFrames with mismatched columns
>>> df1 = spark.createDataFrame([(1, 2)], ["A", "B"])
>>> df2 = spark.createDataFrame([(3, 4)], ["C", "D"])
>>> df3 = df1.union(df2)
>>> df3.show()
+---+---+
| A| B|
+---+---+
| 1| 2|
| 3| 4|
+---+---+
Example 4: Combining duplicate rows from two different DataFrames
>>> df1 = spark.createDataFrame([(1, 'A'), (2, 'B'), (3, 'C')], ['id', 'value'])
>>> df2 = spark.createDataFrame([(3, 'C'), (4, 'D')], ['id', 'value'])
>>> df3 = df1.union(df2).distinct().sort("id")
>>> df3.show()
+---+-----+
| id|value|
+---+-----+
| 1| A|
| 2| B|
| 3| C|
| 4| D|
+---+-----+
| def union(self, other: "DataFrame") -> "DataFrame":
"""Return a new :class:`DataFrame` containing the union of rows in this and another
:class:`DataFrame`.
.. versionadded:: 2.0.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be unioned.
Returns
-------
:class:`DataFrame`
A new :class:`DataFrame` containing the combined rows with corresponding columns.
See Also
--------
DataFrame.unionAll
Notes
-----
This method performs a SQL-style set union of the rows from both `DataFrame` objects,
with no automatic deduplication of elements.
Use the `distinct()` method to perform deduplication of rows.
The method resolves columns by position (not by name), following the standard behavior
in SQL.
Examples
--------
Example 1: Combining two DataFrames with the same schema
>>> df1 = spark.createDataFrame([(1, 'A'), (2, 'B')], ['id', 'value'])
>>> df2 = spark.createDataFrame([(3, 'C'), (4, 'D')], ['id', 'value'])
>>> df3 = df1.union(df2)
>>> df3.show()
+---+-----+
| id|value|
+---+-----+
| 1| A|
| 2| B|
| 3| C|
| 4| D|
+---+-----+
Example 2: Combining two DataFrames with different schemas
>>> from pyspark.sql.functions import lit
>>> df1 = spark.createDataFrame([("Alice", 1), ("Bob", 2)], ["name", "id"])
>>> df2 = spark.createDataFrame([(3, "Charlie"), (4, "Dave")], ["id", "name"])
>>> df1 = df1.withColumn("age", lit(30))
>>> df2 = df2.withColumn("age", lit(40))
>>> df3 = df1.union(df2)
>>> df3.show()
+-----+-------+---+
| name| id|age|
+-----+-------+---+
|Alice| 1| 30|
| Bob| 2| 30|
| 3|Charlie| 40|
| 4| Dave| 40|
+-----+-------+---+
Example 3: Combining two DataFrames with mismatched columns
>>> df1 = spark.createDataFrame([(1, 2)], ["A", "B"])
>>> df2 = spark.createDataFrame([(3, 4)], ["C", "D"])
>>> df3 = df1.union(df2)
>>> df3.show()
+---+---+
| A| B|
+---+---+
| 1| 2|
| 3| 4|
+---+---+
Example 4: Combining duplicate rows from two different DataFrames
>>> df1 = spark.createDataFrame([(1, 'A'), (2, 'B'), (3, 'C')], ['id', 'value'])
>>> df2 = spark.createDataFrame([(3, 'C'), (4, 'D')], ['id', 'value'])
>>> df3 = df1.union(df2).distinct().sort("id")
>>> df3.show()
+---+-----+
| id|value|
+---+-----+
| 1| A|
| 2| B|
| 3| C|
| 4| D|
+---+-----+
"""
return DataFrame(self._jdf.union(other._jdf), self.sparkSession)
| (self, other: pyspark.sql.dataframe.DataFrame) -> pyspark.sql.dataframe.DataFrame |
39,436 | pyspark.sql.dataframe | unionAll | Return a new :class:`DataFrame` containing the union of rows in this and another
:class:`DataFrame`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be combined
Returns
-------
:class:`DataFrame`
A new :class:`DataFrame` containing combined rows from both dataframes.
Notes
-----
This method combines all rows from both `DataFrame` objects with no automatic
deduplication of elements.
Use the `distinct()` method to perform deduplication of rows.
:func:`unionAll` is an alias to :func:`union`
See Also
--------
DataFrame.union
| def unionAll(self, other: "DataFrame") -> "DataFrame":
"""Return a new :class:`DataFrame` containing the union of rows in this and another
:class:`DataFrame`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be combined
Returns
-------
:class:`DataFrame`
A new :class:`DataFrame` containing combined rows from both dataframes.
Notes
-----
This method combines all rows from both `DataFrame` objects with no automatic
deduplication of elements.
Use the `distinct()` method to perform deduplication of rows.
:func:`unionAll` is an alias to :func:`union`
See Also
--------
DataFrame.union
"""
return self.union(other)
| (self, other: pyspark.sql.dataframe.DataFrame) -> pyspark.sql.dataframe.DataFrame |
39,437 | pyspark.sql.dataframe | unionByName | Returns a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This method performs a union operation on both input DataFrames, resolving columns by
name (rather than position). When `allowMissingColumns` is True, missing columns will
be filled with null.
.. versionadded:: 2.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be combined.
allowMissingColumns : bool, optional, default False
Specify whether to allow missing columns.
.. versionadded:: 3.1.0
Returns
-------
:class:`DataFrame`
A new :class:`DataFrame` containing the combined rows with corresponding
columns of the two given DataFrames.
Examples
--------
Example 1: Union of two DataFrames with same columns in different order.
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
Example 2: Union with missing columns and setting `allowMissingColumns=True`.
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col3"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+
|col0|col1|col2|col3|
+----+----+----+----+
| 1| 2| 3|NULL|
|NULL| 4| 5| 6|
+----+----+----+----+
Example 3: Union of two DataFrames with few common columns.
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6, 7]], ["col1", "col2", "col3", "col4"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+----+
|col0|col1|col2|col3|col4|
+----+----+----+----+----+
| 1| 2| 3|NULL|NULL|
|NULL| 4| 5| 6| 7|
+----+----+----+----+----+
Example 4: Union of two DataFrames with completely different columns.
>>> df1 = spark.createDataFrame([[0, 1, 2]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[3, 4, 5]], ["col3", "col4", "col5"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+----+----+
|col0|col1|col2|col3|col4|col5|
+----+----+----+----+----+----+
| 0| 1| 2|NULL|NULL|NULL|
|NULL|NULL|NULL| 3| 4| 5|
+----+----+----+----+----+----+
| def unionByName(self, other: "DataFrame", allowMissingColumns: bool = False) -> "DataFrame":
"""Returns a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This method performs a union operation on both input DataFrames, resolving columns by
name (rather than position). When `allowMissingColumns` is True, missing columns will
be filled with null.
.. versionadded:: 2.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
other : :class:`DataFrame`
Another :class:`DataFrame` that needs to be combined.
allowMissingColumns : bool, optional, default False
Specify whether to allow missing columns.
.. versionadded:: 3.1.0
Returns
-------
:class:`DataFrame`
A new :class:`DataFrame` containing the combined rows with corresponding
columns of the two given DataFrames.
Examples
--------
Example 1: Union of two DataFrames with same columns in different order.
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
Example 2: Union with missing columns and setting `allowMissingColumns=True`.
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col3"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+
|col0|col1|col2|col3|
+----+----+----+----+
| 1| 2| 3|NULL|
|NULL| 4| 5| 6|
+----+----+----+----+
Example 3: Union of two DataFrames with few common columns.
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6, 7]], ["col1", "col2", "col3", "col4"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+----+
|col0|col1|col2|col3|col4|
+----+----+----+----+----+
| 1| 2| 3|NULL|NULL|
|NULL| 4| 5| 6| 7|
+----+----+----+----+----+
Example 4: Union of two DataFrames with completely different columns.
>>> df1 = spark.createDataFrame([[0, 1, 2]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[3, 4, 5]], ["col3", "col4", "col5"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+----+----+
|col0|col1|col2|col3|col4|col5|
+----+----+----+----+----+----+
| 0| 1| 2|NULL|NULL|NULL|
|NULL|NULL|NULL| 3| 4| 5|
+----+----+----+----+----+----+
"""
return DataFrame(self._jdf.unionByName(other._jdf, allowMissingColumns), self.sparkSession)
| (self, other: pyspark.sql.dataframe.DataFrame, allowMissingColumns: bool = False) -> pyspark.sql.dataframe.DataFrame |
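Beyond the pairwise examples above, a common pattern is folding unionByName over a list of frames whose column order (or presence) differs. A hedged sketch, assuming a local SparkSession; the frame contents and names are made up for illustration:

```python
from functools import reduce
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

frames = [
    spark.createDataFrame([(1, "x")], ["id", "tag"]),
    spark.createDataFrame([("y", 2)], ["tag", "id"]),  # same columns, different order
    spark.createDataFrame([(3,)], ["id"]),             # missing the "tag" column
]

# unionByName resolves columns by name; allowMissingColumns fills gaps with nulls.
result = reduce(lambda a, b: a.unionByName(b, allowMissingColumns=True), frames)
result.show()
```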
39,438 | pyspark.sql.dataframe | unpersist | Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Notes
-----
`blocking` default has changed to ``False`` to match Scala in 2.0.
Parameters
----------
blocking : bool
Whether to block until all blocks are deleted.
Returns
-------
:class:`DataFrame`
Unpersisted DataFrame.
Examples
--------
>>> df = spark.range(1)
>>> df.persist()
DataFrame[id: bigint]
>>> df.unpersist()
DataFrame[id: bigint]
>>> df = spark.range(1)
>>> df.unpersist(True)
DataFrame[id: bigint]
| def unpersist(self, blocking: bool = False) -> "DataFrame":
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Notes
-----
`blocking` default has changed to ``False`` to match Scala in 2.0.
Parameters
----------
blocking : bool
Whether to block until all blocks are deleted.
Returns
-------
:class:`DataFrame`
Unpersisted DataFrame.
Examples
--------
>>> df = spark.range(1)
>>> df.persist()
DataFrame[id: bigint]
>>> df.unpersist()
DataFrame[id: bigint]
>>> df = spark.range(1)
>>> df.unpersist(True)
DataFrame[id: bigint]
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
| (self, blocking: bool = False) -> pyspark.sql.dataframe.DataFrame |
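A small sketch of the cache/unpersist lifecycle the entry above describes, assuming a local SparkSession; the counts are only there to trigger materialization:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

df = spark.range(10_000).withColumnRenamed("id", "n")

df.persist()                     # mark for caching; materialized on the first action
df.count()                       # first action populates the cache
df.filter("n % 2 = 0").count()   # reuses the cached blocks

# blocking=False (the default) returns immediately; blocks are freed asynchronously.
df.unpersist()
```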
39,439 | pyspark.sql.dataframe | unpivot |
Unpivot a DataFrame from wide format to long format, optionally leaving
identifier columns set. This is the reverse of `groupBy(...).pivot(...).agg(...)`,
except for the aggregation, which cannot be reversed.
This function is useful to massage a DataFrame into a format where some
columns are identifier columns ("ids"), while all other columns ("values")
are "unpivoted" to the rows, leaving just two non-id columns, named as given
by `variableColumnName` and `valueColumnName`.
When no "id" columns are given, the unpivoted DataFrame consists of only the
"variable" and "value" columns.
The `values` columns must not be empty so at least one value must be given to be unpivoted.
When `values` is `None`, all non-id columns will be unpivoted.
All "value" columns must share a least common data type. Unless they are the same data type,
all "value" columns are cast to the nearest common data type. For instance, types
`IntegerType` and `LongType` are cast to `LongType`, while `IntegerType` and `StringType`
do not have a common data type and `unpivot` fails.
.. versionadded:: 3.4.0
Parameters
----------
ids : str, Column, tuple, list
Column(s) to use as identifiers. Can be a single column or column name,
or a list or tuple for multiple columns.
values : str, Column, tuple, list, optional
Column(s) to unpivot. Can be a single column or column name, or a list or tuple
for multiple columns. If specified, must not be empty. If not specified, uses all
columns that are not set as `ids`.
variableColumnName : str
Name of the variable column.
valueColumnName : str
Name of the value column.
Returns
-------
:class:`DataFrame`
Unpivoted DataFrame.
Notes
-----
Supports Spark Connect.
Examples
--------
>>> df = spark.createDataFrame(
... [(1, 11, 1.1), (2, 12, 1.2)],
... ["id", "int", "double"],
... )
>>> df.show()
+---+---+------+
| id|int|double|
+---+---+------+
| 1| 11| 1.1|
| 2| 12| 1.2|
+---+---+------+
>>> df.unpivot("id", ["int", "double"], "var", "val").show()
+---+------+----+
| id| var| val|
+---+------+----+
| 1| int|11.0|
| 1|double| 1.1|
| 2| int|12.0|
| 2|double| 1.2|
+---+------+----+
See Also
--------
DataFrame.melt
| def unpivot(
self,
ids: Union["ColumnOrName", List["ColumnOrName"], Tuple["ColumnOrName", ...]],
values: Optional[Union["ColumnOrName", List["ColumnOrName"], Tuple["ColumnOrName", ...]]],
variableColumnName: str,
valueColumnName: str,
) -> "DataFrame":
"""
Unpivot a DataFrame from wide format to long format, optionally leaving
identifier columns set. This is the reverse of `groupBy(...).pivot(...).agg(...)`,
except for the aggregation, which cannot be reversed.
This function is useful to massage a DataFrame into a format where some
columns are identifier columns ("ids"), while all other columns ("values")
are "unpivoted" to the rows, leaving just two non-id columns, named as given
by `variableColumnName` and `valueColumnName`.
When no "id" columns are given, the unpivoted DataFrame consists of only the
"variable" and "value" columns.
The `values` columns must not be empty so at least one value must be given to be unpivoted.
When `values` is `None`, all non-id columns will be unpivoted.
All "value" columns must share a least common data type. Unless they are the same data type,
all "value" columns are cast to the nearest common data type. For instance, types
`IntegerType` and `LongType` are cast to `LongType`, while `IntegerType` and `StringType`
do not have a common data type and `unpivot` fails.
.. versionadded:: 3.4.0
Parameters
----------
ids : str, Column, tuple, list
Column(s) to use as identifiers. Can be a single column or column name,
or a list or tuple for multiple columns.
values : str, Column, tuple, list, optional
Column(s) to unpivot. Can be a single column or column name, or a list or tuple
for multiple columns. If specified, must not be empty. If not specified, uses all
columns that are not set as `ids`.
variableColumnName : str
Name of the variable column.
valueColumnName : str
Name of the value column.
Returns
-------
:class:`DataFrame`
Unpivoted DataFrame.
Notes
-----
Supports Spark Connect.
Examples
--------
>>> df = spark.createDataFrame(
... [(1, 11, 1.1), (2, 12, 1.2)],
... ["id", "int", "double"],
... )
>>> df.show()
+---+---+------+
| id|int|double|
+---+---+------+
| 1| 11| 1.1|
| 2| 12| 1.2|
+---+---+------+
>>> df.unpivot("id", ["int", "double"], "var", "val").show()
+---+------+----+
| id| var| val|
+---+------+----+
| 1| int|11.0|
| 1|double| 1.1|
| 2| int|12.0|
| 2|double| 1.2|
+---+------+----+
See Also
--------
DataFrame.melt
"""
assert ids is not None, "ids must not be None"
def to_jcols(
cols: Union["ColumnOrName", List["ColumnOrName"], Tuple["ColumnOrName", ...]]
) -> JavaObject:
if isinstance(cols, list):
return self._jcols(*cols)
if isinstance(cols, tuple):
return self._jcols(*list(cols))
return self._jcols(cols)
jids = to_jcols(ids)
if values is None:
jdf = self._jdf.unpivotWithSeq(jids, variableColumnName, valueColumnName)
else:
jvals = to_jcols(values)
jdf = self._jdf.unpivotWithSeq(jids, jvals, variableColumnName, valueColumnName)
return DataFrame(jdf, self.sparkSession)
| (self, ids: Union[ForwardRef('ColumnOrName'), List[ForwardRef('ColumnOrName')], Tuple[ForwardRef('ColumnOrName'), ...]], values: Union[ForwardRef('ColumnOrName'), List[ForwardRef('ColumnOrName')], Tuple[ForwardRef('ColumnOrName'), ...], NoneType], variableColumnName: str, valueColumnName: str) -> 'DataFrame' |
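To complement the example above (which lists the value columns explicitly), here is a sketch of the `values=None` case, where every non-id column is unpivoted; it assumes a local SparkSession and illustrative column names:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

wide = spark.createDataFrame(
    [("2024-01", 10, 12), ("2024-02", 11, 13)],
    ["month", "store_a", "store_b"],
)

# With values=None every column except the ids becomes a (variable, value) pair;
# both value columns are integers here, so no common-type cast is needed.
long = wide.unpivot("month", None, "store", "sales")
long.show()
```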
39,440 | pyspark.sql.dataframe | filter | :func:`where` is an alias for :func:`filter`.
.. versionadded:: 1.3 | def filter(self, condition: "ColumnOrName") -> "DataFrame":
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
condition : :class:`Column` or str
a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expressions.
Returns
-------
:class:`DataFrame`
Filtered DataFrame.
Examples
--------
>>> df = spark.createDataFrame([
... (2, "Alice"), (5, "Bob")], schema=["age", "name"])
Filter by :class:`Column` instances.
>>> df.filter(df.age > 3).show()
+---+----+
|age|name|
+---+----+
| 5| Bob|
+---+----+
>>> df.where(df.age == 2).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
+---+-----+
Filter by SQL expression in a string.
>>> df.filter("age > 3").show()
+---+----+
|age|name|
+---+----+
| 5| Bob|
+---+----+
>>> df.where("age = 2").show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
+---+-----+
"""
if isinstance(condition, str):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise PySparkTypeError(
error_class="NOT_COLUMN_OR_STR",
message_parameters={"arg_name": "condition", "arg_type": type(condition).__name__},
)
return DataFrame(jdf, self.sparkSession)
| (self, condition) |
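A short sketch of composing filter conditions, which the examples above only show one at a time; it assumes a local SparkSession:

```python
from pyspark.sql import SparkSession
from pyspark.sql.functions import col

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame([(2, "Alice"), (5, "Bob"), (8, "Carol")], ["age", "name"])

# Column conditions compose with & and |; each side needs its own parentheses.
df.filter((col("age") > 3) & (col("name") != "Bob")).show()

# The same predicate written as a SQL expression string.
df.where("age > 3 AND name <> 'Bob'").show()
```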
39,441 | pyspark.sql.dataframe | withColumn |
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
colName : str
string, name of the new column.
col : :class:`Column`
a :class:`Column` expression for the new column.
Returns
-------
:class:`DataFrame`
DataFrame with new or replaced column.
Notes
-----
This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with multiple columns at once.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.withColumn('age2', df.age + 2).show()
+---+-----+----+
|age| name|age2|
+---+-----+----+
| 2|Alice| 4|
| 5| Bob| 7|
+---+-----+----+
| def withColumn(self, colName: str, col: Column) -> "DataFrame":
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
colName : str
string, name of the new column.
col : :class:`Column`
a :class:`Column` expression for the new column.
Returns
-------
:class:`DataFrame`
DataFrame with new or replaced column.
Notes
-----
This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with multiple columns at once.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.withColumn('age2', df.age + 2).show()
+---+-----+----+
|age| name|age2|
+---+-----+----+
| 2|Alice| 4|
| 5| Bob| 7|
+---+-----+----+
"""
if not isinstance(col, Column):
raise PySparkTypeError(
error_class="NOT_COLUMN",
message_parameters={"arg_name": "col", "arg_type": type(col).__name__},
)
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sparkSession)
| (self, colName: str, col: pyspark.sql.column.Column) -> pyspark.sql.dataframe.DataFrame |
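The note above warns against adding many columns through repeated withColumn calls. A sketch of the suggested alternative, a single select that adds several derived columns at once, assuming a local SparkSession:

```python
from pyspark.sql import SparkSession
from pyspark.sql.functions import col

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"])

# One projection instead of a loop of withColumn calls keeps the plan small.
df.select(
    "*",
    (col("age") + 2).alias("age2"),
    (col("age") * 10).alias("age10"),
).show()
```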
39,442 | pyspark.sql.dataframe | withColumnRenamed | Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if the schema doesn't contain the given column name.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
existing : str
string, name of the existing column to rename.
new : str
string, new name of the column.
Returns
-------
:class:`DataFrame`
DataFrame with renamed column.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.withColumnRenamed('age', 'age2').show()
+----+-----+
|age2| name|
+----+-----+
| 2|Alice|
| 5| Bob|
+----+-----+
| def withColumnRenamed(self, existing: str, new: str) -> "DataFrame":
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if the schema doesn't contain the given column name.
.. versionadded:: 1.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
existing : str
string, name of the existing column to rename.
new : str
string, new name of the column.
Returns
-------
:class:`DataFrame`
DataFrame with renamed column.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.withColumnRenamed('age', 'age2').show()
+----+-----+
|age2| name|
+----+-----+
| 2|Alice|
| 5| Bob|
+----+-----+
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sparkSession)
| (self, existing: str, new: str) -> pyspark.sql.dataframe.DataFrame |
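As a small aside to the single-column rename above, `toDF` can rename every column positionally in one call; a sketch with the same toy data, assuming a local SparkSession:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame([(2, "Alice")], ["age", "name"])

df.withColumnRenamed("age", "age_years").printSchema()

# toDF replaces all column names by position, which can be handier for bulk renames.
df.toDF("age_years", "full_name").printSchema()
```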
39,443 | pyspark.sql.dataframe | withColumns |
Returns a new :class:`DataFrame` by adding multiple columns or replacing the
existing columns that have the same names.
The colsMap is a map of column name and column, the column must only refer to attributes
supplied by this Dataset. It is an error to add columns that refer to some other Dataset.
.. versionadded:: 3.3.0
Added support for adding multiple columns
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
colsMap : dict
a dict of column name and :class:`Column`. Currently, only a single map is supported.
Returns
-------
:class:`DataFrame`
DataFrame with new or replaced columns.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.withColumns({'age2': df.age + 2, 'age3': df.age + 3}).show()
+---+-----+----+----+
|age| name|age2|age3|
+---+-----+----+----+
| 2|Alice| 4| 5|
| 5| Bob| 7| 8|
+---+-----+----+----+
| def withColumns(self, *colsMap: Dict[str, Column]) -> "DataFrame":
"""
Returns a new :class:`DataFrame` by adding multiple columns or replacing the
existing columns that have the same names.
The colsMap is a map of column name and column, the column must only refer to attributes
supplied by this Dataset. It is an error to add columns that refer to some other Dataset.
.. versionadded:: 3.3.0
Added support for adding multiple columns
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
colsMap : dict
a dict of column name and :class:`Column`. Currently, only a single map is supported.
Returns
-------
:class:`DataFrame`
DataFrame with new or replaced columns.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df.withColumns({'age2': df.age + 2, 'age3': df.age + 3}).show()
+---+-----+----+----+
|age| name|age2|age3|
+---+-----+----+----+
| 2|Alice| 4| 5|
| 5| Bob| 7| 8|
+---+-----+----+----+
"""
# Below code is to help enable kwargs in future.
assert len(colsMap) == 1
colsMap = colsMap[0] # type: ignore[assignment]
if not isinstance(colsMap, dict):
raise PySparkTypeError(
error_class="NOT_DICT",
message_parameters={"arg_name": "colsMap", "arg_type": type(colsMap).__name__},
)
col_names = list(colsMap.keys())
cols = list(colsMap.values())
return DataFrame(
self._jdf.withColumns(_to_seq(self._sc, col_names), self._jcols(*cols)),
self.sparkSession,
)
| (self, *colsMap: Dict[str, pyspark.sql.column.Column]) -> pyspark.sql.dataframe.DataFrame |
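A sketch of building the colsMap programmatically before a single withColumns call, assuming a local SparkSession; the offsets dict is arbitrary illustration:

```python
from pyspark.sql import SparkSession
from pyspark.sql.functions import col

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"])

offsets = {"age_plus_2": 2, "age_plus_3": 3}
new_cols = {name: col("age") + n for name, n in offsets.items()}

# One call, one projection, however many entries the dict holds.
df.withColumns(new_cols).show()
```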
39,444 | pyspark.sql.dataframe | withColumnsRenamed |
Returns a new :class:`DataFrame` by renaming multiple columns.
This is a no-op if the schema doesn't contain the given column names.
.. versionadded:: 3.4.0
Added support for renaming multiple columns
Parameters
----------
colsMap : dict
a dict of existing column names and corresponding desired column names.
Currently, only a single map is supported.
Returns
-------
:class:`DataFrame`
DataFrame with renamed columns.
See Also
--------
:meth:`withColumnRenamed`
Notes
-----
Supports Spark Connect.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df = df.withColumns({'age2': df.age + 2, 'age3': df.age + 3})
>>> df.withColumnsRenamed({'age2': 'age4', 'age3': 'age5'}).show()
+---+-----+----+----+
|age| name|age4|age5|
+---+-----+----+----+
| 2|Alice| 4| 5|
| 5| Bob| 7| 8|
+---+-----+----+----+
| def withColumnsRenamed(self, colsMap: Dict[str, str]) -> "DataFrame":
"""
Returns a new :class:`DataFrame` by renaming multiple columns.
This is a no-op if the schema doesn't contain the given column names.
.. versionadded:: 3.4.0
Added support for renaming multiple columns
Parameters
----------
colsMap : dict
a dict of existing column names and corresponding desired column names.
Currently, only a single map is supported.
Returns
-------
:class:`DataFrame`
DataFrame with renamed columns.
See Also
--------
:meth:`withColumnRenamed`
Notes
-----
Supports Spark Connect.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df = df.withColumns({'age2': df.age + 2, 'age3': df.age + 3})
>>> df.withColumnsRenamed({'age2': 'age4', 'age3': 'age5'}).show()
+---+-----+----+----+
|age| name|age4|age5|
+---+-----+----+----+
| 2|Alice| 4| 5|
| 5| Bob| 7| 8|
+---+-----+----+----+
"""
if not isinstance(colsMap, dict):
raise PySparkTypeError(
error_class="NOT_DICT",
message_parameters={"arg_name": "colsMap", "arg_type": type(colsMap).__name__},
)
return DataFrame(self._jdf.withColumnsRenamed(colsMap), self.sparkSession)
| (self, colsMap: Dict[str, str]) -> pyspark.sql.dataframe.DataFrame |
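A sketch of a dict-driven bulk rename, assuming a local SparkSession; per the docstring above, names that are not in the schema are simply ignored:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame([(2, "Alice", 4)], ["age", "name", "age2"])

# "missing" is not a column, so that entry is a no-op, mirroring withColumnRenamed.
df.withColumnsRenamed({"age": "age_years", "missing": "ignored"}).printSchema()
```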
39,445 | pyspark.sql.dataframe | withMetadata | Returns a new :class:`DataFrame` by updating an existing column with metadata.
.. versionadded:: 3.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
columnName : str
string, name of the existing column to update the metadata.
metadata : dict
dict, new metadata to be assigned to df.schema[columnName].metadata
Returns
-------
:class:`DataFrame`
DataFrame with updated metadata column.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df_meta = df.withMetadata('age', {'foo': 'bar'})
>>> df_meta.schema['age'].metadata
{'foo': 'bar'}
| def withMetadata(self, columnName: str, metadata: Dict[str, Any]) -> "DataFrame":
"""Returns a new :class:`DataFrame` by updating an existing column with metadata.
.. versionadded:: 3.3.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
columnName : str
string, name of the existing column to update the metadata.
metadata : dict
dict, new metadata to be assigned to df.schema[columnName].metadata
Returns
-------
:class:`DataFrame`
DataFrame with updated metadata column.
Examples
--------
>>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
>>> df_meta = df.withMetadata('age', {'foo': 'bar'})
>>> df_meta.schema['age'].metadata
{'foo': 'bar'}
"""
if not isinstance(metadata, dict):
raise PySparkTypeError(
error_class="NOT_DICT",
message_parameters={"arg_name": "metadata", "arg_type": type(metadata).__name__},
)
sc = get_active_spark_context()
jmeta = cast(JVMView, sc._jvm).org.apache.spark.sql.types.Metadata.fromJson(
json.dumps(metadata)
)
return DataFrame(self._jdf.withMetadata(columnName, jmeta), self.sparkSession)
| (self, columnName: str, metadata: Dict[str, Any]) -> pyspark.sql.dataframe.DataFrame |
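A sketch of attaching metadata and reading it back after a further projection, assuming a local SparkSession; the metadata keys are illustrative:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame([(2, "Alice")], ["age", "name"])

tagged = df.withMetadata("age", {"unit": "years", "source": "survey"})

# Metadata is carried in the schema and survives simple projections.
projected = tagged.select("age")
print(projected.schema["age"].metadata)   # {'unit': 'years', 'source': 'survey'}
```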
39,446 | pyspark.sql.dataframe | withWatermark | Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
.. versionadded:: 2.1.0
.. versionchanged:: 3.5.0
Supports Spark Connect.
Parameters
----------
eventTime : str
the name of the column that contains the event time of the row.
delayThreshold : str
the minimum delay to wait for data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
Returns
-------
:class:`DataFrame`
Watermarked DataFrame
Notes
-----
This is a feature only for Structured Streaming.
This API is evolving.
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.sql.functions import timestamp_seconds
>>> df = spark.readStream.format("rate").load().selectExpr(
... "value % 5 AS value", "timestamp")
>>> df.select("value", df.timestamp.alias("time")).withWatermark("time", '10 minutes')
DataFrame[value: bigint, time: timestamp]
Group the data by window and value (0 - 4), and compute the count of each group.
>>> import time
>>> from pyspark.sql.functions import window
>>> query = (df
... .withWatermark("timestamp", "10 minutes")
... .groupBy(
... window(df.timestamp, "10 minutes", "5 minutes"),
... df.value)
... ).count().writeStream.outputMode("complete").format("console").start()
>>> time.sleep(3)
>>> query.stop()
| def withWatermark(self, eventTime: str, delayThreshold: str) -> "DataFrame":
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
.. versionadded:: 2.1.0
.. versionchanged:: 3.5.0
Supports Spark Connect.
Parameters
----------
eventTime : str
the name of the column that contains the event time of the row.
delayThreshold : str
the minimum delay to wait for data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
Returns
-------
:class:`DataFrame`
Watermarked DataFrame
Notes
-----
This is a feature only for Structured Streaming.
This API is evolving.
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.sql.functions import timestamp_seconds
>>> df = spark.readStream.format("rate").load().selectExpr(
... "value % 5 AS value", "timestamp")
>>> df.select("value", df.timestamp.alias("time")).withWatermark("time", '10 minutes')
DataFrame[value: bigint, time: timestamp]
Group the data by window and value (0 - 4), and compute the count of each group.
>>> import time
>>> from pyspark.sql.functions import window
>>> query = (df
... .withWatermark("timestamp", "10 minutes")
... .groupBy(
... window(df.timestamp, "10 minutes", "5 minutes"),
... df.value)
... ).count().writeStream.outputMode("complete").format("console").start()
>>> time.sleep(3)
>>> query.stop()
"""
if not eventTime or type(eventTime) is not str:
raise PySparkTypeError(
error_class="NOT_STR",
message_parameters={"arg_name": "eventTime", "arg_type": type(eventTime).__name__},
)
if not delayThreshold or type(delayThreshold) is not str:
raise PySparkTypeError(
error_class="NOT_STR",
message_parameters={
"arg_name": "delayThreshold",
"arg_type": type(delayThreshold).__name__,
},
)
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sparkSession)
| (self, eventTime: str, delayThreshold: str) -> pyspark.sql.dataframe.DataFrame |
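A hedged sketch of the usual watermark-plus-window aggregation in append mode, built on the rate source used in the docstring; it assumes a local SparkSession with streaming available and stops the query immediately, so it is a shape illustration rather than a long-running job:

```python
from pyspark.sql import SparkSession
from pyspark.sql.functions import window

spark = SparkSession.builder.getOrCreate()

events = spark.readStream.format("rate").load()    # columns: timestamp, value

counts = (
    events
    .withWatermark("timestamp", "10 minutes")      # tolerate 10 minutes of lateness
    .groupBy(window("timestamp", "5 minutes"))
    .count()
)

# Append mode only emits a window once the watermark passes its end.
query = counts.writeStream.outputMode("append").format("console").start()
query.stop()
```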
39,447 | pyspark.sql.dataframe | writeTo |
Create a write configuration builder for v2 sources.
This builder is used to configure and execute write operations.
For example, to append or create or replace existing tables.
.. versionadded:: 3.1.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
table : str
Target table name to write to.
Returns
-------
:class:`DataFrameWriterV2`
DataFrameWriterV2 to use further to specify how to save the data
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.writeTo("catalog.db.table").append() # doctest: +SKIP
>>> df.writeTo( # doctest: +SKIP
... "catalog.db.table"
... ).partitionedBy("col").createOrReplace()
| def writeTo(self, table: str) -> DataFrameWriterV2:
"""
Create a write configuration builder for v2 sources.
This builder is used to configure and execute write operations.
For example, to append or create or replace existing tables.
.. versionadded:: 3.1.0
.. versionchanged:: 3.4.0
Supports Spark Connect.
Parameters
----------
table : str
Target table name to write to.
Returns
-------
:class:`DataFrameWriterV2`
DataFrameWriterV2 to use further to specify how to save the data
Examples
--------
>>> df = spark.createDataFrame(
... [(14, "Tom"), (23, "Alice"), (16, "Bob")], ["age", "name"])
>>> df.writeTo("catalog.db.table").append() # doctest: +SKIP
>>> df.writeTo( # doctest: +SKIP
... "catalog.db.table"
... ).partitionedBy("col").createOrReplace()
"""
return DataFrameWriterV2(self, table)
| (self, table: str) -> pyspark.sql.readwriter.DataFrameWriterV2 |
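Mirroring the doctest-skipped examples above, this is a hedged sketch of a v2 write; the catalog/table identifier is a placeholder and the call chain needs a configured v2 catalog (for example Iceberg or Delta), so it is not runnable against a bare local session:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame([(14, "Tom"), (23, "Alice")], ["age", "name"])

(
    df.writeTo("my_catalog.db.people")   # placeholder identifier
      .using("parquet")
      .partitionedBy("age")
      .createOrReplace()
)
```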
39,451 | queue | Queue | Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
| class Queue:
'''Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
'''
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
'''Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
'''
with self.all_tasks_done:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
def join(self):
'''Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
'''
with self.all_tasks_done:
while self.unfinished_tasks:
self.all_tasks_done.wait()
def qsize(self):
'''Return the approximate size of the queue (not reliable!).'''
with self.mutex:
return self._qsize()
def empty(self):
'''Return True if the queue is empty, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() == 0
as a direct substitute, but be aware that either approach risks a race
condition where a queue can grow before the result of empty() or
qsize() can be used.
To create code that needs to wait for all queued tasks to be
completed, the preferred technique is to use the join() method.
'''
with self.mutex:
return not self._qsize()
def full(self):
'''Return True if the queue is full, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() >= n
as a direct substitute, but be aware that either approach risks a race
condition where a queue can shrink before the result of full() or
qsize() can be used.
'''
with self.mutex:
return 0 < self.maxsize <= self._qsize()
def put(self, item, block=True, timeout=None):
'''Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
'''
with self.not_full:
if self.maxsize > 0:
if not block:
if self._qsize() >= self.maxsize:
raise Full
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while self._qsize() >= self.maxsize:
remaining = endtime - time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
def get(self, block=True, timeout=None):
'''Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
'''
with self.not_empty:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while not self._qsize():
remaining = endtime - time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
def put_nowait(self, item):
'''Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
'''
return self.put(item, block=False)
def get_nowait(self):
'''Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
'''
return self.get(block=False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque()
def _qsize(self):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
__class_getitem__ = classmethod(types.GenericAlias)
| (maxsize=0) |
39,452 | queue | __init__ | null | def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
| (self, maxsize=0) |
39,453 | queue | _get | null | def _get(self):
return self.queue.popleft()
| (self) |
39,454 | queue | _init | null | def _init(self, maxsize):
self.queue = deque()
| (self, maxsize) |
39,455 | queue | _put | null | def _put(self, item):
self.queue.append(item)
| (self, item) |
39,456 | queue | _qsize | null | def _qsize(self):
return len(self.queue)
| (self) |
39,457 | queue | empty | Return True if the queue is empty, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() == 0
as a direct substitute, but be aware that either approach risks a race
condition where a queue can grow before the result of empty() or
qsize() can be used.
To create code that needs to wait for all queued tasks to be
completed, the preferred technique is to use the join() method.
| def empty(self):
'''Return True if the queue is empty, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() == 0
as a direct substitute, but be aware that either approach risks a race
condition where a queue can grow before the result of empty() or
qsize() can be used.
To create code that needs to wait for all queued tasks to be
completed, the preferred technique is to use the join() method.
'''
with self.mutex:
return not self._qsize()
| (self) |
39,458 | queue | full | Return True if the queue is full, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() >= n
as a direct substitute, but be aware that either approach risks a race
condition where a queue can shrink before the result of full() or
qsize() can be used.
| def full(self):
'''Return True if the queue is full, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() >= n
as a direct substitute, but be aware that either approach risks a race
condition where a queue can shrink before the result of full() or
qsize() can be used.
'''
with self.mutex:
return 0 < self.maxsize <= self._qsize()
| (self) |
39,459 | queue | get | Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
| def get(self, block=True, timeout=None):
'''Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
'''
with self.not_empty:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while not self._qsize():
remaining = endtime - time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
| (self, block=True, timeout=None) |
39,460 | queue | get_nowait | Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
| def get_nowait(self):
'''Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
'''
return self.get(block=False)
| (self) |
39,461 | queue | join | Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
| def join(self):
'''Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
'''
with self.all_tasks_done:
while self.unfinished_tasks:
self.all_tasks_done.wait()
| (self) |
39,462 | queue | put | Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
| def put(self, item, block=True, timeout=None):
'''Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
'''
with self.not_full:
if self.maxsize > 0:
if not block:
if self._qsize() >= self.maxsize:
raise Full
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while self._qsize() >= self.maxsize:
remaining = endtime - time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
| (self, item, block=True, timeout=None) |
39,463 | queue | put_nowait | Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
| def put_nowait(self, item):
'''Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
'''
return self.put(item, block=False)
| (self, item) |
39,464 | queue | qsize | Return the approximate size of the queue (not reliable!). | def qsize(self):
'''Return the approximate size of the queue (not reliable!).'''
with self.mutex:
return self._qsize()
| (self) |
39,465 | queue | task_done | Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
| def task_done(self):
'''Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
'''
with self.all_tasks_done:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
| (self) |
39,466 | wurlitzer | Wurlitzer | Class for Capturing Process-level FD output via dup2
Typically used via `wurlitzer.pipes`
| class Wurlitzer:
"""Class for Capturing Process-level FD output via dup2
Typically used via `wurlitzer.pipes`
"""
flush_interval = 0.2
def __init__(
self,
stdout=None,
stderr=None,
encoding=_default_encoding,
bufsize=_get_max_pipe_size(),
):
"""
Parameters
----------
stdout: stream or None
The stream for forwarding stdout.
stderr: stream or None
The stream for forwarding stderr.
encoding: str or None
The encoding to use, if streams should be interpreted as text.
bufsize: int or None
Set pipe buffer size using fcntl F_SETPIPE_SZ (linux only)
default: use /proc/sys/fs/pipe-max-size up to a max of 1MB
if 0, will do nothing.
"""
# accept logger objects
if stdout and isinstance(stdout, logging.Logger):
stdout = _LogPipe(stdout, stream_name="stdout", level=logging.INFO)
if stderr and isinstance(stderr, logging.Logger):
stderr = _LogPipe(stderr, stream_name="stderr", level=logging.ERROR)
self._stdout = stdout
if stderr == STDOUT:
self._stderr = self._stdout
else:
self._stderr = stderr
self.encoding = encoding
if bufsize is None:
bufsize = _get_max_pipe_size()
self._bufsize = bufsize
self._save_fds = {}
self._real_fds = {}
self._handlers = {}
self._handlers['stderr'] = self._handle_stderr
self._handlers['stdout'] = self._handle_stdout
def _setup_pipe(self, name):
real_fd = getattr(sys, '__%s__' % name).fileno()
save_fd = os.dup(real_fd)
self._save_fds[name] = save_fd
try:
capture_fd = getattr(self, "_" + name).fileno()
except Exception:
pass
else:
# if it has a fileno(),
# dup directly to capture file,
# no pipes needed
dup2(capture_fd, real_fd)
return None
pipe_out, pipe_in = os.pipe()
# set max pipe buffer size (linux only)
if self._bufsize:
try:
fcntl(pipe_in, F_SETPIPE_SZ, self._bufsize)
except OSError as error:
warnings.warn(
"Failed to set pipe buffer size: " + str(error), RuntimeWarning
)
dup2(pipe_in, real_fd)
os.close(pipe_in)
self._real_fds[name] = real_fd
# make pipe_out non-blocking
flags = fcntl(pipe_out, F_GETFL)
fcntl(pipe_out, F_SETFL, flags | os.O_NONBLOCK)
return pipe_out
def _decode(self, data):
"""Decode data, if any
Called before passing to stdout/stderr streams
"""
if self.encoding:
data = data.decode(self.encoding, 'replace')
return data
def _handle_stdout(self, data):
if self._stdout:
self._stdout.write(self._decode(data))
def _handle_stderr(self, data):
if self._stderr:
self._stderr.write(self._decode(data))
def _setup_handle(self):
"""Setup handle for output, if any"""
self.handle = (self._stdout, self._stderr)
def _finish_handle(self):
"""Finish handle, if anything should be done when it's all wrapped up."""
pass
def _flush(self):
"""flush sys.stdout/err and low-level FDs"""
if self._stdout and sys.stdout:
sys.stdout.flush()
if self._stderr and sys.stderr:
sys.stderr.flush()
if c_stdout_p is not None:
libc.fflush(c_stdout_p)
if c_stderr_p is not None:
libc.fflush(c_stderr_p)
def __enter__(self):
# flush anything out before starting
self._flush()
# setup handle
self._setup_handle()
# create pipe for stdout
pipes = []
names = {}
if self._stdout:
pipe = self._setup_pipe('stdout')
if pipe:
pipes.append(pipe)
names[pipe] = 'stdout'
if self._stderr:
pipe = self._setup_pipe('stderr')
if pipe:
pipes.append(pipe)
names[pipe] = 'stderr'
if not pipes:
# no pipes to handle (e.g. direct FD capture)
# so no forwarder thread needed
self.thread = None
return self.handle
# setup forwarder thread
self._control_r, self._control_w = os.pipe()
pipes.append(self._control_r)
names[self._control_r] = "control"
# flush pipes in a background thread to avoid blocking
# the reader thread when the buffer is full
flush_queue = Queue()
def flush_main():
while True:
msg = flush_queue.get()
if msg == 'stop':
return
self._flush()
flush_thread = threading.Thread(target=flush_main)
flush_thread.daemon = True
flush_thread.start()
def forwarder():
"""Forward bytes on a pipe to stream messages"""
draining = False
flush_interval = 0
poller = selectors.DefaultSelector()
for pipe_ in pipes:
poller.register(pipe_, selectors.EVENT_READ)
while pipes:
events = poller.select(flush_interval)
if events:
# found something to read, don't block select until
# we run out of things to read
flush_interval = 0
else:
# nothing to read
if draining:
# if we are draining and there's nothing to read, stop
break
else:
# nothing to read, get ready to wait.
# flush the streams in case there's something waiting
# to be written.
flush_queue.put('flush')
flush_interval = self.flush_interval
continue
for selector_key, flags in events:
fd = selector_key.fd
if fd == self._control_r:
draining = True
pipes.remove(self._control_r)
poller.unregister(self._control_r)
os.close(self._control_r)
continue
name = names[fd]
data = os.read(fd, 1024)
if not data:
# pipe closed, stop polling it
pipes.remove(fd)
poller.unregister(fd)
os.close(fd)
else:
handler = getattr(self, '_handle_%s' % name)
handler(data)
if not pipes:
# pipes closed, we are done
break
# stop flush thread
flush_queue.put('stop')
flush_thread.join()
# cleanup pipes
[os.close(pipe) for pipe in pipes]
poller.close()
self.thread = threading.Thread(target=forwarder)
self.thread.daemon = True
self.thread.start()
return self.handle
def __exit__(self, exc_type, exc_value, traceback):
# flush before exiting
self._flush()
if self.thread:
# signal output is complete on control pipe
os.write(self._control_w, b'\1')
self.thread.join()
os.close(self._control_w)
# restore original state
for name, real_fd in self._real_fds.items():
save_fd = self._save_fds[name]
dup2(save_fd, real_fd)
os.close(save_fd)
# finalize handle
self._finish_handle()
| (stdout=None, stderr=None, encoding='utf-8', bufsize=1048576) |
39,467 | wurlitzer | __enter__ | null | def __enter__(self):
# flush anything out before starting
self._flush()
# setup handle
self._setup_handle()
# create pipe for stdout
pipes = []
names = {}
if self._stdout:
pipe = self._setup_pipe('stdout')
if pipe:
pipes.append(pipe)
names[pipe] = 'stdout'
if self._stderr:
pipe = self._setup_pipe('stderr')
if pipe:
pipes.append(pipe)
names[pipe] = 'stderr'
if not pipes:
# no pipes to handle (e.g. direct FD capture)
# so no forwarder thread needed
self.thread = None
return self.handle
# setup forwarder thread
self._control_r, self._control_w = os.pipe()
pipes.append(self._control_r)
names[self._control_r] = "control"
# flush pipes in a background thread to avoid blocking
# the reader thread when the buffer is full
flush_queue = Queue()
def flush_main():
while True:
msg = flush_queue.get()
if msg == 'stop':
return
self._flush()
flush_thread = threading.Thread(target=flush_main)
flush_thread.daemon = True
flush_thread.start()
def forwarder():
"""Forward bytes on a pipe to stream messages"""
draining = False
flush_interval = 0
poller = selectors.DefaultSelector()
for pipe_ in pipes:
poller.register(pipe_, selectors.EVENT_READ)
while pipes:
events = poller.select(flush_interval)
if events:
# found something to read, don't block select until
# we run out of things to read
flush_interval = 0
else:
# nothing to read
if draining:
# if we are draining and there's nothing to read, stop
break
else:
# nothing to read, get ready to wait.
# flush the streams in case there's something waiting
# to be written.
flush_queue.put('flush')
flush_interval = self.flush_interval
continue
for selector_key, flags in events:
fd = selector_key.fd
if fd == self._control_r:
draining = True
pipes.remove(self._control_r)
poller.unregister(self._control_r)
os.close(self._control_r)
continue
name = names[fd]
data = os.read(fd, 1024)
if not data:
# pipe closed, stop polling it
pipes.remove(fd)
poller.unregister(fd)
os.close(fd)
else:
handler = getattr(self, '_handle_%s' % name)
handler(data)
if not pipes:
# pipes closed, we are done
break
# stop flush thread
flush_queue.put('stop')
flush_thread.join()
# cleanup pipes
[os.close(pipe) for pipe in pipes]
poller.close()
self.thread = threading.Thread(target=forwarder)
self.thread.daemon = True
self.thread.start()
return self.handle
| (self) |
39,468 | wurlitzer | __exit__ | null | def __exit__(self, exc_type, exc_value, traceback):
# flush before exiting
self._flush()
if self.thread:
# signal output is complete on control pipe
os.write(self._control_w, b'\1')
self.thread.join()
os.close(self._control_w)
# restore original state
for name, real_fd in self._real_fds.items():
save_fd = self._save_fds[name]
dup2(save_fd, real_fd)
os.close(save_fd)
# finalize handle
self._finish_handle()
| (self, exc_type, exc_value, traceback) |
39,469 | wurlitzer | __init__ |
Parameters
----------
stdout: stream or None
The stream for forwarding stdout.
stderr: stream or None
The stream for forwarding stderr.
encoding: str or None
The encoding to use, if streams should be interpreted as text.
bufsize: int or None
Set pipe buffer size using fcntl F_SETPIPE_SZ (linux only)
default: use /proc/sys/fs/pipe-max-size up to a max of 1MB
if 0, will do nothing.
| def __init__(
self,
stdout=None,
stderr=None,
encoding=_default_encoding,
bufsize=_get_max_pipe_size(),
):
"""
Parameters
----------
stdout: stream or None
The stream for forwarding stdout.
stderr: stream or None
The stream for forwarding stderr.
encoding: str or None
The encoding to use, if streams should be interpreted as text.
bufsize: int or None
Set pipe buffer size using fcntl F_SETPIPE_SZ (linux only)
default: use /proc/sys/fs/pipe-max-size up to a max of 1MB
if 0, will do nothing.
"""
# accept logger objects
if stdout and isinstance(stdout, logging.Logger):
stdout = _LogPipe(stdout, stream_name="stdout", level=logging.INFO)
if stderr and isinstance(stderr, logging.Logger):
stderr = _LogPipe(stderr, stream_name="stderr", level=logging.ERROR)
self._stdout = stdout
if stderr == STDOUT:
self._stderr = self._stdout
else:
self._stderr = stderr
self.encoding = encoding
if bufsize is None:
bufsize = _get_max_pipe_size()
self._bufsize = bufsize
self._save_fds = {}
self._real_fds = {}
self._handlers = {}
self._handlers['stderr'] = self._handle_stderr
self._handlers['stdout'] = self._handle_stdout
| (self, stdout=None, stderr=None, encoding='utf-8', bufsize=1048576) |
39,470 | wurlitzer | _decode | Decode data, if any
Called before passing to stdout/stderr streams
| def _decode(self, data):
"""Decode data, if any
Called before passing to stdout/stderr streams
"""
if self.encoding:
data = data.decode(self.encoding, 'replace')
return data
| (self, data) |
39,471 | wurlitzer | _finish_handle | Finish handle, if anything should be done when it's all wrapped up. | def _finish_handle(self):
"""Finish handle, if anything should be done when it's all wrapped up."""
pass
| (self) |
39,472 | wurlitzer | _flush | flush sys.stdout/err and low-level FDs | def _flush(self):
"""flush sys.stdout/err and low-level FDs"""
if self._stdout and sys.stdout:
sys.stdout.flush()
if self._stderr and sys.stderr:
sys.stderr.flush()
if c_stdout_p is not None:
libc.fflush(c_stdout_p)
if c_stderr_p is not None:
libc.fflush(c_stderr_p)
| (self) |
39,473 | wurlitzer | _handle_stderr | null | def _handle_stderr(self, data):
if self._stderr:
self._stderr.write(self._decode(data))
| (self, data) |
39,474 | wurlitzer | _handle_stdout | null | def _handle_stdout(self, data):
if self._stdout:
self._stdout.write(self._decode(data))
| (self, data) |
39,475 | wurlitzer | _setup_handle | Setup handle for output, if any | def _setup_handle(self):
"""Setup handle for output, if any"""
self.handle = (self._stdout, self._stderr)
| (self) |
39,476 | wurlitzer | _setup_pipe | null | def _setup_pipe(self, name):
real_fd = getattr(sys, '__%s__' % name).fileno()
save_fd = os.dup(real_fd)
self._save_fds[name] = save_fd
try:
capture_fd = getattr(self, "_" + name).fileno()
except Exception:
pass
else:
# if it has a fileno(),
# dup directly to capture file,
# no pipes needed
dup2(capture_fd, real_fd)
return None
pipe_out, pipe_in = os.pipe()
# set max pipe buffer size (linux only)
if self._bufsize:
try:
fcntl(pipe_in, F_SETPIPE_SZ, self._bufsize)
except OSError as error:
warnings.warn(
"Failed to set pipe buffer size: " + str(error), RuntimeWarning
)
dup2(pipe_in, real_fd)
os.close(pipe_in)
self._real_fds[name] = real_fd
# make pipe_out non-blocking
flags = fcntl(pipe_out, F_GETFL)
fcntl(pipe_out, F_SETFL, flags | os.O_NONBLOCK)
return pipe_out
| (self, name) |
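As a standalone illustration of the dup2 technique `_setup_pipe` uses, here is a sketch written with plain os calls; it mirrors the idea (save the real FD, point it at a pipe, restore it afterwards) rather than the class's exact bookkeeping, and assumes small writes that fit in the pipe buffer:

```python
import os
import sys

real_fd = sys.__stdout__.fileno()
save_fd = os.dup(real_fd)            # keep a copy so stdout can be restored

pipe_out, pipe_in = os.pipe()
os.dup2(pipe_in, real_fd)            # process-level stdout now feeds the pipe
os.close(pipe_in)

os.write(real_fd, b"captured at the FD level\n")

os.dup2(save_fd, real_fd)            # restore the original stdout
os.close(save_fd)

print(os.read(pipe_out, 1024).decode())   # read back what was captured
os.close(pipe_out)
```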